1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2017 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
49 #include "aarch64-tdep.h"
52 #include "elf/aarch64.h"
57 #include "record-full.h"
59 #include "features/aarch64.c"
61 #include "arch/aarch64-insn.h"
63 #include "opcode/aarch64.h"
66 #define submask(x) ((1L << ((x) + 1)) - 1)
67 #define bit(obj,st) (((obj) >> (st)) & 1)
68 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
70 /* Pseudo register base numbers. */
71 #define AARCH64_Q0_REGNUM 0
72 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
73 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
74 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
75 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
77 /* The standard register names, and all the valid aliases for them. */
80 const char *const name;
82 } aarch64_register_aliases[] =
84 /* 64-bit register names. */
85 {"fp", AARCH64_FP_REGNUM},
86 {"lr", AARCH64_LR_REGNUM},
87 {"sp", AARCH64_SP_REGNUM},
89 /* 32-bit register names. */
90 {"w0", AARCH64_X0_REGNUM + 0},
91 {"w1", AARCH64_X0_REGNUM + 1},
92 {"w2", AARCH64_X0_REGNUM + 2},
93 {"w3", AARCH64_X0_REGNUM + 3},
94 {"w4", AARCH64_X0_REGNUM + 4},
95 {"w5", AARCH64_X0_REGNUM + 5},
96 {"w6", AARCH64_X0_REGNUM + 6},
97 {"w7", AARCH64_X0_REGNUM + 7},
98 {"w8", AARCH64_X0_REGNUM + 8},
99 {"w9", AARCH64_X0_REGNUM + 9},
100 {"w10", AARCH64_X0_REGNUM + 10},
101 {"w11", AARCH64_X0_REGNUM + 11},
102 {"w12", AARCH64_X0_REGNUM + 12},
103 {"w13", AARCH64_X0_REGNUM + 13},
104 {"w14", AARCH64_X0_REGNUM + 14},
105 {"w15", AARCH64_X0_REGNUM + 15},
106 {"w16", AARCH64_X0_REGNUM + 16},
107 {"w17", AARCH64_X0_REGNUM + 17},
108 {"w18", AARCH64_X0_REGNUM + 18},
109 {"w19", AARCH64_X0_REGNUM + 19},
110 {"w20", AARCH64_X0_REGNUM + 20},
111 {"w21", AARCH64_X0_REGNUM + 21},
112 {"w22", AARCH64_X0_REGNUM + 22},
113 {"w23", AARCH64_X0_REGNUM + 23},
114 {"w24", AARCH64_X0_REGNUM + 24},
115 {"w25", AARCH64_X0_REGNUM + 25},
116 {"w26", AARCH64_X0_REGNUM + 26},
117 {"w27", AARCH64_X0_REGNUM + 27},
118 {"w28", AARCH64_X0_REGNUM + 28},
119 {"w29", AARCH64_X0_REGNUM + 29},
120 {"w30", AARCH64_X0_REGNUM + 30},
123 {"ip0", AARCH64_X0_REGNUM + 16},
124 {"ip1", AARCH64_X0_REGNUM + 17}
127 /* The required core 'R' registers. */
128 static const char *const aarch64_r_register_names[] =
130 /* These registers must appear in consecutive RAW register number
131 order and they must begin with AARCH64_X0_REGNUM! */
132 "x0", "x1", "x2", "x3",
133 "x4", "x5", "x6", "x7",
134 "x8", "x9", "x10", "x11",
135 "x12", "x13", "x14", "x15",
136 "x16", "x17", "x18", "x19",
137 "x20", "x21", "x22", "x23",
138 "x24", "x25", "x26", "x27",
139 "x28", "x29", "x30", "sp",
143 /* The FP/SIMD 'V' registers. */
144 static const char *const aarch64_v_register_names[] =
146 /* These registers must appear in consecutive RAW register number
147 order and they must begin with AARCH64_V0_REGNUM! */
148 "v0", "v1", "v2", "v3",
149 "v4", "v5", "v6", "v7",
150 "v8", "v9", "v10", "v11",
151 "v12", "v13", "v14", "v15",
152 "v16", "v17", "v18", "v19",
153 "v20", "v21", "v22", "v23",
154 "v24", "v25", "v26", "v27",
155 "v28", "v29", "v30", "v31",
160 /* AArch64 prologue cache structure. */
161 struct aarch64_prologue_cache
163 /* The program counter at the start of the function. It is used to
164 identify this frame as a prologue frame. */
167 /* The program counter at the time this frame was created; i.e. where
168 this function was called from. It is used to identify this frame as a
172 /* The stack pointer at the time this frame was created; i.e. the
173 caller's stack pointer when this function was called. It is used
174 to identify this frame. */
177 /* Is the target available to read from? */
180 /* The frame base for this frame is just prev_sp - frame size.
181 FRAMESIZE is the distance from the frame pointer to the
182 initial stack pointer. */
185 /* The register used to hold the frame pointer for this frame. */
188 /* Saved register offsets. */
189 struct trad_frame_saved_reg *saved_regs;
193 show_aarch64_debug (struct ui_file *file, int from_tty,
194 struct cmd_list_element *c, const char *value)
196 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
201 /* Abstract instruction reader. */
203 class abstract_instruction_reader
206 /* Read in one instruction. */
207 virtual ULONGEST read (CORE_ADDR memaddr, int len,
208 enum bfd_endian byte_order) = 0;
211 /* Instruction reader from real target. */
213 class instruction_reader : public abstract_instruction_reader
216 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
218 return read_code_unsigned_integer (memaddr, len, byte_order);
224 /* Analyze a prologue, looking for a recognizable stack frame
225 and frame pointer. Scan until we encounter a store that could
226 clobber the stack frame unexpectedly, or an unknown instruction. */
229 aarch64_analyze_prologue (struct gdbarch *gdbarch,
230 CORE_ADDR start, CORE_ADDR limit,
231 struct aarch64_prologue_cache *cache,
232 abstract_instruction_reader& reader)
234 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
236 /* Track X registers and D registers in prologue. */
237 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
238 struct pv_area *stack;
239 struct cleanup *back_to;
241 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
242 regs[i] = pv_register (i, 0);
243 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
244 back_to = make_cleanup_free_pv_area (stack);
246 for (; start < limit; start += 4)
251 insn = reader.read (start, 4, byte_order_for_code);
253 if (aarch64_decode_insn (insn, &inst, 1) != 0)
256 if (inst.opcode->iclass == addsub_imm
257 && (inst.opcode->op == OP_ADD
258 || strcmp ("sub", inst.opcode->name) == 0))
260 unsigned rd = inst.operands[0].reg.regno;
261 unsigned rn = inst.operands[1].reg.regno;
263 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
264 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
265 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
266 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
268 if (inst.opcode->op == OP_ADD)
270 regs[rd] = pv_add_constant (regs[rn],
271 inst.operands[2].imm.value);
275 regs[rd] = pv_add_constant (regs[rn],
276 -inst.operands[2].imm.value);
279 else if (inst.opcode->iclass == pcreladdr
280 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
282 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
283 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
285 regs[inst.operands[0].reg.regno] = pv_unknown ();
287 else if (inst.opcode->iclass == branch_imm)
289 /* Stop analysis on branch. */
292 else if (inst.opcode->iclass == condbranch)
294 /* Stop analysis on branch. */
297 else if (inst.opcode->iclass == branch_reg)
299 /* Stop analysis on branch. */
302 else if (inst.opcode->iclass == compbranch)
304 /* Stop analysis on branch. */
307 else if (inst.opcode->op == OP_MOVZ)
309 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
310 regs[inst.operands[0].reg.regno] = pv_unknown ();
312 else if (inst.opcode->iclass == log_shift
313 && strcmp (inst.opcode->name, "orr") == 0)
315 unsigned rd = inst.operands[0].reg.regno;
316 unsigned rn = inst.operands[1].reg.regno;
317 unsigned rm = inst.operands[2].reg.regno;
319 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
320 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
321 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
323 if (inst.operands[2].shifter.amount == 0
324 && rn == AARCH64_SP_REGNUM)
330 debug_printf ("aarch64: prologue analysis gave up "
331 "addr=%s opcode=0x%x (orr x register)\n",
332 core_addr_to_string_nz (start), insn);
337 else if (inst.opcode->op == OP_STUR)
339 unsigned rt = inst.operands[0].reg.regno;
340 unsigned rn = inst.operands[1].addr.base_regno;
342 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
344 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
345 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
346 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
347 gdb_assert (!inst.operands[1].addr.offset.is_reg);
349 pv_area_store (stack, pv_add_constant (regs[rn],
350 inst.operands[1].addr.offset.imm),
351 is64 ? 8 : 4, regs[rt]);
353 else if ((inst.opcode->iclass == ldstpair_off
354 || (inst.opcode->iclass == ldstpair_indexed
355 && inst.operands[2].addr.preind))
356 && strcmp ("stp", inst.opcode->name) == 0)
358 /* STP with addressing mode Pre-indexed and Base register. */
361 unsigned rn = inst.operands[2].addr.base_regno;
362 int32_t imm = inst.operands[2].addr.offset.imm;
364 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
365 || inst.operands[0].type == AARCH64_OPND_Ft);
366 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
367 || inst.operands[1].type == AARCH64_OPND_Ft2);
368 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
369 gdb_assert (!inst.operands[2].addr.offset.is_reg);
371 /* If recording this store would invalidate the store area
372 (perhaps because rn is not known) then we should abandon
373 further prologue analysis. */
374 if (pv_area_store_would_trash (stack,
375 pv_add_constant (regs[rn], imm)))
378 if (pv_area_store_would_trash (stack,
379 pv_add_constant (regs[rn], imm + 8)))
382 rt1 = inst.operands[0].reg.regno;
383 rt2 = inst.operands[1].reg.regno;
384 if (inst.operands[0].type == AARCH64_OPND_Ft)
386 /* Only bottom 64-bit of each V register (D register) need
388 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
389 rt1 += AARCH64_X_REGISTER_COUNT;
390 rt2 += AARCH64_X_REGISTER_COUNT;
393 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
395 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
398 if (inst.operands[2].addr.writeback)
399 regs[rn] = pv_add_constant (regs[rn], imm);
402 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
403 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
404 && (inst.opcode->op == OP_STR_POS
405 || inst.opcode->op == OP_STRF_POS)))
406 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
407 && strcmp ("str", inst.opcode->name) == 0)
409 /* STR (immediate) */
410 unsigned int rt = inst.operands[0].reg.regno;
411 int32_t imm = inst.operands[1].addr.offset.imm;
412 unsigned int rn = inst.operands[1].addr.base_regno;
414 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
415 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
416 || inst.operands[0].type == AARCH64_OPND_Ft);
418 if (inst.operands[0].type == AARCH64_OPND_Ft)
420 /* Only bottom 64-bit of each V register (D register) need
422 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
423 rt += AARCH64_X_REGISTER_COUNT;
426 pv_area_store (stack, pv_add_constant (regs[rn], imm),
427 is64 ? 8 : 4, regs[rt]);
428 if (inst.operands[1].addr.writeback)
429 regs[rn] = pv_add_constant (regs[rn], imm);
431 else if (inst.opcode->iclass == testbranch)
433 /* Stop analysis on branch. */
440 debug_printf ("aarch64: prologue analysis gave up addr=%s"
442 core_addr_to_string_nz (start), insn);
450 do_cleanups (back_to);
454 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
456 /* Frame pointer is fp. Frame size is constant. */
457 cache->framereg = AARCH64_FP_REGNUM;
458 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
460 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
462 /* Try the stack pointer. */
463 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
464 cache->framereg = AARCH64_SP_REGNUM;
468 /* We're just out of luck. We don't know where the frame is. */
469 cache->framereg = -1;
470 cache->framesize = 0;
473 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
477 if (pv_area_find_reg (stack, gdbarch, i, &offset))
478 cache->saved_regs[i].addr = offset;
481 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
483 int regnum = gdbarch_num_regs (gdbarch);
486 if (pv_area_find_reg (stack, gdbarch, i + AARCH64_X_REGISTER_COUNT,
488 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
491 do_cleanups (back_to);
496 aarch64_analyze_prologue (struct gdbarch *gdbarch,
497 CORE_ADDR start, CORE_ADDR limit,
498 struct aarch64_prologue_cache *cache)
500 instruction_reader reader;
502 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
508 namespace selftests {
510 /* Instruction reader from manually cooked instruction sequences. */
512 class instruction_reader_test : public abstract_instruction_reader
515 template<size_t SIZE>
516 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
517 : m_insns (insns), m_insns_size (SIZE)
520 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
522 SELF_CHECK (len == 4);
523 SELF_CHECK (memaddr % 4 == 0);
524 SELF_CHECK (memaddr / 4 < m_insns_size);
526 return m_insns[memaddr / 4];
530 const uint32_t *m_insns;
535 aarch64_analyze_prologue_test (void)
537 struct gdbarch_info info;
539 gdbarch_info_init (&info);
540 info.bfd_arch_info = bfd_scan_arch ("aarch64");
542 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
543 SELF_CHECK (gdbarch != NULL);
545 /* Test the simple prologue in which frame pointer is used. */
547 struct aarch64_prologue_cache cache;
548 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
550 static const uint32_t insns[] = {
551 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
552 0x910003fd, /* mov x29, sp */
553 0x97ffffe6, /* bl 0x400580 */
555 instruction_reader_test reader (insns);
557 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
558 SELF_CHECK (end == 4 * 2);
560 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
561 SELF_CHECK (cache.framesize == 272);
563 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
565 if (i == AARCH64_FP_REGNUM)
566 SELF_CHECK (cache.saved_regs[i].addr == -272);
567 else if (i == AARCH64_LR_REGNUM)
568 SELF_CHECK (cache.saved_regs[i].addr == -264);
570 SELF_CHECK (cache.saved_regs[i].addr == -1);
573 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
575 int regnum = gdbarch_num_regs (gdbarch);
577 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
582 /* Test a prologue in which STR is used and frame pointer is not
585 struct aarch64_prologue_cache cache;
586 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
588 static const uint32_t insns[] = {
589 0xf81d0ff3, /* str x19, [sp, #-48]! */
590 0xb9002fe0, /* str w0, [sp, #44] */
591 0xf90013e1, /* str x1, [sp, #32]*/
592 0xfd000fe0, /* str d0, [sp, #24] */
593 0xaa0203f3, /* mov x19, x2 */
594 0xf94013e0, /* ldr x0, [sp, #32] */
596 instruction_reader_test reader (insns);
598 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
600 SELF_CHECK (end == 4 * 5);
602 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
603 SELF_CHECK (cache.framesize == 48);
605 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
608 SELF_CHECK (cache.saved_regs[i].addr == -16);
610 SELF_CHECK (cache.saved_regs[i].addr == -48);
612 SELF_CHECK (cache.saved_regs[i].addr == -1);
615 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
617 int regnum = gdbarch_num_regs (gdbarch);
620 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
623 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
628 } // namespace selftests
629 #endif /* GDB_SELF_TEST */
631 /* Implement the "skip_prologue" gdbarch method. */
634 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
636 CORE_ADDR func_addr, limit_pc;
638 /* See if we can determine the end of the prologue via the symbol
639 table. If so, then return either PC, or the PC after the
640 prologue, whichever is greater. */
641 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
643 CORE_ADDR post_prologue_pc
644 = skip_prologue_using_sal (gdbarch, func_addr);
646 if (post_prologue_pc != 0)
647 return std::max (pc, post_prologue_pc);
650 /* Can't determine prologue from the symbol table, need to examine
653 /* Find an upper limit on the function prologue using the debug
654 information. If the debug information could not be used to
655 provide that bound, then use an arbitrary large number as the
657 limit_pc = skip_prologue_using_sal (gdbarch, pc);
659 limit_pc = pc + 128; /* Magic. */
661 /* Try disassembling prologue. */
662 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
665 /* Scan the function prologue for THIS_FRAME and populate the prologue
669 aarch64_scan_prologue (struct frame_info *this_frame,
670 struct aarch64_prologue_cache *cache)
672 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
673 CORE_ADDR prologue_start;
674 CORE_ADDR prologue_end;
675 CORE_ADDR prev_pc = get_frame_pc (this_frame);
676 struct gdbarch *gdbarch = get_frame_arch (this_frame);
678 cache->prev_pc = prev_pc;
680 /* Assume we do not find a frame. */
681 cache->framereg = -1;
682 cache->framesize = 0;
684 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
687 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
691 /* No line info so use the current PC. */
692 prologue_end = prev_pc;
694 else if (sal.end < prologue_end)
696 /* The next line begins after the function end. */
697 prologue_end = sal.end;
700 prologue_end = std::min (prologue_end, prev_pc);
701 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
707 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
711 cache->framereg = AARCH64_FP_REGNUM;
712 cache->framesize = 16;
713 cache->saved_regs[29].addr = 0;
714 cache->saved_regs[30].addr = 8;
718 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
719 function may throw an exception if the inferior's registers or memory is
723 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
724 struct aarch64_prologue_cache *cache)
726 CORE_ADDR unwound_fp;
729 aarch64_scan_prologue (this_frame, cache);
731 if (cache->framereg == -1)
734 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
738 cache->prev_sp = unwound_fp + cache->framesize;
740 /* Calculate actual addresses of saved registers using offsets
741 determined by aarch64_analyze_prologue. */
742 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
743 if (trad_frame_addr_p (cache->saved_regs, reg))
744 cache->saved_regs[reg].addr += cache->prev_sp;
746 cache->func = get_frame_func (this_frame);
748 cache->available_p = 1;
751 /* Allocate and fill in *THIS_CACHE with information about the prologue of
752 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
753 Return a pointer to the current aarch64_prologue_cache in
756 static struct aarch64_prologue_cache *
757 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
759 struct aarch64_prologue_cache *cache;
761 if (*this_cache != NULL)
762 return (struct aarch64_prologue_cache *) *this_cache;
764 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
765 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
770 aarch64_make_prologue_cache_1 (this_frame, cache);
772 CATCH (ex, RETURN_MASK_ERROR)
774 if (ex.error != NOT_AVAILABLE_ERROR)
775 throw_exception (ex);
782 /* Implement the "stop_reason" frame_unwind method. */
784 static enum unwind_stop_reason
785 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
788 struct aarch64_prologue_cache *cache
789 = aarch64_make_prologue_cache (this_frame, this_cache);
791 if (!cache->available_p)
792 return UNWIND_UNAVAILABLE;
794 /* Halt the backtrace at "_start". */
795 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
796 return UNWIND_OUTERMOST;
798 /* We've hit a wall, stop. */
799 if (cache->prev_sp == 0)
800 return UNWIND_OUTERMOST;
802 return UNWIND_NO_REASON;
805 /* Our frame ID for a normal frame is the current function's starting
806 PC and the caller's SP when we were called. */
809 aarch64_prologue_this_id (struct frame_info *this_frame,
810 void **this_cache, struct frame_id *this_id)
812 struct aarch64_prologue_cache *cache
813 = aarch64_make_prologue_cache (this_frame, this_cache);
815 if (!cache->available_p)
816 *this_id = frame_id_build_unavailable_stack (cache->func);
818 *this_id = frame_id_build (cache->prev_sp, cache->func);
821 /* Implement the "prev_register" frame_unwind method. */
823 static struct value *
824 aarch64_prologue_prev_register (struct frame_info *this_frame,
825 void **this_cache, int prev_regnum)
827 struct aarch64_prologue_cache *cache
828 = aarch64_make_prologue_cache (this_frame, this_cache);
830 /* If we are asked to unwind the PC, then we need to return the LR
831 instead. The prologue may save PC, but it will point into this
832 frame's prologue, not the next frame's resume location. */
833 if (prev_regnum == AARCH64_PC_REGNUM)
837 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
838 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
841 /* SP is generally not saved to the stack, but this frame is
842 identified by the next frame's stack pointer at the time of the
843 call. The value was already reconstructed into PREV_SP. */
856 if (prev_regnum == AARCH64_SP_REGNUM)
857 return frame_unwind_got_constant (this_frame, prev_regnum,
860 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
864 /* AArch64 prologue unwinder. */
865 struct frame_unwind aarch64_prologue_unwind =
868 aarch64_prologue_frame_unwind_stop_reason,
869 aarch64_prologue_this_id,
870 aarch64_prologue_prev_register,
872 default_frame_sniffer
875 /* Allocate and fill in *THIS_CACHE with information about the prologue of
876 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
877 Return a pointer to the current aarch64_prologue_cache in
880 static struct aarch64_prologue_cache *
881 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
883 struct aarch64_prologue_cache *cache;
885 if (*this_cache != NULL)
886 return (struct aarch64_prologue_cache *) *this_cache;
888 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
889 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
894 cache->prev_sp = get_frame_register_unsigned (this_frame,
896 cache->prev_pc = get_frame_pc (this_frame);
897 cache->available_p = 1;
899 CATCH (ex, RETURN_MASK_ERROR)
901 if (ex.error != NOT_AVAILABLE_ERROR)
902 throw_exception (ex);
909 /* Implement the "stop_reason" frame_unwind method. */
911 static enum unwind_stop_reason
912 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
915 struct aarch64_prologue_cache *cache
916 = aarch64_make_stub_cache (this_frame, this_cache);
918 if (!cache->available_p)
919 return UNWIND_UNAVAILABLE;
921 return UNWIND_NO_REASON;
924 /* Our frame ID for a stub frame is the current SP and LR. */
927 aarch64_stub_this_id (struct frame_info *this_frame,
928 void **this_cache, struct frame_id *this_id)
930 struct aarch64_prologue_cache *cache
931 = aarch64_make_stub_cache (this_frame, this_cache);
933 if (cache->available_p)
934 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
936 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
939 /* Implement the "sniffer" frame_unwind method. */
942 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
943 struct frame_info *this_frame,
944 void **this_prologue_cache)
946 CORE_ADDR addr_in_block;
949 addr_in_block = get_frame_address_in_block (this_frame);
950 if (in_plt_section (addr_in_block)
951 /* We also use the stub winder if the target memory is unreadable
952 to avoid having the prologue unwinder trying to read it. */
953 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
959 /* AArch64 stub unwinder. */
960 struct frame_unwind aarch64_stub_unwind =
963 aarch64_stub_frame_unwind_stop_reason,
964 aarch64_stub_this_id,
965 aarch64_prologue_prev_register,
967 aarch64_stub_unwind_sniffer
970 /* Return the frame base address of *THIS_FRAME. */
973 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
975 struct aarch64_prologue_cache *cache
976 = aarch64_make_prologue_cache (this_frame, this_cache);
978 return cache->prev_sp - cache->framesize;
981 /* AArch64 default frame base information. */
982 struct frame_base aarch64_normal_base =
984 &aarch64_prologue_unwind,
985 aarch64_normal_frame_base,
986 aarch64_normal_frame_base,
987 aarch64_normal_frame_base
990 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
991 dummy frame. The frame ID's base needs to match the TOS value
992 saved by save_dummy_frame_tos () and returned from
993 aarch64_push_dummy_call, and the PC needs to match the dummy
994 frame's breakpoint. */
996 static struct frame_id
997 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
999 return frame_id_build (get_frame_register_unsigned (this_frame,
1001 get_frame_pc (this_frame));
1004 /* Implement the "unwind_pc" gdbarch method. */
1007 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1010 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1015 /* Implement the "unwind_sp" gdbarch method. */
1018 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1020 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1023 /* Return the value of the REGNUM register in the previous frame of
1026 static struct value *
1027 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1028 void **this_cache, int regnum)
1034 case AARCH64_PC_REGNUM:
1035 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1036 return frame_unwind_got_constant (this_frame, regnum, lr);
1039 internal_error (__FILE__, __LINE__,
1040 _("Unexpected register %d"), regnum);
1044 /* Implement the "init_reg" dwarf2_frame_ops method. */
1047 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1048 struct dwarf2_frame_state_reg *reg,
1049 struct frame_info *this_frame)
1053 case AARCH64_PC_REGNUM:
1054 reg->how = DWARF2_FRAME_REG_FN;
1055 reg->loc.fn = aarch64_dwarf2_prev_register;
1057 case AARCH64_SP_REGNUM:
1058 reg->how = DWARF2_FRAME_REG_CFA;
1063 /* When arguments must be pushed onto the stack, they go on in reverse
1064 order. The code below implements a FILO (stack) to do this. */
1068 /* Value to pass on stack. It can be NULL if this item is for stack
1070 const gdb_byte *data;
1072 /* Size in bytes of value to pass on stack. */
1076 DEF_VEC_O (stack_item_t);
1078 /* Return the alignment (in bytes) of the given type. */
1081 aarch64_type_align (struct type *t)
1087 t = check_typedef (t);
1088 switch (TYPE_CODE (t))
1091 /* Should never happen. */
1092 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1096 case TYPE_CODE_ENUM:
1100 case TYPE_CODE_RANGE:
1101 case TYPE_CODE_BITSTRING:
1103 case TYPE_CODE_RVALUE_REF:
1104 case TYPE_CODE_CHAR:
1105 case TYPE_CODE_BOOL:
1106 return TYPE_LENGTH (t);
1108 case TYPE_CODE_ARRAY:
1109 if (TYPE_VECTOR (t))
1111 /* Use the natural alignment for vector types (the same for
1112 scalar type), but the maximum alignment is 128-bit. */
1113 if (TYPE_LENGTH (t) > 16)
1116 return TYPE_LENGTH (t);
1119 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1120 case TYPE_CODE_COMPLEX:
1121 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1123 case TYPE_CODE_STRUCT:
1124 case TYPE_CODE_UNION:
1126 for (n = 0; n < TYPE_NFIELDS (t); n++)
1128 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1136 /* Return 1 if *TY is a homogeneous floating-point aggregate or
1137 homogeneous short-vector aggregate as defined in the AAPCS64 ABI
1138 document; otherwise return 0. */
1141 is_hfa_or_hva (struct type *ty)
1143 switch (TYPE_CODE (ty))
1145 case TYPE_CODE_ARRAY:
1147 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1149 if (TYPE_VECTOR (ty))
1152 if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members. */
1153 && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
1154 || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
1155 && TYPE_VECTOR (target_ty))))
1160 case TYPE_CODE_UNION:
1161 case TYPE_CODE_STRUCT:
1163 /* HFA or HVA has at most four members. */
1164 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1166 struct type *member0_type;
1168 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1169 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
1170 || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
1171 && TYPE_VECTOR (member0_type)))
1175 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1177 struct type *member1_type;
1179 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1180 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1181 || (TYPE_LENGTH (member0_type)
1182 != TYPE_LENGTH (member1_type)))
1198 /* AArch64 function call information structure. */
1199 struct aarch64_call_info
1201 /* the current argument number. */
1204 /* The next general purpose register number, equivalent to NGRN as
1205 described in the AArch64 Procedure Call Standard. */
1208 /* The next SIMD and floating point register number, equivalent to
1209 NSRN as described in the AArch64 Procedure Call Standard. */
1212 /* The next stacked argument address, equivalent to NSAA as
1213 described in the AArch64 Procedure Call Standard. */
1216 /* Stack item vector. */
1217 VEC(stack_item_t) *si;
1220 /* Pass a value in a sequence of consecutive X registers. The caller
1221 is responsbile for ensuring sufficient registers are available. */
1224 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1225 struct aarch64_call_info *info, struct type *type,
1228 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1229 int len = TYPE_LENGTH (type);
1230 enum type_code typecode = TYPE_CODE (type);
1231 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1232 const bfd_byte *buf = value_contents (arg);
1238 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1239 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1243 /* Adjust sub-word struct/union args when big-endian. */
1244 if (byte_order == BFD_ENDIAN_BIG
1245 && partial_len < X_REGISTER_SIZE
1246 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1247 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1251 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1252 gdbarch_register_name (gdbarch, regnum),
1253 phex (regval, X_REGISTER_SIZE));
1255 regcache_cooked_write_unsigned (regcache, regnum, regval);
1262 /* Attempt to marshall a value in a V register. Return 1 if
1263 successful, or 0 if insufficient registers are available. This
1264 function, unlike the equivalent pass_in_x() function does not
1265 handle arguments spread across multiple registers. */
1268 pass_in_v (struct gdbarch *gdbarch,
1269 struct regcache *regcache,
1270 struct aarch64_call_info *info,
1271 int len, const bfd_byte *buf)
1275 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1276 gdb_byte reg[V_REGISTER_SIZE];
1281 memset (reg, 0, sizeof (reg));
1282 /* PCS C.1, the argument is allocated to the least significant
1283 bits of V register. */
1284 memcpy (reg, buf, len);
1285 regcache_cooked_write (regcache, regnum, reg);
1289 debug_printf ("arg %d in %s\n", info->argnum,
1290 gdbarch_register_name (gdbarch, regnum));
1298 /* Marshall an argument onto the stack. */
1301 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1304 const bfd_byte *buf = value_contents (arg);
1305 int len = TYPE_LENGTH (type);
1311 align = aarch64_type_align (type);
1313 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1314 Natural alignment of the argument's type. */
1315 align = align_up (align, 8);
1317 /* The AArch64 PCS requires at most doubleword alignment. */
1323 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1329 VEC_safe_push (stack_item_t, info->si, &item);
1332 if (info->nsaa & (align - 1))
1334 /* Push stack alignment padding. */
1335 int pad = align - (info->nsaa & (align - 1));
1340 VEC_safe_push (stack_item_t, info->si, &item);
1345 /* Marshall an argument into a sequence of one or more consecutive X
1346 registers or, if insufficient X registers are available then onto
1350 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1351 struct aarch64_call_info *info, struct type *type,
1354 int len = TYPE_LENGTH (type);
1355 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1357 /* PCS C.13 - Pass in registers if we have enough spare */
1358 if (info->ngrn + nregs <= 8)
1360 pass_in_x (gdbarch, regcache, info, type, arg);
1361 info->ngrn += nregs;
1366 pass_on_stack (info, type, arg);
1370 /* Pass a value in a V register, or on the stack if insufficient are
1374 pass_in_v_or_stack (struct gdbarch *gdbarch,
1375 struct regcache *regcache,
1376 struct aarch64_call_info *info,
1380 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
1381 value_contents (arg)))
1382 pass_on_stack (info, type, arg);
1385 /* Implement the "push_dummy_call" gdbarch method. */
1388 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1389 struct regcache *regcache, CORE_ADDR bp_addr,
1391 struct value **args, CORE_ADDR sp, int struct_return,
1392 CORE_ADDR struct_addr)
1395 struct aarch64_call_info info;
1396 struct type *func_type;
1397 struct type *return_type;
1398 int lang_struct_return;
1400 memset (&info, 0, sizeof (info));
1402 /* We need to know what the type of the called function is in order
1403 to determine the number of named/anonymous arguments for the
1404 actual argument placement, and the return type in order to handle
1405 return value correctly.
1407 The generic code above us views the decision of return in memory
1408 or return in registers as a two stage processes. The language
1409 handler is consulted first and may decide to return in memory (eg
1410 class with copy constructor returned by value), this will cause
1411 the generic code to allocate space AND insert an initial leading
1414 If the language code does not decide to pass in memory then the
1415 target code is consulted.
1417 If the language code decides to pass in memory we want to move
1418 the pointer inserted as the initial argument from the argument
1419 list and into X8, the conventional AArch64 struct return pointer
1422 This is slightly awkward, ideally the flag "lang_struct_return"
1423 would be passed to the targets implementation of push_dummy_call.
1424 Rather that change the target interface we call the language code
1425 directly ourselves. */
1427 func_type = check_typedef (value_type (function));
1429 /* Dereference function pointer types. */
1430 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1431 func_type = TYPE_TARGET_TYPE (func_type);
1433 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1434 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1436 /* If language_pass_by_reference () returned true we will have been
1437 given an additional initial argument, a hidden pointer to the
1438 return slot in memory. */
1439 return_type = TYPE_TARGET_TYPE (func_type);
1440 lang_struct_return = language_pass_by_reference (return_type);
1442 /* Set the return address. For the AArch64, the return breakpoint
1443 is always at BP_ADDR. */
1444 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1446 /* If we were given an initial argument for the return slot because
1447 lang_struct_return was true, lose it. */
1448 if (lang_struct_return)
1454 /* The struct_return pointer occupies X8. */
1455 if (struct_return || lang_struct_return)
1459 debug_printf ("struct return in %s = 0x%s\n",
1460 gdbarch_register_name (gdbarch,
1461 AARCH64_STRUCT_RETURN_REGNUM),
1462 paddress (gdbarch, struct_addr));
1464 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1468 for (argnum = 0; argnum < nargs; argnum++)
1470 struct value *arg = args[argnum];
1471 struct type *arg_type;
1474 arg_type = check_typedef (value_type (arg));
1475 len = TYPE_LENGTH (arg_type);
1477 switch (TYPE_CODE (arg_type))
1480 case TYPE_CODE_BOOL:
1481 case TYPE_CODE_CHAR:
1482 case TYPE_CODE_RANGE:
1483 case TYPE_CODE_ENUM:
1486 /* Promote to 32 bit integer. */
1487 if (TYPE_UNSIGNED (arg_type))
1488 arg_type = builtin_type (gdbarch)->builtin_uint32;
1490 arg_type = builtin_type (gdbarch)->builtin_int32;
1491 arg = value_cast (arg_type, arg);
1493 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1496 case TYPE_CODE_COMPLEX:
1499 const bfd_byte *buf = value_contents (arg);
1500 struct type *target_type =
1501 check_typedef (TYPE_TARGET_TYPE (arg_type));
1503 pass_in_v (gdbarch, regcache, &info,
1504 TYPE_LENGTH (target_type), buf);
1505 pass_in_v (gdbarch, regcache, &info,
1506 TYPE_LENGTH (target_type),
1507 buf + TYPE_LENGTH (target_type));
1512 pass_on_stack (&info, arg_type, arg);
1516 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1519 case TYPE_CODE_STRUCT:
1520 case TYPE_CODE_ARRAY:
1521 case TYPE_CODE_UNION:
1522 if (is_hfa_or_hva (arg_type))
1524 int elements = TYPE_NFIELDS (arg_type);
1526 /* Homogeneous Aggregates */
1527 if (info.nsrn + elements < 8)
1531 for (i = 0; i < elements; i++)
1533 /* We know that we have sufficient registers
1534 available therefore this will never fallback
1536 struct value *field =
1537 value_primitive_field (arg, 0, i, arg_type);
1538 struct type *field_type =
1539 check_typedef (value_type (field));
1541 pass_in_v_or_stack (gdbarch, regcache, &info,
1548 pass_on_stack (&info, arg_type, arg);
1551 else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
1552 && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
1554 /* Short vector types are passed in V registers. */
1555 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1559 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1560 invisible reference. */
1562 /* Allocate aligned storage. */
1563 sp = align_down (sp - len, 16);
1565 /* Write the real data into the stack. */
1566 write_memory (sp, value_contents (arg), len);
1568 /* Construct the indirection. */
1569 arg_type = lookup_pointer_type (arg_type);
1570 arg = value_from_pointer (arg_type, sp);
1571 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1574 /* PCS C.15 / C.18 multiple values pass. */
1575 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1579 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1584 /* Make sure stack retains 16 byte alignment. */
1586 sp -= 16 - (info.nsaa & 15);
1588 while (!VEC_empty (stack_item_t, info.si))
1590 stack_item_t *si = VEC_last (stack_item_t, info.si);
1593 if (si->data != NULL)
1594 write_memory (sp, si->data, si->len);
1595 VEC_pop (stack_item_t, info.si);
1598 VEC_free (stack_item_t, info.si);
1600 /* Finally, update the SP register. */
1601 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1606 /* Implement the "frame_align" gdbarch method. */
1609 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1611 /* Align the stack to sixteen bytes. */
1612 return sp & ~(CORE_ADDR) 15;
1615 /* Return the type for an AdvSISD Q register. */
1617 static struct type *
1618 aarch64_vnq_type (struct gdbarch *gdbarch)
1620 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1622 if (tdep->vnq_type == NULL)
1627 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1630 elem = builtin_type (gdbarch)->builtin_uint128;
1631 append_composite_type_field (t, "u", elem);
1633 elem = builtin_type (gdbarch)->builtin_int128;
1634 append_composite_type_field (t, "s", elem);
1639 return tdep->vnq_type;
1642 /* Return the type for an AdvSISD D register. */
1644 static struct type *
1645 aarch64_vnd_type (struct gdbarch *gdbarch)
1647 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1649 if (tdep->vnd_type == NULL)
1654 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1657 elem = builtin_type (gdbarch)->builtin_double;
1658 append_composite_type_field (t, "f", elem);
1660 elem = builtin_type (gdbarch)->builtin_uint64;
1661 append_composite_type_field (t, "u", elem);
1663 elem = builtin_type (gdbarch)->builtin_int64;
1664 append_composite_type_field (t, "s", elem);
1669 return tdep->vnd_type;
1672 /* Return the type for an AdvSISD S register. */
1674 static struct type *
1675 aarch64_vns_type (struct gdbarch *gdbarch)
1677 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1679 if (tdep->vns_type == NULL)
1684 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1687 elem = builtin_type (gdbarch)->builtin_float;
1688 append_composite_type_field (t, "f", elem);
1690 elem = builtin_type (gdbarch)->builtin_uint32;
1691 append_composite_type_field (t, "u", elem);
1693 elem = builtin_type (gdbarch)->builtin_int32;
1694 append_composite_type_field (t, "s", elem);
1699 return tdep->vns_type;
1702 /* Return the type for an AdvSISD H register. */
1704 static struct type *
1705 aarch64_vnh_type (struct gdbarch *gdbarch)
1707 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1709 if (tdep->vnh_type == NULL)
1714 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1717 elem = builtin_type (gdbarch)->builtin_uint16;
1718 append_composite_type_field (t, "u", elem);
1720 elem = builtin_type (gdbarch)->builtin_int16;
1721 append_composite_type_field (t, "s", elem);
1726 return tdep->vnh_type;
1729 /* Return the type for an AdvSISD B register. */
1731 static struct type *
1732 aarch64_vnb_type (struct gdbarch *gdbarch)
1734 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1736 if (tdep->vnb_type == NULL)
1741 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1744 elem = builtin_type (gdbarch)->builtin_uint8;
1745 append_composite_type_field (t, "u", elem);
1747 elem = builtin_type (gdbarch)->builtin_int8;
1748 append_composite_type_field (t, "s", elem);
1753 return tdep->vnb_type;
1756 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1759 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1761 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1762 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1764 if (reg == AARCH64_DWARF_SP)
1765 return AARCH64_SP_REGNUM;
1767 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1768 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1774 /* Implement the "print_insn" gdbarch method. */
1777 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1779 info->symbols = NULL;
1780 return print_insn_aarch64 (memaddr, info);
1783 /* AArch64 BRK software debug mode instruction.
1784 Note that AArch64 code is always little-endian.
1785 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1786 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1788 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1790 /* Extract from an array REGS containing the (raw) register state a
1791 function return value of type TYPE, and copy that, in virtual
1792 format, into VALBUF. */
1795 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1798 struct gdbarch *gdbarch = get_regcache_arch (regs);
1799 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1801 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1803 bfd_byte buf[V_REGISTER_SIZE];
1804 int len = TYPE_LENGTH (type);
1806 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1807 memcpy (valbuf, buf, len);
1809 else if (TYPE_CODE (type) == TYPE_CODE_INT
1810 || TYPE_CODE (type) == TYPE_CODE_CHAR
1811 || TYPE_CODE (type) == TYPE_CODE_BOOL
1812 || TYPE_CODE (type) == TYPE_CODE_PTR
1813 || TYPE_IS_REFERENCE (type)
1814 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1816 /* If the the type is a plain integer, then the access is
1817 straight-forward. Otherwise we have to play around a bit
1819 int len = TYPE_LENGTH (type);
1820 int regno = AARCH64_X0_REGNUM;
1825 /* By using store_unsigned_integer we avoid having to do
1826 anything special for small big-endian values. */
1827 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1828 store_unsigned_integer (valbuf,
1829 (len > X_REGISTER_SIZE
1830 ? X_REGISTER_SIZE : len), byte_order, tmp);
1831 len -= X_REGISTER_SIZE;
1832 valbuf += X_REGISTER_SIZE;
1835 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1837 int regno = AARCH64_V0_REGNUM;
1838 bfd_byte buf[V_REGISTER_SIZE];
1839 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1840 int len = TYPE_LENGTH (target_type);
1842 regcache_cooked_read (regs, regno, buf);
1843 memcpy (valbuf, buf, len);
1845 regcache_cooked_read (regs, regno + 1, buf);
1846 memcpy (valbuf, buf, len);
1849 else if (is_hfa_or_hva (type))
1851 int elements = TYPE_NFIELDS (type);
1852 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1853 int len = TYPE_LENGTH (member_type);
1856 for (i = 0; i < elements; i++)
1858 int regno = AARCH64_V0_REGNUM + i;
1859 bfd_byte buf[V_REGISTER_SIZE];
1863 debug_printf ("read HFA or HVA return value element %d from %s\n",
1865 gdbarch_register_name (gdbarch, regno));
1867 regcache_cooked_read (regs, regno, buf);
1869 memcpy (valbuf, buf, len);
1873 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1874 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1876 /* Short vector is returned in V register. */
1877 gdb_byte buf[V_REGISTER_SIZE];
1879 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1880 memcpy (valbuf, buf, TYPE_LENGTH (type));
1884 /* For a structure or union the behaviour is as if the value had
1885 been stored to word-aligned memory and then loaded into
1886 registers with 64-bit load instruction(s). */
1887 int len = TYPE_LENGTH (type);
1888 int regno = AARCH64_X0_REGNUM;
1889 bfd_byte buf[X_REGISTER_SIZE];
1893 regcache_cooked_read (regs, regno++, buf);
1894 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1895 len -= X_REGISTER_SIZE;
1896 valbuf += X_REGISTER_SIZE;
1902 /* Will a function return an aggregate type in memory or in a
1903 register? Return 0 if an aggregate type can be returned in a
1904 register, 1 if it must be returned in memory. */
1907 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1909 type = check_typedef (type);
1911 if (is_hfa_or_hva (type))
1913 /* v0-v7 are used to return values and one register is allocated
1914 for one member. However, HFA or HVA has at most four members. */
1918 if (TYPE_LENGTH (type) > 16)
1920 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1921 invisible reference. */
1929 /* Write into appropriate registers a function return value of type
1930 TYPE, given in virtual format. */
1933 aarch64_store_return_value (struct type *type, struct regcache *regs,
1934 const gdb_byte *valbuf)
1936 struct gdbarch *gdbarch = get_regcache_arch (regs);
1937 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1939 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1941 bfd_byte buf[V_REGISTER_SIZE];
1942 int len = TYPE_LENGTH (type);
1944 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1945 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1947 else if (TYPE_CODE (type) == TYPE_CODE_INT
1948 || TYPE_CODE (type) == TYPE_CODE_CHAR
1949 || TYPE_CODE (type) == TYPE_CODE_BOOL
1950 || TYPE_CODE (type) == TYPE_CODE_PTR
1951 || TYPE_IS_REFERENCE (type)
1952 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1954 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1956 /* Values of one word or less are zero/sign-extended and
1958 bfd_byte tmpbuf[X_REGISTER_SIZE];
1959 LONGEST val = unpack_long (type, valbuf);
1961 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1962 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1966 /* Integral values greater than one word are stored in
1967 consecutive registers starting with r0. This will always
1968 be a multiple of the regiser size. */
1969 int len = TYPE_LENGTH (type);
1970 int regno = AARCH64_X0_REGNUM;
1974 regcache_cooked_write (regs, regno++, valbuf);
1975 len -= X_REGISTER_SIZE;
1976 valbuf += X_REGISTER_SIZE;
1980 else if (is_hfa_or_hva (type))
1982 int elements = TYPE_NFIELDS (type);
1983 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1984 int len = TYPE_LENGTH (member_type);
1987 for (i = 0; i < elements; i++)
1989 int regno = AARCH64_V0_REGNUM + i;
1990 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
1994 debug_printf ("write HFA or HVA return value element %d to %s\n",
1996 gdbarch_register_name (gdbarch, regno));
1999 memcpy (tmpbuf, valbuf, len);
2000 regcache_cooked_write (regs, regno, tmpbuf);
2004 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
2005 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
2008 gdb_byte buf[V_REGISTER_SIZE];
2010 memcpy (buf, valbuf, TYPE_LENGTH (type));
2011 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2015 /* For a structure or union the behaviour is as if the value had
2016 been stored to word-aligned memory and then loaded into
2017 registers with 64-bit load instruction(s). */
2018 int len = TYPE_LENGTH (type);
2019 int regno = AARCH64_X0_REGNUM;
2020 bfd_byte tmpbuf[X_REGISTER_SIZE];
2024 memcpy (tmpbuf, valbuf,
2025 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2026 regcache_cooked_write (regs, regno++, tmpbuf);
2027 len -= X_REGISTER_SIZE;
2028 valbuf += X_REGISTER_SIZE;
2033 /* Implement the "return_value" gdbarch method. */
2035 static enum return_value_convention
2036 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2037 struct type *valtype, struct regcache *regcache,
2038 gdb_byte *readbuf, const gdb_byte *writebuf)
2041 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2042 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2043 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2045 if (aarch64_return_in_memory (gdbarch, valtype))
2048 debug_printf ("return value in memory\n");
2049 return RETURN_VALUE_STRUCT_CONVENTION;
2054 aarch64_store_return_value (valtype, regcache, writebuf);
2057 aarch64_extract_return_value (valtype, regcache, readbuf);
2060 debug_printf ("return value in registers\n");
2062 return RETURN_VALUE_REGISTER_CONVENTION;
2065 /* Implement the "get_longjmp_target" gdbarch method. */
2068 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2071 gdb_byte buf[X_REGISTER_SIZE];
2072 struct gdbarch *gdbarch = get_frame_arch (frame);
2073 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2074 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2076 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2078 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2082 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2086 /* Implement the "gen_return_address" gdbarch method. */
2089 aarch64_gen_return_address (struct gdbarch *gdbarch,
2090 struct agent_expr *ax, struct axs_value *value,
2093 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2094 value->kind = axs_lvalue_register;
2095 value->u.reg = AARCH64_LR_REGNUM;
2099 /* Return the pseudo register name corresponding to register regnum. */
2102 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2104 static const char *const q_name[] =
2106 "q0", "q1", "q2", "q3",
2107 "q4", "q5", "q6", "q7",
2108 "q8", "q9", "q10", "q11",
2109 "q12", "q13", "q14", "q15",
2110 "q16", "q17", "q18", "q19",
2111 "q20", "q21", "q22", "q23",
2112 "q24", "q25", "q26", "q27",
2113 "q28", "q29", "q30", "q31",
2116 static const char *const d_name[] =
2118 "d0", "d1", "d2", "d3",
2119 "d4", "d5", "d6", "d7",
2120 "d8", "d9", "d10", "d11",
2121 "d12", "d13", "d14", "d15",
2122 "d16", "d17", "d18", "d19",
2123 "d20", "d21", "d22", "d23",
2124 "d24", "d25", "d26", "d27",
2125 "d28", "d29", "d30", "d31",
2128 static const char *const s_name[] =
2130 "s0", "s1", "s2", "s3",
2131 "s4", "s5", "s6", "s7",
2132 "s8", "s9", "s10", "s11",
2133 "s12", "s13", "s14", "s15",
2134 "s16", "s17", "s18", "s19",
2135 "s20", "s21", "s22", "s23",
2136 "s24", "s25", "s26", "s27",
2137 "s28", "s29", "s30", "s31",
2140 static const char *const h_name[] =
2142 "h0", "h1", "h2", "h3",
2143 "h4", "h5", "h6", "h7",
2144 "h8", "h9", "h10", "h11",
2145 "h12", "h13", "h14", "h15",
2146 "h16", "h17", "h18", "h19",
2147 "h20", "h21", "h22", "h23",
2148 "h24", "h25", "h26", "h27",
2149 "h28", "h29", "h30", "h31",
2152 static const char *const b_name[] =
2154 "b0", "b1", "b2", "b3",
2155 "b4", "b5", "b6", "b7",
2156 "b8", "b9", "b10", "b11",
2157 "b12", "b13", "b14", "b15",
2158 "b16", "b17", "b18", "b19",
2159 "b20", "b21", "b22", "b23",
2160 "b24", "b25", "b26", "b27",
2161 "b28", "b29", "b30", "b31",
2164 regnum -= gdbarch_num_regs (gdbarch);
2166 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2167 return q_name[regnum - AARCH64_Q0_REGNUM];
2169 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2170 return d_name[regnum - AARCH64_D0_REGNUM];
2172 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2173 return s_name[regnum - AARCH64_S0_REGNUM];
2175 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2176 return h_name[regnum - AARCH64_H0_REGNUM];
2178 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2179 return b_name[regnum - AARCH64_B0_REGNUM];
2181 internal_error (__FILE__, __LINE__,
2182 _("aarch64_pseudo_register_name: bad register number %d"),
2186 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2188 static struct type *
2189 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2191 regnum -= gdbarch_num_regs (gdbarch);
2193 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2194 return aarch64_vnq_type (gdbarch);
2196 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2197 return aarch64_vnd_type (gdbarch);
2199 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2200 return aarch64_vns_type (gdbarch);
2202 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2203 return aarch64_vnh_type (gdbarch);
2205 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2206 return aarch64_vnb_type (gdbarch);
2208 internal_error (__FILE__, __LINE__,
2209 _("aarch64_pseudo_register_type: bad register number %d"),
2213 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2216 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2217 struct reggroup *group)
2219 regnum -= gdbarch_num_regs (gdbarch);
2221 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2222 return group == all_reggroup || group == vector_reggroup;
2223 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2224 return (group == all_reggroup || group == vector_reggroup
2225 || group == float_reggroup);
2226 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2227 return (group == all_reggroup || group == vector_reggroup
2228 || group == float_reggroup);
2229 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2230 return group == all_reggroup || group == vector_reggroup;
2231 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2232 return group == all_reggroup || group == vector_reggroup;
2234 return group == all_reggroup;
2237 /* Implement the "pseudo_register_read_value" gdbarch method. */
2239 static struct value *
2240 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2241 struct regcache *regcache,
2244 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2245 struct value *result_value;
2248 result_value = allocate_value (register_type (gdbarch, regnum));
2249 VALUE_LVAL (result_value) = lval_register;
2250 VALUE_REGNUM (result_value) = regnum;
2251 buf = value_contents_raw (result_value);
2253 regnum -= gdbarch_num_regs (gdbarch);
2255 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2257 enum register_status status;
2260 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2261 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2262 if (status != REG_VALID)
2263 mark_value_bytes_unavailable (result_value, 0,
2264 TYPE_LENGTH (value_type (result_value)));
2266 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2267 return result_value;
2270 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2272 enum register_status status;
2275 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2276 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2277 if (status != REG_VALID)
2278 mark_value_bytes_unavailable (result_value, 0,
2279 TYPE_LENGTH (value_type (result_value)));
2281 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2282 return result_value;
2285 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2287 enum register_status status;
2290 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2291 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2292 if (status != REG_VALID)
2293 mark_value_bytes_unavailable (result_value, 0,
2294 TYPE_LENGTH (value_type (result_value)));
2296 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2297 return result_value;
2300 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2302 enum register_status status;
2305 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2306 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2307 if (status != REG_VALID)
2308 mark_value_bytes_unavailable (result_value, 0,
2309 TYPE_LENGTH (value_type (result_value)));
2311 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2312 return result_value;
2315 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2317 enum register_status status;
2320 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2321 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2322 if (status != REG_VALID)
2323 mark_value_bytes_unavailable (result_value, 0,
2324 TYPE_LENGTH (value_type (result_value)));
2326 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2327 return result_value;
2330 gdb_assert_not_reached ("regnum out of bound");
2333 /* Implement the "pseudo_register_write" gdbarch method. */
2336 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2337 int regnum, const gdb_byte *buf)
2339 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2341 /* Ensure the register buffer is zero, we want gdb writes of the
2342 various 'scalar' pseudo registers to behavior like architectural
2343 writes, register width bytes are written the remainder are set to
2345 memset (reg_buf, 0, sizeof (reg_buf));
2347 regnum -= gdbarch_num_regs (gdbarch);
2349 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2351 /* pseudo Q registers */
2354 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2355 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2356 regcache_raw_write (regcache, v_regnum, reg_buf);
2360 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2362 /* pseudo D registers */
2365 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2366 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2367 regcache_raw_write (regcache, v_regnum, reg_buf);
2371 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2375 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2376 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2377 regcache_raw_write (regcache, v_regnum, reg_buf);
2381 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2383 /* pseudo H registers */
2386 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2387 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2388 regcache_raw_write (regcache, v_regnum, reg_buf);
2392 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2394 /* pseudo B registers */
2397 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2398 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2399 regcache_raw_write (regcache, v_regnum, reg_buf);
2403 gdb_assert_not_reached ("regnum out of bound");
2406 /* Callback function for user_reg_add. */
2408 static struct value *
2409 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2411 const int *reg_p = (const int *) baton;
2413 return value_of_register (*reg_p, frame);
2417 /* Implement the "software_single_step" gdbarch method, needed to
2418 single step through atomic sequences on AArch64. */
2420 static VEC (CORE_ADDR) *
2421 aarch64_software_single_step (struct regcache *regcache)
2423 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2424 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2425 const int insn_size = 4;
2426 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2427 CORE_ADDR pc = regcache_read_pc (regcache);
2428 CORE_ADDR breaks[2] = { -1, -1 };
2430 CORE_ADDR closing_insn = 0;
2431 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2432 byte_order_for_code);
2435 int bc_insn_count = 0; /* Conditional branch instruction count. */
2436 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2438 VEC (CORE_ADDR) *next_pcs = NULL;
2440 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2443 /* Look for a Load Exclusive instruction which begins the sequence. */
2444 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2447 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2450 insn = read_memory_unsigned_integer (loc, insn_size,
2451 byte_order_for_code);
2453 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2455 /* Check if the instruction is a conditional branch. */
2456 if (inst.opcode->iclass == condbranch)
2458 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2460 if (bc_insn_count >= 1)
2463 /* It is, so we'll try to set a breakpoint at the destination. */
2464 breaks[1] = loc + inst.operands[0].imm.value;
2470 /* Look for the Store Exclusive which closes the atomic sequence. */
2471 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2478 /* We didn't find a closing Store Exclusive instruction, fall back. */
2482 /* Insert breakpoint after the end of the atomic sequence. */
2483 breaks[0] = loc + insn_size;
2485 /* Check for duplicated breakpoints, and also check that the second
2486 breakpoint is not within the atomic sequence. */
2488 && (breaks[1] == breaks[0]
2489 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2490 last_breakpoint = 0;
2492 /* Insert the breakpoint at the end of the sequence, and one at the
2493 destination of the conditional branch, if it exists. */
2494 for (index = 0; index <= last_breakpoint; index++)
2495 VEC_safe_push (CORE_ADDR, next_pcs, breaks[index]);
2500 struct displaced_step_closure
2502 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2503 is being displaced stepping. */
2506 /* PC adjustment offset after displaced stepping. */
2510 /* Data when visiting instructions for displaced stepping. */
2512 struct aarch64_displaced_step_data
2514 struct aarch64_insn_data base;
2516 /* The address where the instruction will be executed at. */
2518 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2519 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2520 /* Number of instructions in INSN_BUF. */
2521 unsigned insn_count;
2522 /* Registers when doing displaced stepping. */
2523 struct regcache *regs;
2525 struct displaced_step_closure *dsc;
2528 /* Implementation of aarch64_insn_visitor method "b". */
2531 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2532 struct aarch64_insn_data *data)
2534 struct aarch64_displaced_step_data *dsd
2535 = (struct aarch64_displaced_step_data *) data;
2536 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2538 if (can_encode_int32 (new_offset, 28))
2540 /* Emit B rather than BL, because executing BL on a new address
2541 will get the wrong address into LR. In order to avoid this,
2542 we emit B, and update LR if the instruction is BL. */
2543 emit_b (dsd->insn_buf, 0, new_offset);
2549 emit_nop (dsd->insn_buf);
2551 dsd->dsc->pc_adjust = offset;
2557 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2558 data->insn_addr + 4);
2562 /* Implementation of aarch64_insn_visitor method "b_cond". */
2565 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2566 struct aarch64_insn_data *data)
2568 struct aarch64_displaced_step_data *dsd
2569 = (struct aarch64_displaced_step_data *) data;
2571 /* GDB has to fix up PC after displaced step this instruction
2572 differently according to the condition is true or false. Instead
2573 of checking COND against conditional flags, we can use
2574 the following instructions, and GDB can tell how to fix up PC
2575 according to the PC value.
2577 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2583 emit_bcond (dsd->insn_buf, cond, 8);
2585 dsd->dsc->pc_adjust = offset;
2586 dsd->insn_count = 1;
2589 /* Dynamically allocate a new register. If we know the register
2590 statically, we should make it a global as above instead of using this
2593 static struct aarch64_register
2594 aarch64_register (unsigned num, int is64)
2596 return (struct aarch64_register) { num, is64 };
2599 /* Implementation of aarch64_insn_visitor method "cb". */
2602 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2603 const unsigned rn, int is64,
2604 struct aarch64_insn_data *data)
2606 struct aarch64_displaced_step_data *dsd
2607 = (struct aarch64_displaced_step_data *) data;
2609 /* The offset is out of range for a compare and branch
2610 instruction. We can use the following instructions instead:
2612 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2617 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2618 dsd->insn_count = 1;
2620 dsd->dsc->pc_adjust = offset;
2623 /* Implementation of aarch64_insn_visitor method "tb". */
2626 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2627 const unsigned rt, unsigned bit,
2628 struct aarch64_insn_data *data)
2630 struct aarch64_displaced_step_data *dsd
2631 = (struct aarch64_displaced_step_data *) data;
2633 /* The offset is out of range for a test bit and branch
2634 instruction We can use the following instructions instead:
2636 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2642 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2643 dsd->insn_count = 1;
2645 dsd->dsc->pc_adjust = offset;
2648 /* Implementation of aarch64_insn_visitor method "adr". */
2651 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2652 const int is_adrp, struct aarch64_insn_data *data)
2654 struct aarch64_displaced_step_data *dsd
2655 = (struct aarch64_displaced_step_data *) data;
2656 /* We know exactly the address the ADR{P,} instruction will compute.
2657 We can just write it to the destination register. */
2658 CORE_ADDR address = data->insn_addr + offset;
2662 /* Clear the lower 12 bits of the offset to get the 4K page. */
2663 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2667 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2670 dsd->dsc->pc_adjust = 4;
2671 emit_nop (dsd->insn_buf);
2672 dsd->insn_count = 1;
2675 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2678 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2679 const unsigned rt, const int is64,
2680 struct aarch64_insn_data *data)
2682 struct aarch64_displaced_step_data *dsd
2683 = (struct aarch64_displaced_step_data *) data;
2684 CORE_ADDR address = data->insn_addr + offset;
2685 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2687 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2691 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2692 aarch64_register (rt, 1), zero);
2694 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2695 aarch64_register (rt, 1), zero);
2697 dsd->dsc->pc_adjust = 4;
2700 /* Implementation of aarch64_insn_visitor method "others". */
2703 aarch64_displaced_step_others (const uint32_t insn,
2704 struct aarch64_insn_data *data)
2706 struct aarch64_displaced_step_data *dsd
2707 = (struct aarch64_displaced_step_data *) data;
2709 aarch64_emit_insn (dsd->insn_buf, insn);
2710 dsd->insn_count = 1;
2712 if ((insn & 0xfffffc1f) == 0xd65f0000)
2715 dsd->dsc->pc_adjust = 0;
2718 dsd->dsc->pc_adjust = 4;
2721 static const struct aarch64_insn_visitor visitor =
2723 aarch64_displaced_step_b,
2724 aarch64_displaced_step_b_cond,
2725 aarch64_displaced_step_cb,
2726 aarch64_displaced_step_tb,
2727 aarch64_displaced_step_adr,
2728 aarch64_displaced_step_ldr_literal,
2729 aarch64_displaced_step_others,
2732 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2734 struct displaced_step_closure *
2735 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2736 CORE_ADDR from, CORE_ADDR to,
2737 struct regcache *regs)
2739 struct displaced_step_closure *dsc = NULL;
2740 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2741 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2742 struct aarch64_displaced_step_data dsd;
2745 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2748 /* Look for a Load Exclusive instruction which begins the sequence. */
2749 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2751 /* We can't displaced step atomic sequences. */
2755 dsc = XCNEW (struct displaced_step_closure);
2756 dsd.base.insn_addr = from;
2761 aarch64_relocate_instruction (insn, &visitor,
2762 (struct aarch64_insn_data *) &dsd);
2763 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2765 if (dsd.insn_count != 0)
2769 /* Instruction can be relocated to scratch pad. Copy
2770 relocated instruction(s) there. */
2771 for (i = 0; i < dsd.insn_count; i++)
2773 if (debug_displaced)
2775 debug_printf ("displaced: writing insn ");
2776 debug_printf ("%.8x", dsd.insn_buf[i]);
2777 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2779 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2780 (ULONGEST) dsd.insn_buf[i]);
2792 /* Implement the "displaced_step_fixup" gdbarch method. */
2795 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2796 struct displaced_step_closure *dsc,
2797 CORE_ADDR from, CORE_ADDR to,
2798 struct regcache *regs)
2804 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2807 /* Condition is true. */
2809 else if (pc - to == 4)
2811 /* Condition is false. */
2815 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2818 if (dsc->pc_adjust != 0)
2820 if (debug_displaced)
2822 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2823 paddress (gdbarch, from), dsc->pc_adjust);
2825 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2826 from + dsc->pc_adjust);
2830 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2833 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2834 struct displaced_step_closure *closure)
2839 /* Initialize the current architecture based on INFO. If possible,
2840 re-use an architecture from ARCHES, which is a list of
2841 architectures already created during this debugging session.
2843 Called e.g. at program startup, when reading a core file, and when
2844 reading a binary file. */
2846 static struct gdbarch *
2847 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2849 struct gdbarch_tdep *tdep;
2850 struct gdbarch *gdbarch;
2851 struct gdbarch_list *best_arch;
2852 struct tdesc_arch_data *tdesc_data = NULL;
2853 const struct target_desc *tdesc = info.target_desc;
2856 const struct tdesc_feature *feature;
2858 int num_pseudo_regs = 0;
2860 /* Ensure we always have a target descriptor. */
2861 if (!tdesc_has_registers (tdesc))
2862 tdesc = tdesc_aarch64;
2866 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2868 if (feature == NULL)
2871 tdesc_data = tdesc_data_alloc ();
2873 /* Validate the descriptor provides the mandatory core R registers
2874 and allocate their numbers. */
2875 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2877 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2878 aarch64_r_register_names[i]);
2880 num_regs = AARCH64_X0_REGNUM + i;
2882 /* Look for the V registers. */
2883 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2886 /* Validate the descriptor provides the mandatory V registers
2887 and allocate their numbers. */
2888 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2890 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2891 aarch64_v_register_names[i]);
2893 num_regs = AARCH64_V0_REGNUM + i;
2895 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2896 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2897 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2898 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2899 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2904 tdesc_data_cleanup (tdesc_data);
2908 /* AArch64 code is always little-endian. */
2909 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2911 /* If there is already a candidate, use it. */
2912 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2914 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2916 /* Found a match. */
2920 if (best_arch != NULL)
2922 if (tdesc_data != NULL)
2923 tdesc_data_cleanup (tdesc_data);
2924 return best_arch->gdbarch;
2927 tdep = XCNEW (struct gdbarch_tdep);
2928 gdbarch = gdbarch_alloc (&info, tdep);
2930 /* This should be low enough for everything. */
2931 tdep->lowest_pc = 0x20;
2932 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2933 tdep->jb_elt_size = 8;
2935 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2936 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2938 /* Frame handling. */
2939 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2940 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2941 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2943 /* Advance PC across function entry code. */
2944 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2946 /* The stack grows downward. */
2947 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2949 /* Breakpoint manipulation. */
2950 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
2951 aarch64_breakpoint::kind_from_pc);
2952 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
2953 aarch64_breakpoint::bp_from_kind);
2954 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2955 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2957 /* Information about registers, etc. */
2958 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2959 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2960 set_gdbarch_num_regs (gdbarch, num_regs);
2962 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2963 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2964 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2965 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2966 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2967 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2968 aarch64_pseudo_register_reggroup_p);
2971 set_gdbarch_short_bit (gdbarch, 16);
2972 set_gdbarch_int_bit (gdbarch, 32);
2973 set_gdbarch_float_bit (gdbarch, 32);
2974 set_gdbarch_double_bit (gdbarch, 64);
2975 set_gdbarch_long_double_bit (gdbarch, 128);
2976 set_gdbarch_long_bit (gdbarch, 64);
2977 set_gdbarch_long_long_bit (gdbarch, 64);
2978 set_gdbarch_ptr_bit (gdbarch, 64);
2979 set_gdbarch_char_signed (gdbarch, 0);
2980 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2981 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2982 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2984 /* Internal <-> external register number maps. */
2985 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2987 /* Returning results. */
2988 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2991 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2993 /* Virtual tables. */
2994 set_gdbarch_vbit_in_delta (gdbarch, 1);
2996 /* Hook in the ABI-specific overrides, if they have been registered. */
2997 info.target_desc = tdesc;
2998 info.tdep_info = (void *) tdesc_data;
2999 gdbarch_init_osabi (info, gdbarch);
3001 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3003 /* Add some default predicates. */
3004 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3005 dwarf2_append_unwinders (gdbarch);
3006 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3008 frame_base_set_default (gdbarch, &aarch64_normal_base);
3010 /* Now we have tuned the configuration, set a few final things,
3011 based on what the OS ABI has told us. */
3013 if (tdep->jb_pc >= 0)
3014 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3016 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3018 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3020 /* Add standard register aliases. */
3021 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3022 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3023 value_of_aarch64_user_reg,
3024 &aarch64_register_aliases[i].regnum);
3030 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3032 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3037 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3038 paddress (gdbarch, tdep->lowest_pc));
3041 /* Suppress warning from -Wmissing-prototypes. */
3042 extern initialize_file_ftype _initialize_aarch64_tdep;
3045 _initialize_aarch64_tdep (void)
3047 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3050 initialize_tdesc_aarch64 ();
3052 /* Debug this file's internals. */
3053 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3054 Set AArch64 debugging."), _("\
3055 Show AArch64 debugging."), _("\
3056 When on, AArch64 specific debugging is enabled."),
3059 &setdebuglist, &showdebuglist);
3062 register_self_test (selftests::aarch64_analyze_prologue_test);
3066 /* AArch64 process record-replay related structures, defines etc. */
3068 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3071 unsigned int reg_len = LENGTH; \
3074 REGS = XNEWVEC (uint32_t, reg_len); \
3075 memcpy(®S[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3080 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3083 unsigned int mem_len = LENGTH; \
3086 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3087 memcpy(&MEMS->len, &RECORD_BUF[0], \
3088 sizeof(struct aarch64_mem_r) * LENGTH); \
3093 /* AArch64 record/replay structures and enumerations. */
3095 struct aarch64_mem_r
3097 uint64_t len; /* Record length. */
3098 uint64_t addr; /* Memory address. */
3101 enum aarch64_record_result
3103 AARCH64_RECORD_SUCCESS,
3104 AARCH64_RECORD_FAILURE,
3105 AARCH64_RECORD_UNSUPPORTED,
3106 AARCH64_RECORD_UNKNOWN
3109 typedef struct insn_decode_record_t
3111 struct gdbarch *gdbarch;
3112 struct regcache *regcache;
3113 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3114 uint32_t aarch64_insn; /* Insn to be recorded. */
3115 uint32_t mem_rec_count; /* Count of memory records. */
3116 uint32_t reg_rec_count; /* Count of register records. */
3117 uint32_t *aarch64_regs; /* Registers to be recorded. */
3118 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3119 } insn_decode_record;
3121 /* Record handler for data processing - register instructions. */
3124 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3126 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3127 uint32_t record_buf[4];
3129 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3130 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3131 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3133 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3137 /* Logical (shifted register). */
3138 if (insn_bits24_27 == 0x0a)
3139 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3141 else if (insn_bits24_27 == 0x0b)
3142 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3144 return AARCH64_RECORD_UNKNOWN;
3146 record_buf[0] = reg_rd;
3147 aarch64_insn_r->reg_rec_count = 1;
3149 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3153 if (insn_bits24_27 == 0x0b)
3155 /* Data-processing (3 source). */
3156 record_buf[0] = reg_rd;
3157 aarch64_insn_r->reg_rec_count = 1;
3159 else if (insn_bits24_27 == 0x0a)
3161 if (insn_bits21_23 == 0x00)
3163 /* Add/subtract (with carry). */
3164 record_buf[0] = reg_rd;
3165 aarch64_insn_r->reg_rec_count = 1;
3166 if (bit (aarch64_insn_r->aarch64_insn, 29))
3168 record_buf[1] = AARCH64_CPSR_REGNUM;
3169 aarch64_insn_r->reg_rec_count = 2;
3172 else if (insn_bits21_23 == 0x02)
3174 /* Conditional compare (register) and conditional compare
3175 (immediate) instructions. */
3176 record_buf[0] = AARCH64_CPSR_REGNUM;
3177 aarch64_insn_r->reg_rec_count = 1;
3179 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3181 /* CConditional select. */
3182 /* Data-processing (2 source). */
3183 /* Data-processing (1 source). */
3184 record_buf[0] = reg_rd;
3185 aarch64_insn_r->reg_rec_count = 1;
3188 return AARCH64_RECORD_UNKNOWN;
3192 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3194 return AARCH64_RECORD_SUCCESS;
3197 /* Record handler for data processing - immediate instructions. */
3200 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3202 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3203 uint32_t record_buf[4];
3205 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3206 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3207 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3209 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3210 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3211 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3213 record_buf[0] = reg_rd;
3214 aarch64_insn_r->reg_rec_count = 1;
3216 else if (insn_bits24_27 == 0x01)
3218 /* Add/Subtract (immediate). */
3219 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3220 record_buf[0] = reg_rd;
3221 aarch64_insn_r->reg_rec_count = 1;
3223 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3225 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3227 /* Logical (immediate). */
3228 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3229 record_buf[0] = reg_rd;
3230 aarch64_insn_r->reg_rec_count = 1;
3232 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3235 return AARCH64_RECORD_UNKNOWN;
3237 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3239 return AARCH64_RECORD_SUCCESS;
3242 /* Record handler for branch, exception generation and system instructions. */
3245 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3247 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3248 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3249 uint32_t record_buf[4];
3251 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3252 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3253 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3255 if (insn_bits28_31 == 0x0d)
3257 /* Exception generation instructions. */
3258 if (insn_bits24_27 == 0x04)
3260 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3261 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3262 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3264 ULONGEST svc_number;
3266 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3268 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3272 return AARCH64_RECORD_UNSUPPORTED;
3274 /* System instructions. */
3275 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3277 uint32_t reg_rt, reg_crn;
3279 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3280 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3282 /* Record rt in case of sysl and mrs instructions. */
3283 if (bit (aarch64_insn_r->aarch64_insn, 21))
3285 record_buf[0] = reg_rt;
3286 aarch64_insn_r->reg_rec_count = 1;
3288 /* Record cpsr for hint and msr(immediate) instructions. */
3289 else if (reg_crn == 0x02 || reg_crn == 0x04)
3291 record_buf[0] = AARCH64_CPSR_REGNUM;
3292 aarch64_insn_r->reg_rec_count = 1;
3295 /* Unconditional branch (register). */
3296 else if((insn_bits24_27 & 0x0e) == 0x06)
3298 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3299 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3300 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3303 return AARCH64_RECORD_UNKNOWN;
3305 /* Unconditional branch (immediate). */
3306 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3308 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3309 if (bit (aarch64_insn_r->aarch64_insn, 31))
3310 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3313 /* Compare & branch (immediate), Test & branch (immediate) and
3314 Conditional branch (immediate). */
3315 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3317 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3319 return AARCH64_RECORD_SUCCESS;
3322 /* Record handler for advanced SIMD load and store instructions. */
3325 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3328 uint64_t addr_offset = 0;
3329 uint32_t record_buf[24];
3330 uint64_t record_buf_mem[24];
3331 uint32_t reg_rn, reg_rt;
3332 uint32_t reg_index = 0, mem_index = 0;
3333 uint8_t opcode_bits, size_bits;
3335 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3336 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3337 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3338 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3339 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3342 debug_printf ("Process record: Advanced SIMD load/store\n");
3344 /* Load/store single structure. */
3345 if (bit (aarch64_insn_r->aarch64_insn, 24))
3347 uint8_t sindex, scale, selem, esize, replicate = 0;
3348 scale = opcode_bits >> 2;
3349 selem = ((opcode_bits & 0x02) |
3350 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3354 if (size_bits & 0x01)
3355 return AARCH64_RECORD_UNKNOWN;
3358 if ((size_bits >> 1) & 0x01)
3359 return AARCH64_RECORD_UNKNOWN;
3360 if (size_bits & 0x01)
3362 if (!((opcode_bits >> 1) & 0x01))
3365 return AARCH64_RECORD_UNKNOWN;
3369 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3376 return AARCH64_RECORD_UNKNOWN;
3382 for (sindex = 0; sindex < selem; sindex++)
3384 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3385 reg_rt = (reg_rt + 1) % 32;
3389 for (sindex = 0; sindex < selem; sindex++)
3391 if (bit (aarch64_insn_r->aarch64_insn, 22))
3392 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3395 record_buf_mem[mem_index++] = esize / 8;
3396 record_buf_mem[mem_index++] = address + addr_offset;
3398 addr_offset = addr_offset + (esize / 8);
3399 reg_rt = (reg_rt + 1) % 32;
3403 /* Load/store multiple structure. */
3406 uint8_t selem, esize, rpt, elements;
3407 uint8_t eindex, rindex;
3409 esize = 8 << size_bits;
3410 if (bit (aarch64_insn_r->aarch64_insn, 30))
3411 elements = 128 / esize;
3413 elements = 64 / esize;
3415 switch (opcode_bits)
3417 /*LD/ST4 (4 Registers). */
3422 /*LD/ST1 (4 Registers). */
3427 /*LD/ST3 (3 Registers). */
3432 /*LD/ST1 (3 Registers). */
3437 /*LD/ST1 (1 Register). */
3442 /*LD/ST2 (2 Registers). */
3447 /*LD/ST1 (2 Registers). */
3453 return AARCH64_RECORD_UNSUPPORTED;
3456 for (rindex = 0; rindex < rpt; rindex++)
3457 for (eindex = 0; eindex < elements; eindex++)
3459 uint8_t reg_tt, sindex;
3460 reg_tt = (reg_rt + rindex) % 32;
3461 for (sindex = 0; sindex < selem; sindex++)
3463 if (bit (aarch64_insn_r->aarch64_insn, 22))
3464 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3467 record_buf_mem[mem_index++] = esize / 8;
3468 record_buf_mem[mem_index++] = address + addr_offset;
3470 addr_offset = addr_offset + (esize / 8);
3471 reg_tt = (reg_tt + 1) % 32;
3476 if (bit (aarch64_insn_r->aarch64_insn, 23))
3477 record_buf[reg_index++] = reg_rn;
3479 aarch64_insn_r->reg_rec_count = reg_index;
3480 aarch64_insn_r->mem_rec_count = mem_index / 2;
3481 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3483 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3485 return AARCH64_RECORD_SUCCESS;
3488 /* Record handler for load and store instructions. */
3491 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3493 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3494 uint8_t insn_bit23, insn_bit21;
3495 uint8_t opc, size_bits, ld_flag, vector_flag;
3496 uint32_t reg_rn, reg_rt, reg_rt2;
3497 uint64_t datasize, offset;
3498 uint32_t record_buf[8];
3499 uint64_t record_buf_mem[8];
3502 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3503 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3504 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3505 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3506 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3507 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3508 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3509 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3510 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3511 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3512 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3514 /* Load/store exclusive. */
3515 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3518 debug_printf ("Process record: load/store exclusive\n");
3522 record_buf[0] = reg_rt;
3523 aarch64_insn_r->reg_rec_count = 1;
3526 record_buf[1] = reg_rt2;
3527 aarch64_insn_r->reg_rec_count = 2;
3533 datasize = (8 << size_bits) * 2;
3535 datasize = (8 << size_bits);
3536 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3538 record_buf_mem[0] = datasize / 8;
3539 record_buf_mem[1] = address;
3540 aarch64_insn_r->mem_rec_count = 1;
3543 /* Save register rs. */
3544 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3545 aarch64_insn_r->reg_rec_count = 1;
3549 /* Load register (literal) instructions decoding. */
3550 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3553 debug_printf ("Process record: load register (literal)\n");
3555 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3557 record_buf[0] = reg_rt;
3558 aarch64_insn_r->reg_rec_count = 1;
3560 /* All types of load/store pair instructions decoding. */
3561 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3564 debug_printf ("Process record: load/store pair\n");
3570 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3571 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3575 record_buf[0] = reg_rt;
3576 record_buf[1] = reg_rt2;
3578 aarch64_insn_r->reg_rec_count = 2;
3583 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3585 size_bits = size_bits >> 1;
3586 datasize = 8 << (2 + size_bits);
3587 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3588 offset = offset << (2 + size_bits);
3589 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3591 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3593 if (imm7_off & 0x40)
3594 address = address - offset;
3596 address = address + offset;
3599 record_buf_mem[0] = datasize / 8;
3600 record_buf_mem[1] = address;
3601 record_buf_mem[2] = datasize / 8;
3602 record_buf_mem[3] = address + (datasize / 8);
3603 aarch64_insn_r->mem_rec_count = 2;
3605 if (bit (aarch64_insn_r->aarch64_insn, 23))
3606 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3608 /* Load/store register (unsigned immediate) instructions. */
3609 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3611 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3618 if (size_bits != 0x03)
3621 return AARCH64_RECORD_UNKNOWN;
3625 debug_printf ("Process record: load/store (unsigned immediate):"
3626 " size %x V %d opc %x\n", size_bits, vector_flag,
3632 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3633 datasize = 8 << size_bits;
3634 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3636 offset = offset << size_bits;
3637 address = address + offset;
3639 record_buf_mem[0] = datasize >> 3;
3640 record_buf_mem[1] = address;
3641 aarch64_insn_r->mem_rec_count = 1;
3646 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3648 record_buf[0] = reg_rt;
3649 aarch64_insn_r->reg_rec_count = 1;
3652 /* Load/store register (register offset) instructions. */
3653 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3654 && insn_bits10_11 == 0x02 && insn_bit21)
3657 debug_printf ("Process record: load/store (register offset)\n");
3658 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3665 if (size_bits != 0x03)
3668 return AARCH64_RECORD_UNKNOWN;
3672 ULONGEST reg_rm_val;
3674 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3675 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
3676 if (bit (aarch64_insn_r->aarch64_insn, 12))
3677 offset = reg_rm_val << size_bits;
3679 offset = reg_rm_val;
3680 datasize = 8 << size_bits;
3681 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3683 address = address + offset;
3684 record_buf_mem[0] = datasize >> 3;
3685 record_buf_mem[1] = address;
3686 aarch64_insn_r->mem_rec_count = 1;
3691 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3693 record_buf[0] = reg_rt;
3694 aarch64_insn_r->reg_rec_count = 1;
3697 /* Load/store register (immediate and unprivileged) instructions. */
3698 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3703 debug_printf ("Process record: load/store "
3704 "(immediate and unprivileged)\n");
3706 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3713 if (size_bits != 0x03)
3716 return AARCH64_RECORD_UNKNOWN;
3721 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3722 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3723 datasize = 8 << size_bits;
3724 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3726 if (insn_bits10_11 != 0x01)
3728 if (imm9_off & 0x0100)
3729 address = address - offset;
3731 address = address + offset;
3733 record_buf_mem[0] = datasize >> 3;
3734 record_buf_mem[1] = address;
3735 aarch64_insn_r->mem_rec_count = 1;
3740 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3742 record_buf[0] = reg_rt;
3743 aarch64_insn_r->reg_rec_count = 1;
3745 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3746 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3748 /* Advanced SIMD load/store instructions. */
3750 return aarch64_record_asimd_load_store (aarch64_insn_r);
3752 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3754 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3756 return AARCH64_RECORD_SUCCESS;
3759 /* Record handler for data processing SIMD and floating point instructions. */
3762 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3764 uint8_t insn_bit21, opcode, rmode, reg_rd;
3765 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3766 uint8_t insn_bits11_14;
3767 uint32_t record_buf[2];
3769 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3770 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3771 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3772 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3773 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3774 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3775 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3776 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3777 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3780 debug_printf ("Process record: data processing SIMD/FP: ");
3782 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3784 /* Floating point - fixed point conversion instructions. */
3788 debug_printf ("FP - fixed point conversion");
3790 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3791 record_buf[0] = reg_rd;
3793 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3795 /* Floating point - conditional compare instructions. */
3796 else if (insn_bits10_11 == 0x01)
3799 debug_printf ("FP - conditional compare");
3801 record_buf[0] = AARCH64_CPSR_REGNUM;
3803 /* Floating point - data processing (2-source) and
3804 conditional select instructions. */
3805 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3808 debug_printf ("FP - DP (2-source)");
3810 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3812 else if (insn_bits10_11 == 0x00)
3814 /* Floating point - immediate instructions. */
3815 if ((insn_bits12_15 & 0x01) == 0x01
3816 || (insn_bits12_15 & 0x07) == 0x04)
3819 debug_printf ("FP - immediate");
3820 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3822 /* Floating point - compare instructions. */
3823 else if ((insn_bits12_15 & 0x03) == 0x02)
3826 debug_printf ("FP - immediate");
3827 record_buf[0] = AARCH64_CPSR_REGNUM;
3829 /* Floating point - integer conversions instructions. */
3830 else if (insn_bits12_15 == 0x00)
3832 /* Convert float to integer instruction. */
3833 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3836 debug_printf ("float to int conversion");
3838 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3840 /* Convert integer to float instruction. */
3841 else if ((opcode >> 1) == 0x01 && !rmode)
3844 debug_printf ("int to float conversion");
3846 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3848 /* Move float to integer instruction. */
3849 else if ((opcode >> 1) == 0x03)
3852 debug_printf ("move float to int");
3854 if (!(opcode & 0x01))
3855 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3857 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3860 return AARCH64_RECORD_UNKNOWN;
3863 return AARCH64_RECORD_UNKNOWN;
3866 return AARCH64_RECORD_UNKNOWN;
3868 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3871 debug_printf ("SIMD copy");
3873 /* Advanced SIMD copy instructions. */
3874 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3875 && !bit (aarch64_insn_r->aarch64_insn, 15)
3876 && bit (aarch64_insn_r->aarch64_insn, 10))
3878 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3879 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3881 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3884 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3886 /* All remaining floating point or advanced SIMD instructions. */
3890 debug_printf ("all remain");
3892 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3896 debug_printf ("\n");
3898 aarch64_insn_r->reg_rec_count++;
3899 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3900 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3902 return AARCH64_RECORD_SUCCESS;
3905 /* Decodes insns type and invokes its record handler. */
3908 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3910 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3912 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3913 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3914 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3915 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3917 /* Data processing - immediate instructions. */
3918 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3919 return aarch64_record_data_proc_imm (aarch64_insn_r);
3921 /* Branch, exception generation and system instructions. */
3922 if (ins_bit26 && !ins_bit27 && ins_bit28)
3923 return aarch64_record_branch_except_sys (aarch64_insn_r);
3925 /* Load and store instructions. */
3926 if (!ins_bit25 && ins_bit27)
3927 return aarch64_record_load_store (aarch64_insn_r);
3929 /* Data processing - register instructions. */
3930 if (ins_bit25 && !ins_bit26 && ins_bit27)
3931 return aarch64_record_data_proc_reg (aarch64_insn_r);
3933 /* Data processing - SIMD and floating point instructions. */
3934 if (ins_bit25 && ins_bit26 && ins_bit27)
3935 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3937 return AARCH64_RECORD_UNSUPPORTED;
3940 /* Cleans up local record registers and memory allocations. */
3943 deallocate_reg_mem (insn_decode_record *record)
3945 xfree (record->aarch64_regs);
3946 xfree (record->aarch64_mems);
3949 /* Parse the current instruction and record the values of the registers and
3950 memory that will be changed in current instruction to record_arch_list
3951 return -1 if something is wrong. */
3954 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3955 CORE_ADDR insn_addr)
3957 uint32_t rec_no = 0;
3958 uint8_t insn_size = 4;
3960 gdb_byte buf[insn_size];
3961 insn_decode_record aarch64_record;
3963 memset (&buf[0], 0, insn_size);
3964 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3965 target_read_memory (insn_addr, &buf[0], insn_size);
3966 aarch64_record.aarch64_insn
3967 = (uint32_t) extract_unsigned_integer (&buf[0],
3969 gdbarch_byte_order (gdbarch));
3970 aarch64_record.regcache = regcache;
3971 aarch64_record.this_addr = insn_addr;
3972 aarch64_record.gdbarch = gdbarch;
3974 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3975 if (ret == AARCH64_RECORD_UNSUPPORTED)
3977 printf_unfiltered (_("Process record does not support instruction "
3978 "0x%0x at address %s.\n"),
3979 aarch64_record.aarch64_insn,
3980 paddress (gdbarch, insn_addr));
3986 /* Record registers. */
3987 record_full_arch_list_add_reg (aarch64_record.regcache,
3989 /* Always record register CPSR. */
3990 record_full_arch_list_add_reg (aarch64_record.regcache,
3991 AARCH64_CPSR_REGNUM);
3992 if (aarch64_record.aarch64_regs)
3993 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3994 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3995 aarch64_record.aarch64_regs[rec_no]))
3998 /* Record memories. */
3999 if (aarch64_record.aarch64_mems)
4000 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4001 if (record_full_arch_list_add_mem
4002 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4003 aarch64_record.aarch64_mems[rec_no].len))
4006 if (record_full_arch_list_add_end ())
4010 deallocate_reg_mem (&aarch64_record);