1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
49 #include "aarch64-tdep.h"
52 #include "elf/aarch64.h"
57 #include "record-full.h"
59 #include "features/aarch64.c"
61 #include "arch/aarch64-insn.h"
63 #include "opcode/aarch64.h"
66 #define submask(x) ((1L << ((x) + 1)) - 1)
67 #define bit(obj,st) (((obj) >> (st)) & 1)
68 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
70 /* Pseudo register base numbers. */
71 #define AARCH64_Q0_REGNUM 0
72 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
73 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
74 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
75 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
77 /* The standard register names, and all the valid aliases for them. */
80 const char *const name;
82 } aarch64_register_aliases[] =
84 /* 64-bit register names. */
85 {"fp", AARCH64_FP_REGNUM},
86 {"lr", AARCH64_LR_REGNUM},
87 {"sp", AARCH64_SP_REGNUM},
89 /* 32-bit register names. */
90 {"w0", AARCH64_X0_REGNUM + 0},
91 {"w1", AARCH64_X0_REGNUM + 1},
92 {"w2", AARCH64_X0_REGNUM + 2},
93 {"w3", AARCH64_X0_REGNUM + 3},
94 {"w4", AARCH64_X0_REGNUM + 4},
95 {"w5", AARCH64_X0_REGNUM + 5},
96 {"w6", AARCH64_X0_REGNUM + 6},
97 {"w7", AARCH64_X0_REGNUM + 7},
98 {"w8", AARCH64_X0_REGNUM + 8},
99 {"w9", AARCH64_X0_REGNUM + 9},
100 {"w10", AARCH64_X0_REGNUM + 10},
101 {"w11", AARCH64_X0_REGNUM + 11},
102 {"w12", AARCH64_X0_REGNUM + 12},
103 {"w13", AARCH64_X0_REGNUM + 13},
104 {"w14", AARCH64_X0_REGNUM + 14},
105 {"w15", AARCH64_X0_REGNUM + 15},
106 {"w16", AARCH64_X0_REGNUM + 16},
107 {"w17", AARCH64_X0_REGNUM + 17},
108 {"w18", AARCH64_X0_REGNUM + 18},
109 {"w19", AARCH64_X0_REGNUM + 19},
110 {"w20", AARCH64_X0_REGNUM + 20},
111 {"w21", AARCH64_X0_REGNUM + 21},
112 {"w22", AARCH64_X0_REGNUM + 22},
113 {"w23", AARCH64_X0_REGNUM + 23},
114 {"w24", AARCH64_X0_REGNUM + 24},
115 {"w25", AARCH64_X0_REGNUM + 25},
116 {"w26", AARCH64_X0_REGNUM + 26},
117 {"w27", AARCH64_X0_REGNUM + 27},
118 {"w28", AARCH64_X0_REGNUM + 28},
119 {"w29", AARCH64_X0_REGNUM + 29},
120 {"w30", AARCH64_X0_REGNUM + 30},
123 {"ip0", AARCH64_X0_REGNUM + 16},
124 {"ip1", AARCH64_X0_REGNUM + 17}
127 /* The required core 'R' registers. */
128 static const char *const aarch64_r_register_names[] =
130 /* These registers must appear in consecutive RAW register number
131 order and they must begin with AARCH64_X0_REGNUM! */
132 "x0", "x1", "x2", "x3",
133 "x4", "x5", "x6", "x7",
134 "x8", "x9", "x10", "x11",
135 "x12", "x13", "x14", "x15",
136 "x16", "x17", "x18", "x19",
137 "x20", "x21", "x22", "x23",
138 "x24", "x25", "x26", "x27",
139 "x28", "x29", "x30", "sp",
143 /* The FP/SIMD 'V' registers. */
144 static const char *const aarch64_v_register_names[] =
146 /* These registers must appear in consecutive RAW register number
147 order and they must begin with AARCH64_V0_REGNUM! */
148 "v0", "v1", "v2", "v3",
149 "v4", "v5", "v6", "v7",
150 "v8", "v9", "v10", "v11",
151 "v12", "v13", "v14", "v15",
152 "v16", "v17", "v18", "v19",
153 "v20", "v21", "v22", "v23",
154 "v24", "v25", "v26", "v27",
155 "v28", "v29", "v30", "v31",
160 /* AArch64 prologue cache structure. */
161 struct aarch64_prologue_cache
163 /* The program counter at the start of the function. It is used to
164 identify this frame as a prologue frame. */
167 /* The program counter at the time this frame was created; i.e. where
168 this function was called from. It is used to identify this frame as a
172 /* The stack pointer at the time this frame was created; i.e. the
173 caller's stack pointer when this function was called. It is used
174 to identify this frame. */
177 /* Is the target available to read from? */
180 /* The frame base for this frame is just prev_sp - frame size.
181 FRAMESIZE is the distance from the frame pointer to the
182 initial stack pointer. */
185 /* The register used to hold the frame pointer for this frame. */
188 /* Saved register offsets. */
189 struct trad_frame_saved_reg *saved_regs;
193 show_aarch64_debug (struct ui_file *file, int from_tty,
194 struct cmd_list_element *c, const char *value)
196 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
199 /* Abstract instruction reader. */
201 class abstract_instruction_reader
204 /* Read in one instruction. */
205 virtual ULONGEST read (CORE_ADDR memaddr, int len,
206 enum bfd_endian byte_order) = 0;
209 /* Instruction reader from real target. */
211 class instruction_reader : public abstract_instruction_reader
214 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
216 return read_memory_unsigned_integer (memaddr, len, byte_order);
220 /* Analyze a prologue, looking for a recognizable stack frame
221 and frame pointer. Scan until we encounter a store that could
222 clobber the stack frame unexpectedly, or an unknown instruction. */
225 aarch64_analyze_prologue (struct gdbarch *gdbarch,
226 CORE_ADDR start, CORE_ADDR limit,
227 struct aarch64_prologue_cache *cache,
228 abstract_instruction_reader& reader)
230 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
232 /* Track X registers and D registers in prologue. */
233 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
234 struct pv_area *stack;
235 struct cleanup *back_to;
237 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
238 regs[i] = pv_register (i, 0);
239 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
240 back_to = make_cleanup_free_pv_area (stack);
242 for (; start < limit; start += 4)
247 insn = reader.read (start, 4, byte_order_for_code);
249 if (aarch64_decode_insn (insn, &inst, 1) != 0)
252 if (inst.opcode->iclass == addsub_imm
253 && (inst.opcode->op == OP_ADD
254 || strcmp ("sub", inst.opcode->name) == 0))
256 unsigned rd = inst.operands[0].reg.regno;
257 unsigned rn = inst.operands[1].reg.regno;
259 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
260 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
261 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
262 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
264 if (inst.opcode->op == OP_ADD)
266 regs[rd] = pv_add_constant (regs[rn],
267 inst.operands[2].imm.value);
271 regs[rd] = pv_add_constant (regs[rn],
272 -inst.operands[2].imm.value);
275 else if (inst.opcode->iclass == pcreladdr
276 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
278 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
279 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
281 regs[inst.operands[0].reg.regno] = pv_unknown ();
283 else if (inst.opcode->iclass == branch_imm)
285 /* Stop analysis on branch. */
288 else if (inst.opcode->iclass == condbranch)
290 /* Stop analysis on branch. */
293 else if (inst.opcode->iclass == branch_reg)
295 /* Stop analysis on branch. */
298 else if (inst.opcode->iclass == compbranch)
300 /* Stop analysis on branch. */
303 else if (inst.opcode->op == OP_MOVZ)
305 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
306 regs[inst.operands[0].reg.regno] = pv_unknown ();
308 else if (inst.opcode->iclass == log_shift
309 && strcmp (inst.opcode->name, "orr") == 0)
311 unsigned rd = inst.operands[0].reg.regno;
312 unsigned rn = inst.operands[1].reg.regno;
313 unsigned rm = inst.operands[2].reg.regno;
315 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
316 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
317 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
319 if (inst.operands[2].shifter.amount == 0
320 && rn == AARCH64_SP_REGNUM)
326 debug_printf ("aarch64: prologue analysis gave up "
327 "addr=%s opcode=0x%x (orr x register)\n",
328 core_addr_to_string_nz (start), insn);
333 else if (inst.opcode->op == OP_STUR)
335 unsigned rt = inst.operands[0].reg.regno;
336 unsigned rn = inst.operands[1].addr.base_regno;
338 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
340 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
341 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
342 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
343 gdb_assert (!inst.operands[1].addr.offset.is_reg);
345 pv_area_store (stack, pv_add_constant (regs[rn],
346 inst.operands[1].addr.offset.imm),
347 is64 ? 8 : 4, regs[rt]);
349 else if ((inst.opcode->iclass == ldstpair_off
350 || (inst.opcode->iclass == ldstpair_indexed
351 && inst.operands[2].addr.preind))
352 && strcmp ("stp", inst.opcode->name) == 0)
354 /* STP with addressing mode Pre-indexed and Base register. */
357 unsigned rn = inst.operands[2].addr.base_regno;
358 int32_t imm = inst.operands[2].addr.offset.imm;
360 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
361 || inst.operands[0].type == AARCH64_OPND_Ft);
362 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
363 || inst.operands[1].type == AARCH64_OPND_Ft2);
364 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
365 gdb_assert (!inst.operands[2].addr.offset.is_reg);
367 /* If recording this store would invalidate the store area
368 (perhaps because rn is not known) then we should abandon
369 further prologue analysis. */
370 if (pv_area_store_would_trash (stack,
371 pv_add_constant (regs[rn], imm)))
374 if (pv_area_store_would_trash (stack,
375 pv_add_constant (regs[rn], imm + 8)))
378 rt1 = inst.operands[0].reg.regno;
379 rt2 = inst.operands[1].reg.regno;
380 if (inst.operands[0].type == AARCH64_OPND_Ft)
382 /* Only bottom 64-bit of each V register (D register) need
384 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
385 rt1 += AARCH64_X_REGISTER_COUNT;
386 rt2 += AARCH64_X_REGISTER_COUNT;
389 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
391 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
394 if (inst.operands[2].addr.writeback)
395 regs[rn] = pv_add_constant (regs[rn], imm);
398 else if (inst.opcode->iclass == testbranch)
400 /* Stop analysis on branch. */
407 debug_printf ("aarch64: prologue analysis gave up addr=%s"
409 core_addr_to_string_nz (start), insn);
417 do_cleanups (back_to);
421 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
423 /* Frame pointer is fp. Frame size is constant. */
424 cache->framereg = AARCH64_FP_REGNUM;
425 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
427 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
429 /* Try the stack pointer. */
430 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
431 cache->framereg = AARCH64_SP_REGNUM;
435 /* We're just out of luck. We don't know where the frame is. */
436 cache->framereg = -1;
437 cache->framesize = 0;
440 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
444 if (pv_area_find_reg (stack, gdbarch, i, &offset))
445 cache->saved_regs[i].addr = offset;
448 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
450 int regnum = gdbarch_num_regs (gdbarch);
453 if (pv_area_find_reg (stack, gdbarch, i + AARCH64_X_REGISTER_COUNT,
455 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
458 do_cleanups (back_to);
463 aarch64_analyze_prologue (struct gdbarch *gdbarch,
464 CORE_ADDR start, CORE_ADDR limit,
465 struct aarch64_prologue_cache *cache)
467 instruction_reader reader;
469 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
475 namespace selftests {
477 /* Instruction reader from manually cooked instruction sequences. */
479 class instruction_reader_test : public abstract_instruction_reader
482 template<size_t SIZE>
483 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
484 : m_insns (insns), m_insns_size (SIZE)
487 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
489 SELF_CHECK (len == 4);
490 SELF_CHECK (memaddr % 4 == 0);
491 SELF_CHECK (memaddr / 4 < m_insns_size);
493 return m_insns[memaddr / 4];
497 const uint32_t *m_insns;
502 aarch64_analyze_prologue_test (void)
504 struct gdbarch_info info;
506 gdbarch_info_init (&info);
507 info.bfd_arch_info = bfd_scan_arch ("aarch64");
509 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
510 SELF_CHECK (gdbarch != NULL);
512 /* Test the simple prologue in which frame pointer is used. */
514 struct aarch64_prologue_cache cache;
515 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
517 static const uint32_t insns[] = {
518 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
519 0x910003fd, /* mov x29, sp */
520 0x97ffffe6, /* bl 0x400580 */
522 instruction_reader_test reader (insns);
524 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
525 SELF_CHECK (end == 4 * 2);
527 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
528 SELF_CHECK (cache.framesize == 272);
530 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
532 if (i == AARCH64_FP_REGNUM)
533 SELF_CHECK (cache.saved_regs[i].addr == -272);
534 else if (i == AARCH64_LR_REGNUM)
535 SELF_CHECK (cache.saved_regs[i].addr == -264);
537 SELF_CHECK (cache.saved_regs[i].addr == -1);
540 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
542 int regnum = gdbarch_num_regs (gdbarch);
544 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
549 } // namespace selftests
550 #endif /* GDB_SELF_TEST */
552 /* Implement the "skip_prologue" gdbarch method. */
555 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
557 CORE_ADDR func_addr, limit_pc;
559 /* See if we can determine the end of the prologue via the symbol
560 table. If so, then return either PC, or the PC after the
561 prologue, whichever is greater. */
562 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
564 CORE_ADDR post_prologue_pc
565 = skip_prologue_using_sal (gdbarch, func_addr);
567 if (post_prologue_pc != 0)
568 return std::max (pc, post_prologue_pc);
571 /* Can't determine prologue from the symbol table, need to examine
574 /* Find an upper limit on the function prologue using the debug
575 information. If the debug information could not be used to
576 provide that bound, then use an arbitrary large number as the
578 limit_pc = skip_prologue_using_sal (gdbarch, pc);
580 limit_pc = pc + 128; /* Magic. */
582 /* Try disassembling prologue. */
583 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
586 /* Scan the function prologue for THIS_FRAME and populate the prologue
590 aarch64_scan_prologue (struct frame_info *this_frame,
591 struct aarch64_prologue_cache *cache)
593 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
594 CORE_ADDR prologue_start;
595 CORE_ADDR prologue_end;
596 CORE_ADDR prev_pc = get_frame_pc (this_frame);
597 struct gdbarch *gdbarch = get_frame_arch (this_frame);
599 cache->prev_pc = prev_pc;
601 /* Assume we do not find a frame. */
602 cache->framereg = -1;
603 cache->framesize = 0;
605 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
608 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
612 /* No line info so use the current PC. */
613 prologue_end = prev_pc;
615 else if (sal.end < prologue_end)
617 /* The next line begins after the function end. */
618 prologue_end = sal.end;
621 prologue_end = std::min (prologue_end, prev_pc);
622 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
628 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
632 cache->framereg = AARCH64_FP_REGNUM;
633 cache->framesize = 16;
634 cache->saved_regs[29].addr = 0;
635 cache->saved_regs[30].addr = 8;
639 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
640 function may throw an exception if the inferior's registers or memory is
644 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
645 struct aarch64_prologue_cache *cache)
647 CORE_ADDR unwound_fp;
650 aarch64_scan_prologue (this_frame, cache);
652 if (cache->framereg == -1)
655 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
659 cache->prev_sp = unwound_fp + cache->framesize;
661 /* Calculate actual addresses of saved registers using offsets
662 determined by aarch64_analyze_prologue. */
663 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
664 if (trad_frame_addr_p (cache->saved_regs, reg))
665 cache->saved_regs[reg].addr += cache->prev_sp;
667 cache->func = get_frame_func (this_frame);
669 cache->available_p = 1;
672 /* Allocate and fill in *THIS_CACHE with information about the prologue of
673 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
674 Return a pointer to the current aarch64_prologue_cache in
677 static struct aarch64_prologue_cache *
678 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
680 struct aarch64_prologue_cache *cache;
682 if (*this_cache != NULL)
683 return (struct aarch64_prologue_cache *) *this_cache;
685 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
686 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
691 aarch64_make_prologue_cache_1 (this_frame, cache);
693 CATCH (ex, RETURN_MASK_ERROR)
695 if (ex.error != NOT_AVAILABLE_ERROR)
696 throw_exception (ex);
703 /* Implement the "stop_reason" frame_unwind method. */
705 static enum unwind_stop_reason
706 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
709 struct aarch64_prologue_cache *cache
710 = aarch64_make_prologue_cache (this_frame, this_cache);
712 if (!cache->available_p)
713 return UNWIND_UNAVAILABLE;
715 /* Halt the backtrace at "_start". */
716 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
717 return UNWIND_OUTERMOST;
719 /* We've hit a wall, stop. */
720 if (cache->prev_sp == 0)
721 return UNWIND_OUTERMOST;
723 return UNWIND_NO_REASON;
726 /* Our frame ID for a normal frame is the current function's starting
727 PC and the caller's SP when we were called. */
730 aarch64_prologue_this_id (struct frame_info *this_frame,
731 void **this_cache, struct frame_id *this_id)
733 struct aarch64_prologue_cache *cache
734 = aarch64_make_prologue_cache (this_frame, this_cache);
736 if (!cache->available_p)
737 *this_id = frame_id_build_unavailable_stack (cache->func);
739 *this_id = frame_id_build (cache->prev_sp, cache->func);
742 /* Implement the "prev_register" frame_unwind method. */
744 static struct value *
745 aarch64_prologue_prev_register (struct frame_info *this_frame,
746 void **this_cache, int prev_regnum)
748 struct aarch64_prologue_cache *cache
749 = aarch64_make_prologue_cache (this_frame, this_cache);
751 /* If we are asked to unwind the PC, then we need to return the LR
752 instead. The prologue may save PC, but it will point into this
753 frame's prologue, not the next frame's resume location. */
754 if (prev_regnum == AARCH64_PC_REGNUM)
758 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
759 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
762 /* SP is generally not saved to the stack, but this frame is
763 identified by the next frame's stack pointer at the time of the
764 call. The value was already reconstructed into PREV_SP. */
777 if (prev_regnum == AARCH64_SP_REGNUM)
778 return frame_unwind_got_constant (this_frame, prev_regnum,
781 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
785 /* AArch64 prologue unwinder. */
786 struct frame_unwind aarch64_prologue_unwind =
789 aarch64_prologue_frame_unwind_stop_reason,
790 aarch64_prologue_this_id,
791 aarch64_prologue_prev_register,
793 default_frame_sniffer
796 /* Allocate and fill in *THIS_CACHE with information about the prologue of
797 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
798 Return a pointer to the current aarch64_prologue_cache in
801 static struct aarch64_prologue_cache *
802 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
804 struct aarch64_prologue_cache *cache;
806 if (*this_cache != NULL)
807 return (struct aarch64_prologue_cache *) *this_cache;
809 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
810 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
815 cache->prev_sp = get_frame_register_unsigned (this_frame,
817 cache->prev_pc = get_frame_pc (this_frame);
818 cache->available_p = 1;
820 CATCH (ex, RETURN_MASK_ERROR)
822 if (ex.error != NOT_AVAILABLE_ERROR)
823 throw_exception (ex);
830 /* Implement the "stop_reason" frame_unwind method. */
832 static enum unwind_stop_reason
833 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
836 struct aarch64_prologue_cache *cache
837 = aarch64_make_stub_cache (this_frame, this_cache);
839 if (!cache->available_p)
840 return UNWIND_UNAVAILABLE;
842 return UNWIND_NO_REASON;
845 /* Our frame ID for a stub frame is the current SP and LR. */
848 aarch64_stub_this_id (struct frame_info *this_frame,
849 void **this_cache, struct frame_id *this_id)
851 struct aarch64_prologue_cache *cache
852 = aarch64_make_stub_cache (this_frame, this_cache);
854 if (cache->available_p)
855 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
857 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
860 /* Implement the "sniffer" frame_unwind method. */
863 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
864 struct frame_info *this_frame,
865 void **this_prologue_cache)
867 CORE_ADDR addr_in_block;
870 addr_in_block = get_frame_address_in_block (this_frame);
871 if (in_plt_section (addr_in_block)
872 /* We also use the stub winder if the target memory is unreadable
873 to avoid having the prologue unwinder trying to read it. */
874 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
880 /* AArch64 stub unwinder. */
881 struct frame_unwind aarch64_stub_unwind =
884 aarch64_stub_frame_unwind_stop_reason,
885 aarch64_stub_this_id,
886 aarch64_prologue_prev_register,
888 aarch64_stub_unwind_sniffer
891 /* Return the frame base address of *THIS_FRAME. */
894 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
896 struct aarch64_prologue_cache *cache
897 = aarch64_make_prologue_cache (this_frame, this_cache);
899 return cache->prev_sp - cache->framesize;
902 /* AArch64 default frame base information. */
903 struct frame_base aarch64_normal_base =
905 &aarch64_prologue_unwind,
906 aarch64_normal_frame_base,
907 aarch64_normal_frame_base,
908 aarch64_normal_frame_base
911 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
912 dummy frame. The frame ID's base needs to match the TOS value
913 saved by save_dummy_frame_tos () and returned from
914 aarch64_push_dummy_call, and the PC needs to match the dummy
915 frame's breakpoint. */
917 static struct frame_id
918 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
920 return frame_id_build (get_frame_register_unsigned (this_frame,
922 get_frame_pc (this_frame));
925 /* Implement the "unwind_pc" gdbarch method. */
928 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
931 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
936 /* Implement the "unwind_sp" gdbarch method. */
939 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
941 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
944 /* Return the value of the REGNUM register in the previous frame of
947 static struct value *
948 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
949 void **this_cache, int regnum)
955 case AARCH64_PC_REGNUM:
956 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
957 return frame_unwind_got_constant (this_frame, regnum, lr);
960 internal_error (__FILE__, __LINE__,
961 _("Unexpected register %d"), regnum);
965 /* Implement the "init_reg" dwarf2_frame_ops method. */
968 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
969 struct dwarf2_frame_state_reg *reg,
970 struct frame_info *this_frame)
974 case AARCH64_PC_REGNUM:
975 reg->how = DWARF2_FRAME_REG_FN;
976 reg->loc.fn = aarch64_dwarf2_prev_register;
978 case AARCH64_SP_REGNUM:
979 reg->how = DWARF2_FRAME_REG_CFA;
984 /* When arguments must be pushed onto the stack, they go on in reverse
985 order. The code below implements a FILO (stack) to do this. */
989 /* Value to pass on stack. It can be NULL if this item is for stack
991 const gdb_byte *data;
993 /* Size in bytes of value to pass on stack. */
997 DEF_VEC_O (stack_item_t);
999 /* Return the alignment (in bytes) of the given type. */
1002 aarch64_type_align (struct type *t)
1008 t = check_typedef (t);
1009 switch (TYPE_CODE (t))
1012 /* Should never happen. */
1013 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1017 case TYPE_CODE_ENUM:
1021 case TYPE_CODE_RANGE:
1022 case TYPE_CODE_BITSTRING:
1024 case TYPE_CODE_CHAR:
1025 case TYPE_CODE_BOOL:
1026 return TYPE_LENGTH (t);
1028 case TYPE_CODE_ARRAY:
1029 if (TYPE_VECTOR (t))
1031 /* Use the natural alignment for vector types (the same for
1032 scalar type), but the maximum alignment is 128-bit. */
1033 if (TYPE_LENGTH (t) > 16)
1036 return TYPE_LENGTH (t);
1039 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1040 case TYPE_CODE_COMPLEX:
1041 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1043 case TYPE_CODE_STRUCT:
1044 case TYPE_CODE_UNION:
1046 for (n = 0; n < TYPE_NFIELDS (t); n++)
1048 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1056 /* Return 1 if *TY is a homogeneous floating-point aggregate or
1057 homogeneous short-vector aggregate as defined in the AAPCS64 ABI
1058 document; otherwise return 0. */
1061 is_hfa_or_hva (struct type *ty)
1063 switch (TYPE_CODE (ty))
1065 case TYPE_CODE_ARRAY:
1067 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1069 if (TYPE_VECTOR (ty))
1072 if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members. */
1073 && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
1074 || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
1075 && TYPE_VECTOR (target_ty))))
1080 case TYPE_CODE_UNION:
1081 case TYPE_CODE_STRUCT:
1083 /* HFA or HVA has at most four members. */
1084 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1086 struct type *member0_type;
1088 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1089 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
1090 || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
1091 && TYPE_VECTOR (member0_type)))
1095 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1097 struct type *member1_type;
1099 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1100 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1101 || (TYPE_LENGTH (member0_type)
1102 != TYPE_LENGTH (member1_type)))
1118 /* AArch64 function call information structure. */
1119 struct aarch64_call_info
1121 /* the current argument number. */
1124 /* The next general purpose register number, equivalent to NGRN as
1125 described in the AArch64 Procedure Call Standard. */
1128 /* The next SIMD and floating point register number, equivalent to
1129 NSRN as described in the AArch64 Procedure Call Standard. */
1132 /* The next stacked argument address, equivalent to NSAA as
1133 described in the AArch64 Procedure Call Standard. */
1136 /* Stack item vector. */
1137 VEC(stack_item_t) *si;
1140 /* Pass a value in a sequence of consecutive X registers. The caller
1141 is responsbile for ensuring sufficient registers are available. */
1144 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1145 struct aarch64_call_info *info, struct type *type,
1148 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1149 int len = TYPE_LENGTH (type);
1150 enum type_code typecode = TYPE_CODE (type);
1151 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1152 const bfd_byte *buf = value_contents (arg);
1158 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1159 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1163 /* Adjust sub-word struct/union args when big-endian. */
1164 if (byte_order == BFD_ENDIAN_BIG
1165 && partial_len < X_REGISTER_SIZE
1166 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1167 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1171 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1172 gdbarch_register_name (gdbarch, regnum),
1173 phex (regval, X_REGISTER_SIZE));
1175 regcache_cooked_write_unsigned (regcache, regnum, regval);
1182 /* Attempt to marshall a value in a V register. Return 1 if
1183 successful, or 0 if insufficient registers are available. This
1184 function, unlike the equivalent pass_in_x() function does not
1185 handle arguments spread across multiple registers. */
1188 pass_in_v (struct gdbarch *gdbarch,
1189 struct regcache *regcache,
1190 struct aarch64_call_info *info,
1191 int len, const bfd_byte *buf)
1195 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1196 gdb_byte reg[V_REGISTER_SIZE];
1201 memset (reg, 0, sizeof (reg));
1202 /* PCS C.1, the argument is allocated to the least significant
1203 bits of V register. */
1204 memcpy (reg, buf, len);
1205 regcache_cooked_write (regcache, regnum, reg);
1209 debug_printf ("arg %d in %s\n", info->argnum,
1210 gdbarch_register_name (gdbarch, regnum));
1218 /* Marshall an argument onto the stack. */
1221 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1224 const bfd_byte *buf = value_contents (arg);
1225 int len = TYPE_LENGTH (type);
1231 align = aarch64_type_align (type);
1233 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1234 Natural alignment of the argument's type. */
1235 align = align_up (align, 8);
1237 /* The AArch64 PCS requires at most doubleword alignment. */
1243 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1249 VEC_safe_push (stack_item_t, info->si, &item);
1252 if (info->nsaa & (align - 1))
1254 /* Push stack alignment padding. */
1255 int pad = align - (info->nsaa & (align - 1));
1260 VEC_safe_push (stack_item_t, info->si, &item);
1265 /* Marshall an argument into a sequence of one or more consecutive X
1266 registers or, if insufficient X registers are available then onto
1270 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1271 struct aarch64_call_info *info, struct type *type,
1274 int len = TYPE_LENGTH (type);
1275 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1277 /* PCS C.13 - Pass in registers if we have enough spare */
1278 if (info->ngrn + nregs <= 8)
1280 pass_in_x (gdbarch, regcache, info, type, arg);
1281 info->ngrn += nregs;
1286 pass_on_stack (info, type, arg);
1290 /* Pass a value in a V register, or on the stack if insufficient are
1294 pass_in_v_or_stack (struct gdbarch *gdbarch,
1295 struct regcache *regcache,
1296 struct aarch64_call_info *info,
1300 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
1301 value_contents (arg)))
1302 pass_on_stack (info, type, arg);
1305 /* Implement the "push_dummy_call" gdbarch method. */
1308 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1309 struct regcache *regcache, CORE_ADDR bp_addr,
1311 struct value **args, CORE_ADDR sp, int struct_return,
1312 CORE_ADDR struct_addr)
1315 struct aarch64_call_info info;
1316 struct type *func_type;
1317 struct type *return_type;
1318 int lang_struct_return;
1320 memset (&info, 0, sizeof (info));
1322 /* We need to know what the type of the called function is in order
1323 to determine the number of named/anonymous arguments for the
1324 actual argument placement, and the return type in order to handle
1325 return value correctly.
1327 The generic code above us views the decision of return in memory
1328 or return in registers as a two stage processes. The language
1329 handler is consulted first and may decide to return in memory (eg
1330 class with copy constructor returned by value), this will cause
1331 the generic code to allocate space AND insert an initial leading
1334 If the language code does not decide to pass in memory then the
1335 target code is consulted.
1337 If the language code decides to pass in memory we want to move
1338 the pointer inserted as the initial argument from the argument
1339 list and into X8, the conventional AArch64 struct return pointer
1342 This is slightly awkward, ideally the flag "lang_struct_return"
1343 would be passed to the targets implementation of push_dummy_call.
1344 Rather that change the target interface we call the language code
1345 directly ourselves. */
1347 func_type = check_typedef (value_type (function));
1349 /* Dereference function pointer types. */
1350 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1351 func_type = TYPE_TARGET_TYPE (func_type);
1353 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1354 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1356 /* If language_pass_by_reference () returned true we will have been
1357 given an additional initial argument, a hidden pointer to the
1358 return slot in memory. */
1359 return_type = TYPE_TARGET_TYPE (func_type);
1360 lang_struct_return = language_pass_by_reference (return_type);
1362 /* Set the return address. For the AArch64, the return breakpoint
1363 is always at BP_ADDR. */
1364 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1366 /* If we were given an initial argument for the return slot because
1367 lang_struct_return was true, lose it. */
1368 if (lang_struct_return)
1374 /* The struct_return pointer occupies X8. */
1375 if (struct_return || lang_struct_return)
1379 debug_printf ("struct return in %s = 0x%s\n",
1380 gdbarch_register_name (gdbarch,
1381 AARCH64_STRUCT_RETURN_REGNUM),
1382 paddress (gdbarch, struct_addr));
1384 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1388 for (argnum = 0; argnum < nargs; argnum++)
1390 struct value *arg = args[argnum];
1391 struct type *arg_type;
1394 arg_type = check_typedef (value_type (arg));
1395 len = TYPE_LENGTH (arg_type);
1397 switch (TYPE_CODE (arg_type))
1400 case TYPE_CODE_BOOL:
1401 case TYPE_CODE_CHAR:
1402 case TYPE_CODE_RANGE:
1403 case TYPE_CODE_ENUM:
1406 /* Promote to 32 bit integer. */
1407 if (TYPE_UNSIGNED (arg_type))
1408 arg_type = builtin_type (gdbarch)->builtin_uint32;
1410 arg_type = builtin_type (gdbarch)->builtin_int32;
1411 arg = value_cast (arg_type, arg);
1413 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1416 case TYPE_CODE_COMPLEX:
1419 const bfd_byte *buf = value_contents (arg);
1420 struct type *target_type =
1421 check_typedef (TYPE_TARGET_TYPE (arg_type));
1423 pass_in_v (gdbarch, regcache, &info,
1424 TYPE_LENGTH (target_type), buf);
1425 pass_in_v (gdbarch, regcache, &info,
1426 TYPE_LENGTH (target_type),
1427 buf + TYPE_LENGTH (target_type));
1432 pass_on_stack (&info, arg_type, arg);
1436 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1439 case TYPE_CODE_STRUCT:
1440 case TYPE_CODE_ARRAY:
1441 case TYPE_CODE_UNION:
1442 if (is_hfa_or_hva (arg_type))
1444 int elements = TYPE_NFIELDS (arg_type);
1446 /* Homogeneous Aggregates */
1447 if (info.nsrn + elements < 8)
1451 for (i = 0; i < elements; i++)
1453 /* We know that we have sufficient registers
1454 available therefore this will never fallback
1456 struct value *field =
1457 value_primitive_field (arg, 0, i, arg_type);
1458 struct type *field_type =
1459 check_typedef (value_type (field));
1461 pass_in_v_or_stack (gdbarch, regcache, &info,
1468 pass_on_stack (&info, arg_type, arg);
1471 else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
1472 && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
1474 /* Short vector types are passed in V registers. */
1475 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1479 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1480 invisible reference. */
1482 /* Allocate aligned storage. */
1483 sp = align_down (sp - len, 16);
1485 /* Write the real data into the stack. */
1486 write_memory (sp, value_contents (arg), len);
1488 /* Construct the indirection. */
1489 arg_type = lookup_pointer_type (arg_type);
1490 arg = value_from_pointer (arg_type, sp);
1491 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1494 /* PCS C.15 / C.18 multiple values pass. */
1495 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1499 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1504 /* Make sure stack retains 16 byte alignment. */
1506 sp -= 16 - (info.nsaa & 15);
1508 while (!VEC_empty (stack_item_t, info.si))
1510 stack_item_t *si = VEC_last (stack_item_t, info.si);
1513 if (si->data != NULL)
1514 write_memory (sp, si->data, si->len);
1515 VEC_pop (stack_item_t, info.si);
1518 VEC_free (stack_item_t, info.si);
1520 /* Finally, update the SP register. */
1521 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1526 /* Implement the "frame_align" gdbarch method. */
1529 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1531 /* Align the stack to sixteen bytes. */
1532 return sp & ~(CORE_ADDR) 15;
1535 /* Return the type for an AdvSISD Q register. */
1537 static struct type *
1538 aarch64_vnq_type (struct gdbarch *gdbarch)
1540 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1542 if (tdep->vnq_type == NULL)
1547 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1550 elem = builtin_type (gdbarch)->builtin_uint128;
1551 append_composite_type_field (t, "u", elem);
1553 elem = builtin_type (gdbarch)->builtin_int128;
1554 append_composite_type_field (t, "s", elem);
1559 return tdep->vnq_type;
1562 /* Return the type for an AdvSISD D register. */
1564 static struct type *
1565 aarch64_vnd_type (struct gdbarch *gdbarch)
1567 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1569 if (tdep->vnd_type == NULL)
1574 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1577 elem = builtin_type (gdbarch)->builtin_double;
1578 append_composite_type_field (t, "f", elem);
1580 elem = builtin_type (gdbarch)->builtin_uint64;
1581 append_composite_type_field (t, "u", elem);
1583 elem = builtin_type (gdbarch)->builtin_int64;
1584 append_composite_type_field (t, "s", elem);
1589 return tdep->vnd_type;
1592 /* Return the type for an AdvSISD S register. */
1594 static struct type *
1595 aarch64_vns_type (struct gdbarch *gdbarch)
1597 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1599 if (tdep->vns_type == NULL)
1604 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1607 elem = builtin_type (gdbarch)->builtin_float;
1608 append_composite_type_field (t, "f", elem);
1610 elem = builtin_type (gdbarch)->builtin_uint32;
1611 append_composite_type_field (t, "u", elem);
1613 elem = builtin_type (gdbarch)->builtin_int32;
1614 append_composite_type_field (t, "s", elem);
1619 return tdep->vns_type;
1622 /* Return the type for an AdvSISD H register. */
1624 static struct type *
1625 aarch64_vnh_type (struct gdbarch *gdbarch)
1627 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1629 if (tdep->vnh_type == NULL)
1634 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1637 elem = builtin_type (gdbarch)->builtin_uint16;
1638 append_composite_type_field (t, "u", elem);
1640 elem = builtin_type (gdbarch)->builtin_int16;
1641 append_composite_type_field (t, "s", elem);
1646 return tdep->vnh_type;
1649 /* Return the type for an AdvSISD B register. */
1651 static struct type *
1652 aarch64_vnb_type (struct gdbarch *gdbarch)
1654 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1656 if (tdep->vnb_type == NULL)
1661 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1664 elem = builtin_type (gdbarch)->builtin_uint8;
1665 append_composite_type_field (t, "u", elem);
1667 elem = builtin_type (gdbarch)->builtin_int8;
1668 append_composite_type_field (t, "s", elem);
1673 return tdep->vnb_type;
1676 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1679 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1681 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1682 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1684 if (reg == AARCH64_DWARF_SP)
1685 return AARCH64_SP_REGNUM;
1687 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1688 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1694 /* Implement the "print_insn" gdbarch method. */
1697 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1699 info->symbols = NULL;
1700 return print_insn_aarch64 (memaddr, info);
1703 /* AArch64 BRK software debug mode instruction.
1704 Note that AArch64 code is always little-endian.
1705 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1706 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1708 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1710 /* Extract from an array REGS containing the (raw) register state a
1711 function return value of type TYPE, and copy that, in virtual
1712 format, into VALBUF. */
1715 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1718 struct gdbarch *gdbarch = get_regcache_arch (regs);
1719 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1721 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1723 bfd_byte buf[V_REGISTER_SIZE];
1724 int len = TYPE_LENGTH (type);
1726 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1727 memcpy (valbuf, buf, len);
1729 else if (TYPE_CODE (type) == TYPE_CODE_INT
1730 || TYPE_CODE (type) == TYPE_CODE_CHAR
1731 || TYPE_CODE (type) == TYPE_CODE_BOOL
1732 || TYPE_CODE (type) == TYPE_CODE_PTR
1733 || TYPE_CODE (type) == TYPE_CODE_REF
1734 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1736 /* If the the type is a plain integer, then the access is
1737 straight-forward. Otherwise we have to play around a bit
1739 int len = TYPE_LENGTH (type);
1740 int regno = AARCH64_X0_REGNUM;
1745 /* By using store_unsigned_integer we avoid having to do
1746 anything special for small big-endian values. */
1747 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1748 store_unsigned_integer (valbuf,
1749 (len > X_REGISTER_SIZE
1750 ? X_REGISTER_SIZE : len), byte_order, tmp);
1751 len -= X_REGISTER_SIZE;
1752 valbuf += X_REGISTER_SIZE;
1755 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1757 int regno = AARCH64_V0_REGNUM;
1758 bfd_byte buf[V_REGISTER_SIZE];
1759 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1760 int len = TYPE_LENGTH (target_type);
1762 regcache_cooked_read (regs, regno, buf);
1763 memcpy (valbuf, buf, len);
1765 regcache_cooked_read (regs, regno + 1, buf);
1766 memcpy (valbuf, buf, len);
1769 else if (is_hfa_or_hva (type))
1771 int elements = TYPE_NFIELDS (type);
1772 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1773 int len = TYPE_LENGTH (member_type);
1776 for (i = 0; i < elements; i++)
1778 int regno = AARCH64_V0_REGNUM + i;
1779 bfd_byte buf[V_REGISTER_SIZE];
1783 debug_printf ("read HFA or HVA return value element %d from %s\n",
1785 gdbarch_register_name (gdbarch, regno));
1787 regcache_cooked_read (regs, regno, buf);
1789 memcpy (valbuf, buf, len);
1793 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1794 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1796 /* Short vector is returned in V register. */
1797 gdb_byte buf[V_REGISTER_SIZE];
1799 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1800 memcpy (valbuf, buf, TYPE_LENGTH (type));
1804 /* For a structure or union the behaviour is as if the value had
1805 been stored to word-aligned memory and then loaded into
1806 registers with 64-bit load instruction(s). */
1807 int len = TYPE_LENGTH (type);
1808 int regno = AARCH64_X0_REGNUM;
1809 bfd_byte buf[X_REGISTER_SIZE];
1813 regcache_cooked_read (regs, regno++, buf);
1814 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1815 len -= X_REGISTER_SIZE;
1816 valbuf += X_REGISTER_SIZE;
1822 /* Will a function return an aggregate type in memory or in a
1823 register? Return 0 if an aggregate type can be returned in a
1824 register, 1 if it must be returned in memory. */
1827 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1829 type = check_typedef (type);
1831 if (is_hfa_or_hva (type))
1833 /* v0-v7 are used to return values and one register is allocated
1834 for one member. However, HFA or HVA has at most four members. */
1838 if (TYPE_LENGTH (type) > 16)
1840 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1841 invisible reference. */
1849 /* Write into appropriate registers a function return value of type
1850 TYPE, given in virtual format. */
1853 aarch64_store_return_value (struct type *type, struct regcache *regs,
1854 const gdb_byte *valbuf)
1856 struct gdbarch *gdbarch = get_regcache_arch (regs);
1857 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1859 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1861 bfd_byte buf[V_REGISTER_SIZE];
1862 int len = TYPE_LENGTH (type);
1864 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1865 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1867 else if (TYPE_CODE (type) == TYPE_CODE_INT
1868 || TYPE_CODE (type) == TYPE_CODE_CHAR
1869 || TYPE_CODE (type) == TYPE_CODE_BOOL
1870 || TYPE_CODE (type) == TYPE_CODE_PTR
1871 || TYPE_CODE (type) == TYPE_CODE_REF
1872 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1874 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1876 /* Values of one word or less are zero/sign-extended and
1878 bfd_byte tmpbuf[X_REGISTER_SIZE];
1879 LONGEST val = unpack_long (type, valbuf);
1881 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1882 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1886 /* Integral values greater than one word are stored in
1887 consecutive registers starting with r0. This will always
1888 be a multiple of the regiser size. */
1889 int len = TYPE_LENGTH (type);
1890 int regno = AARCH64_X0_REGNUM;
1894 regcache_cooked_write (regs, regno++, valbuf);
1895 len -= X_REGISTER_SIZE;
1896 valbuf += X_REGISTER_SIZE;
1900 else if (is_hfa_or_hva (type))
1902 int elements = TYPE_NFIELDS (type);
1903 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1904 int len = TYPE_LENGTH (member_type);
1907 for (i = 0; i < elements; i++)
1909 int regno = AARCH64_V0_REGNUM + i;
1910 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
1914 debug_printf ("write HFA or HVA return value element %d to %s\n",
1916 gdbarch_register_name (gdbarch, regno));
1919 memcpy (tmpbuf, valbuf, len);
1920 regcache_cooked_write (regs, regno, tmpbuf);
1924 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1925 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
1928 gdb_byte buf[V_REGISTER_SIZE];
1930 memcpy (buf, valbuf, TYPE_LENGTH (type));
1931 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1935 /* For a structure or union the behaviour is as if the value had
1936 been stored to word-aligned memory and then loaded into
1937 registers with 64-bit load instruction(s). */
1938 int len = TYPE_LENGTH (type);
1939 int regno = AARCH64_X0_REGNUM;
1940 bfd_byte tmpbuf[X_REGISTER_SIZE];
1944 memcpy (tmpbuf, valbuf,
1945 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1946 regcache_cooked_write (regs, regno++, tmpbuf);
1947 len -= X_REGISTER_SIZE;
1948 valbuf += X_REGISTER_SIZE;
1953 /* Implement the "return_value" gdbarch method. */
1955 static enum return_value_convention
1956 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
1957 struct type *valtype, struct regcache *regcache,
1958 gdb_byte *readbuf, const gdb_byte *writebuf)
1961 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
1962 || TYPE_CODE (valtype) == TYPE_CODE_UNION
1963 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
1965 if (aarch64_return_in_memory (gdbarch, valtype))
1968 debug_printf ("return value in memory\n");
1969 return RETURN_VALUE_STRUCT_CONVENTION;
1974 aarch64_store_return_value (valtype, regcache, writebuf);
1977 aarch64_extract_return_value (valtype, regcache, readbuf);
1980 debug_printf ("return value in registers\n");
1982 return RETURN_VALUE_REGISTER_CONVENTION;
1985 /* Implement the "get_longjmp_target" gdbarch method. */
1988 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1991 gdb_byte buf[X_REGISTER_SIZE];
1992 struct gdbarch *gdbarch = get_frame_arch (frame);
1993 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1994 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1996 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
1998 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2002 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2006 /* Implement the "gen_return_address" gdbarch method. */
2009 aarch64_gen_return_address (struct gdbarch *gdbarch,
2010 struct agent_expr *ax, struct axs_value *value,
2013 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2014 value->kind = axs_lvalue_register;
2015 value->u.reg = AARCH64_LR_REGNUM;
2019 /* Return the pseudo register name corresponding to register regnum. */
2022 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2024 static const char *const q_name[] =
2026 "q0", "q1", "q2", "q3",
2027 "q4", "q5", "q6", "q7",
2028 "q8", "q9", "q10", "q11",
2029 "q12", "q13", "q14", "q15",
2030 "q16", "q17", "q18", "q19",
2031 "q20", "q21", "q22", "q23",
2032 "q24", "q25", "q26", "q27",
2033 "q28", "q29", "q30", "q31",
2036 static const char *const d_name[] =
2038 "d0", "d1", "d2", "d3",
2039 "d4", "d5", "d6", "d7",
2040 "d8", "d9", "d10", "d11",
2041 "d12", "d13", "d14", "d15",
2042 "d16", "d17", "d18", "d19",
2043 "d20", "d21", "d22", "d23",
2044 "d24", "d25", "d26", "d27",
2045 "d28", "d29", "d30", "d31",
2048 static const char *const s_name[] =
2050 "s0", "s1", "s2", "s3",
2051 "s4", "s5", "s6", "s7",
2052 "s8", "s9", "s10", "s11",
2053 "s12", "s13", "s14", "s15",
2054 "s16", "s17", "s18", "s19",
2055 "s20", "s21", "s22", "s23",
2056 "s24", "s25", "s26", "s27",
2057 "s28", "s29", "s30", "s31",
2060 static const char *const h_name[] =
2062 "h0", "h1", "h2", "h3",
2063 "h4", "h5", "h6", "h7",
2064 "h8", "h9", "h10", "h11",
2065 "h12", "h13", "h14", "h15",
2066 "h16", "h17", "h18", "h19",
2067 "h20", "h21", "h22", "h23",
2068 "h24", "h25", "h26", "h27",
2069 "h28", "h29", "h30", "h31",
2072 static const char *const b_name[] =
2074 "b0", "b1", "b2", "b3",
2075 "b4", "b5", "b6", "b7",
2076 "b8", "b9", "b10", "b11",
2077 "b12", "b13", "b14", "b15",
2078 "b16", "b17", "b18", "b19",
2079 "b20", "b21", "b22", "b23",
2080 "b24", "b25", "b26", "b27",
2081 "b28", "b29", "b30", "b31",
2084 regnum -= gdbarch_num_regs (gdbarch);
2086 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2087 return q_name[regnum - AARCH64_Q0_REGNUM];
2089 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2090 return d_name[regnum - AARCH64_D0_REGNUM];
2092 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2093 return s_name[regnum - AARCH64_S0_REGNUM];
2095 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2096 return h_name[regnum - AARCH64_H0_REGNUM];
2098 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2099 return b_name[regnum - AARCH64_B0_REGNUM];
2101 internal_error (__FILE__, __LINE__,
2102 _("aarch64_pseudo_register_name: bad register number %d"),
2106 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2108 static struct type *
2109 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2111 regnum -= gdbarch_num_regs (gdbarch);
2113 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2114 return aarch64_vnq_type (gdbarch);
2116 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2117 return aarch64_vnd_type (gdbarch);
2119 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2120 return aarch64_vns_type (gdbarch);
2122 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2123 return aarch64_vnh_type (gdbarch);
2125 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2126 return aarch64_vnb_type (gdbarch);
2128 internal_error (__FILE__, __LINE__,
2129 _("aarch64_pseudo_register_type: bad register number %d"),
2133 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2136 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2137 struct reggroup *group)
2139 regnum -= gdbarch_num_regs (gdbarch);
2141 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2142 return group == all_reggroup || group == vector_reggroup;
2143 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2144 return (group == all_reggroup || group == vector_reggroup
2145 || group == float_reggroup);
2146 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2147 return (group == all_reggroup || group == vector_reggroup
2148 || group == float_reggroup);
2149 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2150 return group == all_reggroup || group == vector_reggroup;
2151 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2152 return group == all_reggroup || group == vector_reggroup;
2154 return group == all_reggroup;
2157 /* Implement the "pseudo_register_read_value" gdbarch method. */
2159 static struct value *
2160 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2161 struct regcache *regcache,
2164 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2165 struct value *result_value;
2168 result_value = allocate_value (register_type (gdbarch, regnum));
2169 VALUE_LVAL (result_value) = lval_register;
2170 VALUE_REGNUM (result_value) = regnum;
2171 buf = value_contents_raw (result_value);
2173 regnum -= gdbarch_num_regs (gdbarch);
2175 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2177 enum register_status status;
2180 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2181 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2182 if (status != REG_VALID)
2183 mark_value_bytes_unavailable (result_value, 0,
2184 TYPE_LENGTH (value_type (result_value)));
2186 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2187 return result_value;
2190 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2192 enum register_status status;
2195 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2196 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2197 if (status != REG_VALID)
2198 mark_value_bytes_unavailable (result_value, 0,
2199 TYPE_LENGTH (value_type (result_value)));
2201 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2202 return result_value;
2205 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2207 enum register_status status;
2210 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2211 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2212 if (status != REG_VALID)
2213 mark_value_bytes_unavailable (result_value, 0,
2214 TYPE_LENGTH (value_type (result_value)));
2216 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2217 return result_value;
2220 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2222 enum register_status status;
2225 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2226 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2227 if (status != REG_VALID)
2228 mark_value_bytes_unavailable (result_value, 0,
2229 TYPE_LENGTH (value_type (result_value)));
2231 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2232 return result_value;
2235 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2237 enum register_status status;
2240 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2241 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2242 if (status != REG_VALID)
2243 mark_value_bytes_unavailable (result_value, 0,
2244 TYPE_LENGTH (value_type (result_value)));
2246 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2247 return result_value;
2250 gdb_assert_not_reached ("regnum out of bound");
2253 /* Implement the "pseudo_register_write" gdbarch method. */
2256 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2257 int regnum, const gdb_byte *buf)
2259 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2261 /* Ensure the register buffer is zero, we want gdb writes of the
2262 various 'scalar' pseudo registers to behavior like architectural
2263 writes, register width bytes are written the remainder are set to
2265 memset (reg_buf, 0, sizeof (reg_buf));
2267 regnum -= gdbarch_num_regs (gdbarch);
2269 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2271 /* pseudo Q registers */
2274 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2275 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2276 regcache_raw_write (regcache, v_regnum, reg_buf);
2280 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2282 /* pseudo D registers */
2285 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2286 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2287 regcache_raw_write (regcache, v_regnum, reg_buf);
2291 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2295 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2296 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2297 regcache_raw_write (regcache, v_regnum, reg_buf);
2301 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2303 /* pseudo H registers */
2306 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2307 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2308 regcache_raw_write (regcache, v_regnum, reg_buf);
2312 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2314 /* pseudo B registers */
2317 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2318 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2319 regcache_raw_write (regcache, v_regnum, reg_buf);
2323 gdb_assert_not_reached ("regnum out of bound");
2326 /* Callback function for user_reg_add. */
2328 static struct value *
2329 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2331 const int *reg_p = (const int *) baton;
2333 return value_of_register (*reg_p, frame);
2337 /* Implement the "software_single_step" gdbarch method, needed to
2338 single step through atomic sequences on AArch64. */
2340 static VEC (CORE_ADDR) *
2341 aarch64_software_single_step (struct regcache *regcache)
2343 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2344 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2345 const int insn_size = 4;
2346 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2347 CORE_ADDR pc = regcache_read_pc (regcache);
2348 CORE_ADDR breaks[2] = { -1, -1 };
2350 CORE_ADDR closing_insn = 0;
2351 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2352 byte_order_for_code);
2355 int bc_insn_count = 0; /* Conditional branch instruction count. */
2356 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2358 VEC (CORE_ADDR) *next_pcs = NULL;
2360 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2363 /* Look for a Load Exclusive instruction which begins the sequence. */
2364 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2367 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2370 insn = read_memory_unsigned_integer (loc, insn_size,
2371 byte_order_for_code);
2373 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2375 /* Check if the instruction is a conditional branch. */
2376 if (inst.opcode->iclass == condbranch)
2378 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2380 if (bc_insn_count >= 1)
2383 /* It is, so we'll try to set a breakpoint at the destination. */
2384 breaks[1] = loc + inst.operands[0].imm.value;
2390 /* Look for the Store Exclusive which closes the atomic sequence. */
2391 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2398 /* We didn't find a closing Store Exclusive instruction, fall back. */
2402 /* Insert breakpoint after the end of the atomic sequence. */
2403 breaks[0] = loc + insn_size;
2405 /* Check for duplicated breakpoints, and also check that the second
2406 breakpoint is not within the atomic sequence. */
2408 && (breaks[1] == breaks[0]
2409 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2410 last_breakpoint = 0;
2412 /* Insert the breakpoint at the end of the sequence, and one at the
2413 destination of the conditional branch, if it exists. */
2414 for (index = 0; index <= last_breakpoint; index++)
2415 VEC_safe_push (CORE_ADDR, next_pcs, breaks[index]);
2420 struct displaced_step_closure
2422 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2423 is being displaced stepping. */
2426 /* PC adjustment offset after displaced stepping. */
2430 /* Data when visiting instructions for displaced stepping. */
2432 struct aarch64_displaced_step_data
2434 struct aarch64_insn_data base;
2436 /* The address where the instruction will be executed at. */
2438 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2439 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2440 /* Number of instructions in INSN_BUF. */
2441 unsigned insn_count;
2442 /* Registers when doing displaced stepping. */
2443 struct regcache *regs;
2445 struct displaced_step_closure *dsc;
2448 /* Implementation of aarch64_insn_visitor method "b". */
2451 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2452 struct aarch64_insn_data *data)
2454 struct aarch64_displaced_step_data *dsd
2455 = (struct aarch64_displaced_step_data *) data;
2456 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2458 if (can_encode_int32 (new_offset, 28))
2460 /* Emit B rather than BL, because executing BL on a new address
2461 will get the wrong address into LR. In order to avoid this,
2462 we emit B, and update LR if the instruction is BL. */
2463 emit_b (dsd->insn_buf, 0, new_offset);
2469 emit_nop (dsd->insn_buf);
2471 dsd->dsc->pc_adjust = offset;
2477 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2478 data->insn_addr + 4);
2482 /* Implementation of aarch64_insn_visitor method "b_cond". */
2485 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2486 struct aarch64_insn_data *data)
2488 struct aarch64_displaced_step_data *dsd
2489 = (struct aarch64_displaced_step_data *) data;
2491 /* GDB has to fix up PC after displaced step this instruction
2492 differently according to the condition is true or false. Instead
2493 of checking COND against conditional flags, we can use
2494 the following instructions, and GDB can tell how to fix up PC
2495 according to the PC value.
2497 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2503 emit_bcond (dsd->insn_buf, cond, 8);
2505 dsd->dsc->pc_adjust = offset;
2506 dsd->insn_count = 1;
2509 /* Dynamically allocate a new register. If we know the register
2510 statically, we should make it a global as above instead of using this
2513 static struct aarch64_register
2514 aarch64_register (unsigned num, int is64)
2516 return (struct aarch64_register) { num, is64 };
2519 /* Implementation of aarch64_insn_visitor method "cb". */
2522 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2523 const unsigned rn, int is64,
2524 struct aarch64_insn_data *data)
2526 struct aarch64_displaced_step_data *dsd
2527 = (struct aarch64_displaced_step_data *) data;
2529 /* The offset is out of range for a compare and branch
2530 instruction. We can use the following instructions instead:
2532 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2537 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2538 dsd->insn_count = 1;
2540 dsd->dsc->pc_adjust = offset;
2543 /* Implementation of aarch64_insn_visitor method "tb". */
2546 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2547 const unsigned rt, unsigned bit,
2548 struct aarch64_insn_data *data)
2550 struct aarch64_displaced_step_data *dsd
2551 = (struct aarch64_displaced_step_data *) data;
2553 /* The offset is out of range for a test bit and branch
2554 instruction We can use the following instructions instead:
2556 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2562 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2563 dsd->insn_count = 1;
2565 dsd->dsc->pc_adjust = offset;
2568 /* Implementation of aarch64_insn_visitor method "adr". */
2571 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2572 const int is_adrp, struct aarch64_insn_data *data)
2574 struct aarch64_displaced_step_data *dsd
2575 = (struct aarch64_displaced_step_data *) data;
2576 /* We know exactly the address the ADR{P,} instruction will compute.
2577 We can just write it to the destination register. */
2578 CORE_ADDR address = data->insn_addr + offset;
2582 /* Clear the lower 12 bits of the offset to get the 4K page. */
2583 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2587 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2590 dsd->dsc->pc_adjust = 4;
2591 emit_nop (dsd->insn_buf);
2592 dsd->insn_count = 1;
2595 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2598 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2599 const unsigned rt, const int is64,
2600 struct aarch64_insn_data *data)
2602 struct aarch64_displaced_step_data *dsd
2603 = (struct aarch64_displaced_step_data *) data;
2604 CORE_ADDR address = data->insn_addr + offset;
2605 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2607 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2611 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2612 aarch64_register (rt, 1), zero);
2614 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2615 aarch64_register (rt, 1), zero);
2617 dsd->dsc->pc_adjust = 4;
2620 /* Implementation of aarch64_insn_visitor method "others". */
2623 aarch64_displaced_step_others (const uint32_t insn,
2624 struct aarch64_insn_data *data)
2626 struct aarch64_displaced_step_data *dsd
2627 = (struct aarch64_displaced_step_data *) data;
2629 aarch64_emit_insn (dsd->insn_buf, insn);
2630 dsd->insn_count = 1;
2632 if ((insn & 0xfffffc1f) == 0xd65f0000)
2635 dsd->dsc->pc_adjust = 0;
2638 dsd->dsc->pc_adjust = 4;
2641 static const struct aarch64_insn_visitor visitor =
2643 aarch64_displaced_step_b,
2644 aarch64_displaced_step_b_cond,
2645 aarch64_displaced_step_cb,
2646 aarch64_displaced_step_tb,
2647 aarch64_displaced_step_adr,
2648 aarch64_displaced_step_ldr_literal,
2649 aarch64_displaced_step_others,
2652 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2654 struct displaced_step_closure *
2655 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2656 CORE_ADDR from, CORE_ADDR to,
2657 struct regcache *regs)
2659 struct displaced_step_closure *dsc = NULL;
2660 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2661 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2662 struct aarch64_displaced_step_data dsd;
2665 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2668 /* Look for a Load Exclusive instruction which begins the sequence. */
2669 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2671 /* We can't displaced step atomic sequences. */
2675 dsc = XCNEW (struct displaced_step_closure);
2676 dsd.base.insn_addr = from;
2681 aarch64_relocate_instruction (insn, &visitor,
2682 (struct aarch64_insn_data *) &dsd);
2683 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2685 if (dsd.insn_count != 0)
2689 /* Instruction can be relocated to scratch pad. Copy
2690 relocated instruction(s) there. */
2691 for (i = 0; i < dsd.insn_count; i++)
2693 if (debug_displaced)
2695 debug_printf ("displaced: writing insn ");
2696 debug_printf ("%.8x", dsd.insn_buf[i]);
2697 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2699 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2700 (ULONGEST) dsd.insn_buf[i]);
2712 /* Implement the "displaced_step_fixup" gdbarch method. */
2715 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2716 struct displaced_step_closure *dsc,
2717 CORE_ADDR from, CORE_ADDR to,
2718 struct regcache *regs)
2724 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2727 /* Condition is true. */
2729 else if (pc - to == 4)
2731 /* Condition is false. */
2735 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2738 if (dsc->pc_adjust != 0)
2740 if (debug_displaced)
2742 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2743 paddress (gdbarch, from), dsc->pc_adjust);
2745 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2746 from + dsc->pc_adjust);
2750 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2753 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2754 struct displaced_step_closure *closure)
2759 /* Initialize the current architecture based on INFO. If possible,
2760 re-use an architecture from ARCHES, which is a list of
2761 architectures already created during this debugging session.
2763 Called e.g. at program startup, when reading a core file, and when
2764 reading a binary file. */
2766 static struct gdbarch *
2767 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2769 struct gdbarch_tdep *tdep;
2770 struct gdbarch *gdbarch;
2771 struct gdbarch_list *best_arch;
2772 struct tdesc_arch_data *tdesc_data = NULL;
2773 const struct target_desc *tdesc = info.target_desc;
2776 const struct tdesc_feature *feature;
2778 int num_pseudo_regs = 0;
2780 /* Ensure we always have a target descriptor. */
2781 if (!tdesc_has_registers (tdesc))
2782 tdesc = tdesc_aarch64;
2786 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2788 if (feature == NULL)
2791 tdesc_data = tdesc_data_alloc ();
2793 /* Validate the descriptor provides the mandatory core R registers
2794 and allocate their numbers. */
2795 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2797 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2798 aarch64_r_register_names[i]);
2800 num_regs = AARCH64_X0_REGNUM + i;
2802 /* Look for the V registers. */
2803 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2806 /* Validate the descriptor provides the mandatory V registers
2807 and allocate their numbers. */
2808 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2810 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2811 aarch64_v_register_names[i]);
2813 num_regs = AARCH64_V0_REGNUM + i;
2815 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2816 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2817 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2818 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2819 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2824 tdesc_data_cleanup (tdesc_data);
2828 /* AArch64 code is always little-endian. */
2829 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2831 /* If there is already a candidate, use it. */
2832 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2834 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2836 /* Found a match. */
2840 if (best_arch != NULL)
2842 if (tdesc_data != NULL)
2843 tdesc_data_cleanup (tdesc_data);
2844 return best_arch->gdbarch;
2847 tdep = XCNEW (struct gdbarch_tdep);
2848 gdbarch = gdbarch_alloc (&info, tdep);
2850 /* This should be low enough for everything. */
2851 tdep->lowest_pc = 0x20;
2852 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2853 tdep->jb_elt_size = 8;
2855 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2856 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2858 /* Frame handling. */
2859 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2860 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2861 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2863 /* Advance PC across function entry code. */
2864 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2866 /* The stack grows downward. */
2867 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2869 /* Breakpoint manipulation. */
2870 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
2871 aarch64_breakpoint::kind_from_pc);
2872 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
2873 aarch64_breakpoint::bp_from_kind);
2874 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2875 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2877 /* Information about registers, etc. */
2878 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2879 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2880 set_gdbarch_num_regs (gdbarch, num_regs);
2882 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2883 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2884 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2885 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2886 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2887 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2888 aarch64_pseudo_register_reggroup_p);
2891 set_gdbarch_short_bit (gdbarch, 16);
2892 set_gdbarch_int_bit (gdbarch, 32);
2893 set_gdbarch_float_bit (gdbarch, 32);
2894 set_gdbarch_double_bit (gdbarch, 64);
2895 set_gdbarch_long_double_bit (gdbarch, 128);
2896 set_gdbarch_long_bit (gdbarch, 64);
2897 set_gdbarch_long_long_bit (gdbarch, 64);
2898 set_gdbarch_ptr_bit (gdbarch, 64);
2899 set_gdbarch_char_signed (gdbarch, 0);
2900 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2901 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2902 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2904 /* Internal <-> external register number maps. */
2905 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2907 /* Returning results. */
2908 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2911 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2913 /* Virtual tables. */
2914 set_gdbarch_vbit_in_delta (gdbarch, 1);
2916 /* Hook in the ABI-specific overrides, if they have been registered. */
2917 info.target_desc = tdesc;
2918 info.tdep_info = (void *) tdesc_data;
2919 gdbarch_init_osabi (info, gdbarch);
2921 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2923 /* Add some default predicates. */
2924 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2925 dwarf2_append_unwinders (gdbarch);
2926 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2928 frame_base_set_default (gdbarch, &aarch64_normal_base);
2930 /* Now we have tuned the configuration, set a few final things,
2931 based on what the OS ABI has told us. */
2933 if (tdep->jb_pc >= 0)
2934 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2936 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2938 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2940 /* Add standard register aliases. */
2941 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2942 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2943 value_of_aarch64_user_reg,
2944 &aarch64_register_aliases[i].regnum);
2950 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2952 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2957 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2958 paddress (gdbarch, tdep->lowest_pc));
2961 /* Suppress warning from -Wmissing-prototypes. */
2962 extern initialize_file_ftype _initialize_aarch64_tdep;
2965 _initialize_aarch64_tdep (void)
2967 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2970 initialize_tdesc_aarch64 ();
2972 /* Debug this file's internals. */
2973 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2974 Set AArch64 debugging."), _("\
2975 Show AArch64 debugging."), _("\
2976 When on, AArch64 specific debugging is enabled."),
2979 &setdebuglist, &showdebuglist);
2982 register_self_test (selftests::aarch64_analyze_prologue_test);
2986 /* AArch64 process record-replay related structures, defines etc. */
2988 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2991 unsigned int reg_len = LENGTH; \
2994 REGS = XNEWVEC (uint32_t, reg_len); \
2995 memcpy(®S[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3000 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3003 unsigned int mem_len = LENGTH; \
3006 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3007 memcpy(&MEMS->len, &RECORD_BUF[0], \
3008 sizeof(struct aarch64_mem_r) * LENGTH); \
3013 /* AArch64 record/replay structures and enumerations. */
3015 struct aarch64_mem_r
3017 uint64_t len; /* Record length. */
3018 uint64_t addr; /* Memory address. */
3021 enum aarch64_record_result
3023 AARCH64_RECORD_SUCCESS,
3024 AARCH64_RECORD_FAILURE,
3025 AARCH64_RECORD_UNSUPPORTED,
3026 AARCH64_RECORD_UNKNOWN
3029 typedef struct insn_decode_record_t
3031 struct gdbarch *gdbarch;
3032 struct regcache *regcache;
3033 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3034 uint32_t aarch64_insn; /* Insn to be recorded. */
3035 uint32_t mem_rec_count; /* Count of memory records. */
3036 uint32_t reg_rec_count; /* Count of register records. */
3037 uint32_t *aarch64_regs; /* Registers to be recorded. */
3038 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3039 } insn_decode_record;
3041 /* Record handler for data processing - register instructions. */
3044 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3046 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3047 uint32_t record_buf[4];
3049 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3050 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3051 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3053 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3057 /* Logical (shifted register). */
3058 if (insn_bits24_27 == 0x0a)
3059 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3061 else if (insn_bits24_27 == 0x0b)
3062 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3064 return AARCH64_RECORD_UNKNOWN;
3066 record_buf[0] = reg_rd;
3067 aarch64_insn_r->reg_rec_count = 1;
3069 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3073 if (insn_bits24_27 == 0x0b)
3075 /* Data-processing (3 source). */
3076 record_buf[0] = reg_rd;
3077 aarch64_insn_r->reg_rec_count = 1;
3079 else if (insn_bits24_27 == 0x0a)
3081 if (insn_bits21_23 == 0x00)
3083 /* Add/subtract (with carry). */
3084 record_buf[0] = reg_rd;
3085 aarch64_insn_r->reg_rec_count = 1;
3086 if (bit (aarch64_insn_r->aarch64_insn, 29))
3088 record_buf[1] = AARCH64_CPSR_REGNUM;
3089 aarch64_insn_r->reg_rec_count = 2;
3092 else if (insn_bits21_23 == 0x02)
3094 /* Conditional compare (register) and conditional compare
3095 (immediate) instructions. */
3096 record_buf[0] = AARCH64_CPSR_REGNUM;
3097 aarch64_insn_r->reg_rec_count = 1;
3099 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3101 /* CConditional select. */
3102 /* Data-processing (2 source). */
3103 /* Data-processing (1 source). */
3104 record_buf[0] = reg_rd;
3105 aarch64_insn_r->reg_rec_count = 1;
3108 return AARCH64_RECORD_UNKNOWN;
3112 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3114 return AARCH64_RECORD_SUCCESS;
3117 /* Record handler for data processing - immediate instructions. */
3120 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3122 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3123 uint32_t record_buf[4];
3125 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3126 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3127 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3129 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3130 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3131 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3133 record_buf[0] = reg_rd;
3134 aarch64_insn_r->reg_rec_count = 1;
3136 else if (insn_bits24_27 == 0x01)
3138 /* Add/Subtract (immediate). */
3139 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3140 record_buf[0] = reg_rd;
3141 aarch64_insn_r->reg_rec_count = 1;
3143 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3145 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3147 /* Logical (immediate). */
3148 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3149 record_buf[0] = reg_rd;
3150 aarch64_insn_r->reg_rec_count = 1;
3152 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3155 return AARCH64_RECORD_UNKNOWN;
3157 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3159 return AARCH64_RECORD_SUCCESS;
3162 /* Record handler for branch, exception generation and system instructions. */
3165 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3167 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3168 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3169 uint32_t record_buf[4];
3171 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3172 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3173 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3175 if (insn_bits28_31 == 0x0d)
3177 /* Exception generation instructions. */
3178 if (insn_bits24_27 == 0x04)
3180 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3181 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3182 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3184 ULONGEST svc_number;
3186 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3188 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3192 return AARCH64_RECORD_UNSUPPORTED;
3194 /* System instructions. */
3195 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3197 uint32_t reg_rt, reg_crn;
3199 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3200 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3202 /* Record rt in case of sysl and mrs instructions. */
3203 if (bit (aarch64_insn_r->aarch64_insn, 21))
3205 record_buf[0] = reg_rt;
3206 aarch64_insn_r->reg_rec_count = 1;
3208 /* Record cpsr for hint and msr(immediate) instructions. */
3209 else if (reg_crn == 0x02 || reg_crn == 0x04)
3211 record_buf[0] = AARCH64_CPSR_REGNUM;
3212 aarch64_insn_r->reg_rec_count = 1;
3215 /* Unconditional branch (register). */
3216 else if((insn_bits24_27 & 0x0e) == 0x06)
3218 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3219 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3220 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3223 return AARCH64_RECORD_UNKNOWN;
3225 /* Unconditional branch (immediate). */
3226 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3228 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3229 if (bit (aarch64_insn_r->aarch64_insn, 31))
3230 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3233 /* Compare & branch (immediate), Test & branch (immediate) and
3234 Conditional branch (immediate). */
3235 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3237 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3239 return AARCH64_RECORD_SUCCESS;
3242 /* Record handler for advanced SIMD load and store instructions. */
3245 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3248 uint64_t addr_offset = 0;
3249 uint32_t record_buf[24];
3250 uint64_t record_buf_mem[24];
3251 uint32_t reg_rn, reg_rt;
3252 uint32_t reg_index = 0, mem_index = 0;
3253 uint8_t opcode_bits, size_bits;
3255 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3256 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3257 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3258 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3259 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3262 debug_printf ("Process record: Advanced SIMD load/store\n");
3264 /* Load/store single structure. */
3265 if (bit (aarch64_insn_r->aarch64_insn, 24))
3267 uint8_t sindex, scale, selem, esize, replicate = 0;
3268 scale = opcode_bits >> 2;
3269 selem = ((opcode_bits & 0x02) |
3270 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3274 if (size_bits & 0x01)
3275 return AARCH64_RECORD_UNKNOWN;
3278 if ((size_bits >> 1) & 0x01)
3279 return AARCH64_RECORD_UNKNOWN;
3280 if (size_bits & 0x01)
3282 if (!((opcode_bits >> 1) & 0x01))
3285 return AARCH64_RECORD_UNKNOWN;
3289 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3296 return AARCH64_RECORD_UNKNOWN;
3302 for (sindex = 0; sindex < selem; sindex++)
3304 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3305 reg_rt = (reg_rt + 1) % 32;
3309 for (sindex = 0; sindex < selem; sindex++)
3311 if (bit (aarch64_insn_r->aarch64_insn, 22))
3312 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3315 record_buf_mem[mem_index++] = esize / 8;
3316 record_buf_mem[mem_index++] = address + addr_offset;
3318 addr_offset = addr_offset + (esize / 8);
3319 reg_rt = (reg_rt + 1) % 32;
3323 /* Load/store multiple structure. */
3326 uint8_t selem, esize, rpt, elements;
3327 uint8_t eindex, rindex;
3329 esize = 8 << size_bits;
3330 if (bit (aarch64_insn_r->aarch64_insn, 30))
3331 elements = 128 / esize;
3333 elements = 64 / esize;
3335 switch (opcode_bits)
3337 /*LD/ST4 (4 Registers). */
3342 /*LD/ST1 (4 Registers). */
3347 /*LD/ST3 (3 Registers). */
3352 /*LD/ST1 (3 Registers). */
3357 /*LD/ST1 (1 Register). */
3362 /*LD/ST2 (2 Registers). */
3367 /*LD/ST1 (2 Registers). */
3373 return AARCH64_RECORD_UNSUPPORTED;
3376 for (rindex = 0; rindex < rpt; rindex++)
3377 for (eindex = 0; eindex < elements; eindex++)
3379 uint8_t reg_tt, sindex;
3380 reg_tt = (reg_rt + rindex) % 32;
3381 for (sindex = 0; sindex < selem; sindex++)
3383 if (bit (aarch64_insn_r->aarch64_insn, 22))
3384 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3387 record_buf_mem[mem_index++] = esize / 8;
3388 record_buf_mem[mem_index++] = address + addr_offset;
3390 addr_offset = addr_offset + (esize / 8);
3391 reg_tt = (reg_tt + 1) % 32;
3396 if (bit (aarch64_insn_r->aarch64_insn, 23))
3397 record_buf[reg_index++] = reg_rn;
3399 aarch64_insn_r->reg_rec_count = reg_index;
3400 aarch64_insn_r->mem_rec_count = mem_index / 2;
3401 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3403 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3405 return AARCH64_RECORD_SUCCESS;
3408 /* Record handler for load and store instructions. */
3411 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3413 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3414 uint8_t insn_bit23, insn_bit21;
3415 uint8_t opc, size_bits, ld_flag, vector_flag;
3416 uint32_t reg_rn, reg_rt, reg_rt2;
3417 uint64_t datasize, offset;
3418 uint32_t record_buf[8];
3419 uint64_t record_buf_mem[8];
3422 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3423 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3424 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3425 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3426 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3427 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3428 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3429 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3430 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3431 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3432 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3434 /* Load/store exclusive. */
3435 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3438 debug_printf ("Process record: load/store exclusive\n");
3442 record_buf[0] = reg_rt;
3443 aarch64_insn_r->reg_rec_count = 1;
3446 record_buf[1] = reg_rt2;
3447 aarch64_insn_r->reg_rec_count = 2;
3453 datasize = (8 << size_bits) * 2;
3455 datasize = (8 << size_bits);
3456 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3458 record_buf_mem[0] = datasize / 8;
3459 record_buf_mem[1] = address;
3460 aarch64_insn_r->mem_rec_count = 1;
3463 /* Save register rs. */
3464 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3465 aarch64_insn_r->reg_rec_count = 1;
3469 /* Load register (literal) instructions decoding. */
3470 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3473 debug_printf ("Process record: load register (literal)\n");
3475 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3477 record_buf[0] = reg_rt;
3478 aarch64_insn_r->reg_rec_count = 1;
3480 /* All types of load/store pair instructions decoding. */
3481 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3484 debug_printf ("Process record: load/store pair\n");
3490 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3491 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3495 record_buf[0] = reg_rt;
3496 record_buf[1] = reg_rt2;
3498 aarch64_insn_r->reg_rec_count = 2;
3503 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3505 size_bits = size_bits >> 1;
3506 datasize = 8 << (2 + size_bits);
3507 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3508 offset = offset << (2 + size_bits);
3509 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3511 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3513 if (imm7_off & 0x40)
3514 address = address - offset;
3516 address = address + offset;
3519 record_buf_mem[0] = datasize / 8;
3520 record_buf_mem[1] = address;
3521 record_buf_mem[2] = datasize / 8;
3522 record_buf_mem[3] = address + (datasize / 8);
3523 aarch64_insn_r->mem_rec_count = 2;
3525 if (bit (aarch64_insn_r->aarch64_insn, 23))
3526 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3528 /* Load/store register (unsigned immediate) instructions. */
3529 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3531 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3538 if (size_bits != 0x03)
3541 return AARCH64_RECORD_UNKNOWN;
3545 debug_printf ("Process record: load/store (unsigned immediate):"
3546 " size %x V %d opc %x\n", size_bits, vector_flag,
3552 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3553 datasize = 8 << size_bits;
3554 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3556 offset = offset << size_bits;
3557 address = address + offset;
3559 record_buf_mem[0] = datasize >> 3;
3560 record_buf_mem[1] = address;
3561 aarch64_insn_r->mem_rec_count = 1;
3566 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3568 record_buf[0] = reg_rt;
3569 aarch64_insn_r->reg_rec_count = 1;
3572 /* Load/store register (register offset) instructions. */
3573 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3574 && insn_bits10_11 == 0x02 && insn_bit21)
3577 debug_printf ("Process record: load/store (register offset)\n");
3578 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3585 if (size_bits != 0x03)
3588 return AARCH64_RECORD_UNKNOWN;
3592 ULONGEST reg_rm_val;
3594 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3595 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
3596 if (bit (aarch64_insn_r->aarch64_insn, 12))
3597 offset = reg_rm_val << size_bits;
3599 offset = reg_rm_val;
3600 datasize = 8 << size_bits;
3601 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3603 address = address + offset;
3604 record_buf_mem[0] = datasize >> 3;
3605 record_buf_mem[1] = address;
3606 aarch64_insn_r->mem_rec_count = 1;
3611 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3613 record_buf[0] = reg_rt;
3614 aarch64_insn_r->reg_rec_count = 1;
3617 /* Load/store register (immediate and unprivileged) instructions. */
3618 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3623 debug_printf ("Process record: load/store "
3624 "(immediate and unprivileged)\n");
3626 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3633 if (size_bits != 0x03)
3636 return AARCH64_RECORD_UNKNOWN;
3641 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3642 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3643 datasize = 8 << size_bits;
3644 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3646 if (insn_bits10_11 != 0x01)
3648 if (imm9_off & 0x0100)
3649 address = address - offset;
3651 address = address + offset;
3653 record_buf_mem[0] = datasize >> 3;
3654 record_buf_mem[1] = address;
3655 aarch64_insn_r->mem_rec_count = 1;
3660 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3662 record_buf[0] = reg_rt;
3663 aarch64_insn_r->reg_rec_count = 1;
3665 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3666 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3668 /* Advanced SIMD load/store instructions. */
3670 return aarch64_record_asimd_load_store (aarch64_insn_r);
3672 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3674 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3676 return AARCH64_RECORD_SUCCESS;
3679 /* Record handler for data processing SIMD and floating point instructions. */
3682 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3684 uint8_t insn_bit21, opcode, rmode, reg_rd;
3685 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3686 uint8_t insn_bits11_14;
3687 uint32_t record_buf[2];
3689 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3690 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3691 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3692 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3693 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3694 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3695 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3696 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3697 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3700 debug_printf ("Process record: data processing SIMD/FP: ");
3702 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3704 /* Floating point - fixed point conversion instructions. */
3708 debug_printf ("FP - fixed point conversion");
3710 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3711 record_buf[0] = reg_rd;
3713 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3715 /* Floating point - conditional compare instructions. */
3716 else if (insn_bits10_11 == 0x01)
3719 debug_printf ("FP - conditional compare");
3721 record_buf[0] = AARCH64_CPSR_REGNUM;
3723 /* Floating point - data processing (2-source) and
3724 conditional select instructions. */
3725 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3728 debug_printf ("FP - DP (2-source)");
3730 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3732 else if (insn_bits10_11 == 0x00)
3734 /* Floating point - immediate instructions. */
3735 if ((insn_bits12_15 & 0x01) == 0x01
3736 || (insn_bits12_15 & 0x07) == 0x04)
3739 debug_printf ("FP - immediate");
3740 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3742 /* Floating point - compare instructions. */
3743 else if ((insn_bits12_15 & 0x03) == 0x02)
3746 debug_printf ("FP - immediate");
3747 record_buf[0] = AARCH64_CPSR_REGNUM;
3749 /* Floating point - integer conversions instructions. */
3750 else if (insn_bits12_15 == 0x00)
3752 /* Convert float to integer instruction. */
3753 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3756 debug_printf ("float to int conversion");
3758 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3760 /* Convert integer to float instruction. */
3761 else if ((opcode >> 1) == 0x01 && !rmode)
3764 debug_printf ("int to float conversion");
3766 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3768 /* Move float to integer instruction. */
3769 else if ((opcode >> 1) == 0x03)
3772 debug_printf ("move float to int");
3774 if (!(opcode & 0x01))
3775 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3777 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3780 return AARCH64_RECORD_UNKNOWN;
3783 return AARCH64_RECORD_UNKNOWN;
3786 return AARCH64_RECORD_UNKNOWN;
3788 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3791 debug_printf ("SIMD copy");
3793 /* Advanced SIMD copy instructions. */
3794 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3795 && !bit (aarch64_insn_r->aarch64_insn, 15)
3796 && bit (aarch64_insn_r->aarch64_insn, 10))
3798 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3799 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3801 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3804 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3806 /* All remaining floating point or advanced SIMD instructions. */
3810 debug_printf ("all remain");
3812 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3816 debug_printf ("\n");
3818 aarch64_insn_r->reg_rec_count++;
3819 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3820 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3822 return AARCH64_RECORD_SUCCESS;
3825 /* Decodes insns type and invokes its record handler. */
3828 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3830 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3832 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3833 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3834 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3835 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3837 /* Data processing - immediate instructions. */
3838 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3839 return aarch64_record_data_proc_imm (aarch64_insn_r);
3841 /* Branch, exception generation and system instructions. */
3842 if (ins_bit26 && !ins_bit27 && ins_bit28)
3843 return aarch64_record_branch_except_sys (aarch64_insn_r);
3845 /* Load and store instructions. */
3846 if (!ins_bit25 && ins_bit27)
3847 return aarch64_record_load_store (aarch64_insn_r);
3849 /* Data processing - register instructions. */
3850 if (ins_bit25 && !ins_bit26 && ins_bit27)
3851 return aarch64_record_data_proc_reg (aarch64_insn_r);
3853 /* Data processing - SIMD and floating point instructions. */
3854 if (ins_bit25 && ins_bit26 && ins_bit27)
3855 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3857 return AARCH64_RECORD_UNSUPPORTED;
3860 /* Cleans up local record registers and memory allocations. */
3863 deallocate_reg_mem (insn_decode_record *record)
3865 xfree (record->aarch64_regs);
3866 xfree (record->aarch64_mems);
3869 /* Parse the current instruction and record the values of the registers and
3870 memory that will be changed in current instruction to record_arch_list
3871 return -1 if something is wrong. */
3874 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3875 CORE_ADDR insn_addr)
3877 uint32_t rec_no = 0;
3878 uint8_t insn_size = 4;
3880 gdb_byte buf[insn_size];
3881 insn_decode_record aarch64_record;
3883 memset (&buf[0], 0, insn_size);
3884 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3885 target_read_memory (insn_addr, &buf[0], insn_size);
3886 aarch64_record.aarch64_insn
3887 = (uint32_t) extract_unsigned_integer (&buf[0],
3889 gdbarch_byte_order (gdbarch));
3890 aarch64_record.regcache = regcache;
3891 aarch64_record.this_addr = insn_addr;
3892 aarch64_record.gdbarch = gdbarch;
3894 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3895 if (ret == AARCH64_RECORD_UNSUPPORTED)
3897 printf_unfiltered (_("Process record does not support instruction "
3898 "0x%0x at address %s.\n"),
3899 aarch64_record.aarch64_insn,
3900 paddress (gdbarch, insn_addr));
3906 /* Record registers. */
3907 record_full_arch_list_add_reg (aarch64_record.regcache,
3909 /* Always record register CPSR. */
3910 record_full_arch_list_add_reg (aarch64_record.regcache,
3911 AARCH64_CPSR_REGNUM);
3912 if (aarch64_record.aarch64_regs)
3913 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3914 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3915 aarch64_record.aarch64_regs[rec_no]))
3918 /* Record memories. */
3919 if (aarch64_record.aarch64_mems)
3920 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3921 if (record_full_arch_list_add_mem
3922 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3923 aarch64_record.aarch64_mems[rec_no].len))
3926 if (record_full_arch_list_add_end ())
3930 deallocate_reg_mem (&aarch64_record);