1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
58 #include "features/aarch64.c"
60 #include "arch/aarch64-insn.h"
62 #include "opcode/aarch64.h"
/* Bit-field extraction helpers for decoding 32-bit instruction words.
   submask(x)       - mask covering bits [0, x] inclusive.
   bit(obj, st)     - extract single bit ST of OBJ.
   bits(obj, st, fn)- extract bits [ST, FN] inclusive of OBJ.
   Use 1ULL rather than 1L: 'long' is 32 bits on ILP32 hosts, so
   "1L << 32" (submask (31)) would be undefined behavior there.  With
   unsigned long long the shift is well defined for x up to 62.  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Pseudo register base numbers.  Q/D/S/H/B are progressively narrower
   views of the vector registers; each bank below is 32 registers wide,
   so the banks occupy consecutive ranges of pseudo register numbers
   starting at AARCH64_Q0_REGNUM.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
/* The standard register names, and all the valid aliases for them.
   NOTE(review): this extraction has lost the opening of the aggregate
   (presumably "static const struct {") and the register-number field
   declaration; only the name field and the initializer rows survive.
   Verify against upstream before editing.  */
const char *const name;
} aarch64_register_aliases[] =
/* 64-bit register names.  */
{"fp", AARCH64_FP_REGNUM},
{"lr", AARCH64_LR_REGNUM},
{"sp", AARCH64_SP_REGNUM},
/* 32-bit register names.  Each wN aliases the low half of xN, so it
   maps to the same raw register number.  */
{"w0", AARCH64_X0_REGNUM + 0},
{"w1", AARCH64_X0_REGNUM + 1},
{"w2", AARCH64_X0_REGNUM + 2},
{"w3", AARCH64_X0_REGNUM + 3},
{"w4", AARCH64_X0_REGNUM + 4},
{"w5", AARCH64_X0_REGNUM + 5},
{"w6", AARCH64_X0_REGNUM + 6},
{"w7", AARCH64_X0_REGNUM + 7},
{"w8", AARCH64_X0_REGNUM + 8},
{"w9", AARCH64_X0_REGNUM + 9},
{"w10", AARCH64_X0_REGNUM + 10},
{"w11", AARCH64_X0_REGNUM + 11},
{"w12", AARCH64_X0_REGNUM + 12},
{"w13", AARCH64_X0_REGNUM + 13},
{"w14", AARCH64_X0_REGNUM + 14},
{"w15", AARCH64_X0_REGNUM + 15},
{"w16", AARCH64_X0_REGNUM + 16},
{"w17", AARCH64_X0_REGNUM + 17},
{"w18", AARCH64_X0_REGNUM + 18},
{"w19", AARCH64_X0_REGNUM + 19},
{"w20", AARCH64_X0_REGNUM + 20},
{"w21", AARCH64_X0_REGNUM + 21},
{"w22", AARCH64_X0_REGNUM + 22},
{"w23", AARCH64_X0_REGNUM + 23},
{"w24", AARCH64_X0_REGNUM + 24},
{"w25", AARCH64_X0_REGNUM + 25},
{"w26", AARCH64_X0_REGNUM + 26},
{"w27", AARCH64_X0_REGNUM + 27},
{"w28", AARCH64_X0_REGNUM + 28},
{"w29", AARCH64_X0_REGNUM + 29},
{"w30", AARCH64_X0_REGNUM + 30},
/* ip0/ip1: the AAPCS64 intra-procedure-call scratch registers,
   aliases for x16/x17.  */
{"ip0", AARCH64_X0_REGNUM + 16},
{"ip1", AARCH64_X0_REGNUM + 17}
/* The required core 'R' registers.  Indexed to supply the register
   names for the target description.
   NOTE(review): the opening brace and closing "};" of this initializer
   appear to have been dropped by the extraction.  */
static const char *const aarch64_r_register_names[] =
/* These registers must appear in consecutive RAW register number
   order and they must begin with AARCH64_X0_REGNUM! */
"x0", "x1", "x2", "x3",
"x4", "x5", "x6", "x7",
"x8", "x9", "x10", "x11",
"x12", "x13", "x14", "x15",
"x16", "x17", "x18", "x19",
"x20", "x21", "x22", "x23",
"x24", "x25", "x26", "x27",
"x28", "x29", "x30", "sp",
/* The FP/SIMD 'V' registers.
   NOTE(review): the opening brace and closing "};" of this initializer
   appear to have been dropped by the extraction.  */
static const char *const aarch64_v_register_names[] =
/* These registers must appear in consecutive RAW register number
   order and they must begin with AARCH64_V0_REGNUM! */
"v0", "v1", "v2", "v3",
"v4", "v5", "v6", "v7",
"v8", "v9", "v10", "v11",
"v12", "v13", "v14", "v15",
"v16", "v17", "v18", "v19",
"v20", "v21", "v22", "v23",
"v24", "v25", "v26", "v27",
"v28", "v29", "v30", "v31",
/* AArch64 prologue cache structure.  Filled in by the prologue
   analyzer and consumed by the prologue/stub unwinders below.
   NOTE(review): most field declarations (presumably func, prev_pc,
   prev_sp, available_p, framesize, framereg) have been lost from this
   extraction; only their describing comments and saved_regs remain.  */
struct aarch64_prologue_cache
/* The program counter at the start of the function.  It is used to
   identify this frame as a prologue frame.  */
/* The program counter at the time this frame was created; i.e. where
   this function was called from.  It is used to identify this frame as a
/* The stack pointer at the time this frame was created; i.e. the
   caller's stack pointer when this function was called.  It is used
   to identify this frame.  */
/* Is the target available to read from?  */
/* The frame base for this frame is just prev_sp - frame size.
   FRAMESIZE is the distance from the frame pointer to the
   initial stack pointer.  */
/* The register used to hold the frame pointer for this frame.  */
/* Saved register offsets.  */
struct trad_frame_saved_reg *saved_regs;
/* "show debug aarch64" callback: report whether AArch64 debugging
   output is currently enabled.  VALUE is the textual setting supplied
   by the cmd framework.
   NOTE(review): the return-type line and the body braces appear lost
   in this extraction.  */
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   START/LIMIT bound the instruction range scanned (4 bytes at a time).
   CACHE, if non-NULL, receives the discovered frame register, frame
   size and saved-register offsets.  Tracking is symbolic: each X
   register holds a prologue value (pv_t) and stack stores are recorded
   in a pv_area.

   NOTE(review): numerous lines (declarations of i/insn/inst, body
   braces, break statements, the early-return path when CACHE is NULL)
   have been dropped from this extraction; the surviving tokens are kept
   verbatim below.  Verify against upstream before modifying.  */
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;
  /* Initially every X register holds exactly its entry value.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);
  /* Walk the prologue one 4-byte instruction at a time.  */
  for (; start < limit; start += 4)
      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
      /* Non-zero return from the decoder means an undecodable
         instruction; analysis presumably stops there.  */
      if (aarch64_decode_insn (insn, &inst, 1) != 0)
      /* ADD/SUB immediate: track constant adjustments of a register
         (typically "sub sp, sp, #imm" allocating the frame).  */
      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
          if (inst.opcode->op == OP_ADD)
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
      /* ADRP produces a PC-relative address we cannot track
         symbolically; mark the destination unknown.  */
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
      else if (inst.opcode->iclass == branch_imm)
          /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == condbranch)
          /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == branch_reg)
          /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == compbranch)
          /* Stop analysis on branch.  */
      else if (inst.opcode->op == OP_MOVZ)
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
      /* ORR with zero shift copies RN to RD (register move, e.g.
         "mov x29, sp" establishing the frame pointer).  */
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
              /* Anything else with ORR is beyond this analyzer; give
                 up (debug trace below).  */
              debug_printf ("aarch64: prologue analysis gave up "
                            "addr=0x%s opcode=0x%x (orr x register)\n",
                            core_addr_to_string_nz (start), insn);
      /* STUR: record a single-register store into the tracked stack
         area.  */
      else if (inst.opcode->op == OP_STUR)
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);
          pv_area_store (stack, pv_add_constant (regs[rn],
                                                 inst.operands[1].addr.offset.imm),
                         is64 ? 8 : 4, regs[rt]);
      /* STP: record a register-pair store (the canonical
         "stp x29, x30, [sp, #-N]!" frame save).  */
      else if ((inst.opcode->iclass == ldstpair_off
                || inst.opcode->iclass == ldstpair_indexed)
               && inst.operands[2].addr.preind
               && strcmp ("stp", inst.opcode->name) == 0)
          unsigned rt1 = inst.operands[0].reg.regno;
          unsigned rt2 = inst.operands[1].reg.regno;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);
          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
          /* Pre/post-indexed addressing writes the new address back
             into the base register.  */
          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
      else if (inst.opcode->iclass == testbranch)
          /* Stop analysis on branch.  */
          debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
                        core_addr_to_string_nz (start), insn);
  /* NOTE(review): in the upstream source this cleanup sits inside the
     "CACHE == NULL" early-return path.  */
  do_cleanups (back_to);
  /* Prefer the frame pointer if the prologue copied SP into it.  */
  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
  /* Record, for each register saved during the prologue, its offset
     within the frame.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
  do_cleanups (back_to);
/* Implement the "skip_prologue" gdbarch method.  Return the address
   just past the prologue of the function containing PC, preferring
   debug-info line data over instruction analysis.
   NOTE(review): return-type line, braces and part of the limit_pc
   fallback appear lost in this extraction.  */
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  CORE_ADDR func_addr, limit_pc;
  struct symtab_and_line sal;
  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);
      if (post_prologue_pc != 0)
        return max (pc, post_prologue_pc);
  /* Can't determine prologue from the symbol table, need to examine
  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  limit_pc = pc + 128;	/* Magic.  */
  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE with the frame register, frame size and saved registers.
   Falls back to assuming a conventional fp/lr frame when no symbolic
   prologue bounds can be found.
   NOTE(review): braces and several statements (including the
   find_pc_partial_function trailing arguments and the fallback
   "else" structure) are missing from this extraction.  */
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  cache->prev_pc = prev_pc;
  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;
  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);
      /* No line info so use the current PC.  */
      prologue_end = prev_pc;
      else if (sal.end < prologue_end)
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
      /* Never scan past the frame's own resume PC.  */
      prologue_end = min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
      /* Fallback path: assume the conventional 16-byte fp/lr frame
         anchored at the frame-pointer register.  */
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;   /* fp saved at frame base.  */
      cache->saved_regs[30].addr = 8;   /* lr saved just above fp.  */
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available — callers wrap it accordingly.
   NOTE(review): return type, braces and the early-return when no frame
   register was found appear lost in this extraction.  */
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
  CORE_ADDR unwound_fp;
  aarch64_scan_prologue (this_frame, cache);
  /* framereg == -1 means the scan failed to find a frame.  */
  if (cache->framereg == -1)
  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  /* The caller's SP is the frame base plus the frame size.  */
  cache->prev_sp = unwound_fp + cache->framesize;
  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;
  cache->func = get_frame_func (this_frame);
  cache->available_p = 1;
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  A NOT_AVAILABLE_ERROR during filling is swallowed
   (the cache is simply left unavailable); other errors propagate.
   NOTE(review): the TRY/END_CATCH scaffolding and final return appear
   partially lost in this extraction.  */
static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache;
  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;
  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  aarch64_make_prologue_cache_1 (this_frame, cache);
  CATCH (ex, RETURN_MASK_ERROR)
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
/* Implement the "stop_reason" frame_unwind method for the prologue
   unwinder: report why (or whether) unwinding should halt at this
   frame.  */
static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;
  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;
  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;
  return UNWIND_NO_REASON;
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  Implements the
   "this_id" frame_unwind method.
   NOTE(review): the return-type line, braces and "else" appear lost
   in this extraction.  */
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  *this_id = frame_id_build (cache->prev_sp, cache->func);
/* Implement the "prev_register" frame_unwind method: return the value
   PREV_REGNUM had in the frame that called THIS_FRAME.  */
static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
  /* Everything else comes from the saved-register table built by the
     prologue analysis.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
/* AArch64 prologue unwinder.  Dispatch table wiring the methods above
   into the frame-unwind machinery.
   NOTE(review): the frame-type field (likely NORMAL_FRAME), the
   dealloc_cache slot and the braces appear lost in this extraction.  */
struct frame_unwind aarch64_prologue_unwind =
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  default_frame_sniffer
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  Stub variant: no prologue scan, just SP and PC of the
   stub frame.  NOT_AVAILABLE_ERROR is swallowed; other errors rethrow.
   NOTE(review): TRY/END_CATCH scaffolding and the final return appear
   partially lost in this extraction.  */
static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache;
  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;
  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  cache->prev_sp = get_frame_register_unsigned (this_frame,
  cache->prev_pc = get_frame_pc (this_frame);
  cache->available_p = 1;
  CATCH (ex, RETURN_MASK_ERROR)
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
/* Implement the "stop_reason" frame_unwind method for the stub
   unwinder.  */
static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);
  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;
  return UNWIND_NO_REASON;
/* Our frame ID for a stub frame is the current SP and LR.  Implements
   the "this_id" frame_unwind method for stubs.
   NOTE(review): return-type line, braces and "else" appear lost in
   this extraction.  */
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);
  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
/* Implement the "sniffer" frame_unwind method: decide whether the stub
   unwinder applies to THIS_FRAME.  It does for PLT entries and for
   frames whose code memory cannot be read.
   NOTE(review): the return-type line, braces, the declaration of
   `dummy` and the return statements appear lost in this extraction.  */
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
  CORE_ADDR addr_in_block;
  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub winder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
/* AArch64 stub unwinder.  Shares prev_register with the prologue
   unwinder but uses the stub-specific id/sniffer.
   NOTE(review): the frame-type field and braces appear lost in this
   extraction.  */
struct frame_unwind aarch64_stub_unwind =
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  aarch64_stub_unwind_sniffer
/* Return the frame base address of *THIS_FRAME: the caller's SP minus
   the frame size discovered by prologue analysis.
   NOTE(review): the return-type line and braces appear lost in this
   extraction.  */
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  return cache->prev_sp - cache->framesize;
/* AArch64 default frame base information.  The same address serves as
   frame base, locals base and args base.
   NOTE(review): braces appear lost in this extraction.  */
struct frame_base aarch64_normal_base =
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.
   NOTE(review): the register argument (presumably AARCH64_SP_REGNUM)
   and braces appear lost in this extraction.  */
static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
  return frame_id_build (get_frame_register_unsigned (this_frame,
                         get_frame_pc (this_frame));
/* Implement the "unwind_pc" gdbarch method: return the resume PC of
   the frame calling THIS_FRAME, read from the unwound PC register.
   NOTE(review): the declaration being initialized and the return
   statement appear lost in this extraction.  */
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
/* Implement the "unwind_sp" gdbarch method: return the caller's stack
   pointer by unwinding SP from THIS_FRAME.
   NOTE(review): the return-type line and braces appear lost in this
   extraction.  */
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
/* Return the value of the REGNUM register in the previous frame of
   THIS_FRAME, for the DWARF2 unwinder.  Only the PC is handled here
   (unwound via LR); any other register is an internal error because
   this function is installed only for AARCH64_PC_REGNUM below.
   NOTE(review): the switch statement header, braces and the lr
   declaration appear lost in this extraction.  */
static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
/* Implement the "init_reg" dwarf2_frame_ops method: set the default
   DWARF2 unwind rule for REGNUM.  PC is computed by the function above
   (from LR); SP is the CFA.
   NOTE(review): the return-type line, switch header, break statements
   and braces appear lost in this extraction.  */
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.
   NOTE(review): the struct header/typedef and the len field appear
   lost in this extraction.  */
/* Value to pass on stack.  */
const gdb_byte *data;
/* Size in bytes of value to pass on stack.  */
DEF_VEC_O (stack_item_t);
/* Return the alignment (in bytes) of the given type.  Scalars align to
   their size; arrays and complex types to their element type; structs
   and unions to the maximum alignment of their members.
   NOTE(review): return-type line, most case labels, the align/falign
   declarations and the max-accumulation loop body appear lost in this
   extraction.  */
aarch64_type_align (struct type *t)
  t = check_typedef (t);
  switch (TYPE_CODE (t))
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
      return TYPE_LENGTH (t);
    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (n = 0; n < TYPE_NFIELDS (t); n++)
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
/* Return 1 if *TY is a homogeneous floating-point aggregate as
   defined in the AAPCS64 ABI document; otherwise return 0.
   An HFA is an array of up to 4 floats, or a struct/union of 1-4
   members all of the same floating-point type and size.
   NOTE(review): braces, several return statements and the `i`
   declaration appear lost in this extraction; the array-case length
   test visible below compares TYPE_LENGTH (bytes) against 4, which
   looks suspicious for an element-count check — verify upstream.  */
is_hfa (struct type *ty)
  switch (TYPE_CODE (ty))
    case TYPE_CODE_ARRAY:
        struct type *target_ty = TYPE_TARGET_TYPE (ty);
        if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          struct type *member0_type;
          member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
          if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
              /* All remaining members must match the first in both
                 type code and length.  */
              for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  struct type *member1_type;
                  member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                  if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                      || (TYPE_LENGTH (member0_type)
                          != TYPE_LENGTH (member1_type)))
/* AArch64 function call information structure.  Tracks AAPCS64
   argument-marshalling state while building a dummy call.
   NOTE(review): the field declarations (argnum, ngrn, nsrn, nsaa) have
   been lost from this extraction; only their comments and the vector
   field remain.  */
struct aarch64_call_info
/* the current argument number.  */
/* The next general purpose register number, equivalent to NGRN as
   described in the AArch64 Procedure Call Standard.  */
/* The next SIMD and floating point register number, equivalent to
   NSRN as described in the AArch64 Procedure Call Standard.  */
/* The next stacked argument address, equivalent to NSAA as
   described in the AArch64 Procedure Call Standard.  */
/* Stack item vector.  */
VEC(stack_item_t) *si;
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsbile for ensuring sufficient registers are available.
   Splits ARG into X_REGISTER_SIZE chunks, one per register, advancing
   through the buffer; sub-word struct/union chunks are left-shifted on
   big-endian targets per AAPCS64.
   NOTE(review): return-type line, the loop structure consuming LEN and
   advancing BUF/REGNUM, and braces appear lost in this extraction.  */
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);
  int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
  CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
  /* Adjust sub-word struct/union args when big-endian.  */
  if (byte_order == BFD_ENDIAN_BIG
      && partial_len < X_REGISTER_SIZE
      && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
    regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
      debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                    gdbarch_register_name (gdbarch, regnum),
                    phex (regval, X_REGISTER_SIZE));
  regcache_cooked_write_unsigned (regcache, regnum, regval);
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.
   NOTE(review): the return-type line, the nsrn < 8 availability check,
   braces and the return statements appear lost in this extraction.  */
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int regnum = AARCH64_V0_REGNUM + info->nsrn;
  gdb_byte reg[V_REGISTER_SIZE];
  memset (reg, 0, sizeof (reg));
  /* PCS C.1, the argument is allocated to the least significant
     bits of V register.  */
  memcpy (reg, buf, len);
  regcache_cooked_write (regcache, regnum, reg);
      debug_printf ("arg %d in %s\n", info->argnum,
                    gdbarch_register_name (gdbarch, regnum));
/* Marshall an argument onto the stack: push a stack_item for the
   value, preceded if necessary by alignment padding so the item lands
   on its natural (>= 8-byte) alignment.
   NOTE(review): the return-type line, braces, the align declaration,
   item construction and nsaa updates appear lost in this extraction;
   the surviving statement order also looks shuffled relative to
   upstream — verify before editing.  */
pass_on_stack (struct aarch64_call_info *info, struct type *type,
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  align = aarch64_type_align (type);
  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);
  /* The AArch64 PCS requires at most doubleword alignment.  */
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
  VEC_safe_push (stack_item_t, info->si, &item);
  if (info->nsaa & (align - 1))
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));
      VEC_safe_push (stack_item_t, info->si, &item);
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack (AAPCS64 rule C.13).
   NOTE(review): the return-type line, braces and the ngrn=8 register
   exhaustion handling appear partially lost in this extraction.  */
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
  int len = TYPE_LENGTH (type);
  /* Round up to the number of X registers the value occupies.  */
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
  /* PCS C.13 - Pass in registers if we have enough spare */
  if (info->ngrn + nregs <= 8)
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
  pass_on_stack (info, type, arg);
/* Pass a value in a V register, or on the stack if insufficient are
   available (pass_in_v returns 0 when the V registers are exhausted).
   NOTE(review): the return-type line, the type/arg parameter line and
   braces appear lost in this extraction.  */
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
                  value_contents (arg)))
    pass_on_stack (info, type, arg);
/* Implement the "push_dummy_call" gdbarch method: marshal NARGS
   arguments per AAPCS64 (X registers, V registers, or stack), set up
   the return address and the struct-return pointer, flush the queued
   stack items below SP, and write the final SP.  Returns the new SP.

   NOTE(review): this extraction has dropped many lines (the "int
   nargs" parameter, argnum/len declarations, braces, break statements,
   several case labels and the final "return sp").  The surviving
   tokens are kept verbatim; verify against upstream before editing.  */
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;
  memset (&info, 0, sizeof (info));
  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the targets implementation of push_dummy_call.
     Rather that change the target interface we call the language code
     directly ourselves.  */
  func_type = check_typedef (value_type (function));
  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);
  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);
  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
        debug_printf ("struct return in %s = 0x%s\n",
                      gdbarch_register_name (gdbarch,
                                             AARCH64_STRUCT_RETURN_REGNUM),
                      paddress (gdbarch, struct_addr));
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
  /* Marshal each argument according to its type code.  */
  for (argnum = 0; argnum < nargs; argnum++)
      struct value *arg = args[argnum];
      struct type *arg_type;
      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);
      switch (TYPE_CODE (arg_type))
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
            /* Promote to 32 bit integer.  */
            if (TYPE_UNSIGNED (arg_type))
              arg_type = builtin_type (gdbarch)->builtin_uint32;
              arg_type = builtin_type (gdbarch)->builtin_int32;
            arg = value_cast (arg_type, arg);
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
        case TYPE_CODE_COMPLEX:
            /* Complex: real part then imaginary part, each in its own
               V register.  */
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type), buf);
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type),
                         buf + TYPE_LENGTH (target_type));
            pass_on_stack (&info, arg_type, arg);
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa (arg_type))
              int elements = TYPE_NFIELDS (arg_type);
              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                  /* One V register per member.  */
                  for (i = 0; i < elements; i++)
                      /* We know that we have sufficient registers
                         available therefore this will never fallback
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));
                      pass_in_v_or_stack (gdbarch, regcache, &info,
                  pass_on_stack (&info, arg_type, arg);
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */
              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);
              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);
              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
              /* PCS C.15 / C.18 multiple values pass.  */
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
  /* Make sure stack retains 16 byte alignment.  */
  sp -= 16 - (info.nsaa & 15);
  /* Pop queued stack items (FILO) and write them below SP.  */
  while (!VEC_empty (stack_item_t, info.si))
      stack_item_t *si = VEC_last (stack_item_t, info.si);
      write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
  VEC_free (stack_item_t, info.si);
  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1369 /* Implement the "frame_align" gdbarch method. */
1372 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1374 /* Align the stack to sixteen bytes. */
1375 return sp & ~(CORE_ADDR) 15;
1378 /* Return the type for an AdvSISD Q register. */
1380 static struct type *
1381 aarch64_vnq_type (struct gdbarch *gdbarch)
1383 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1385 if (tdep->vnq_type == NULL)
1390 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1393 elem = builtin_type (gdbarch)->builtin_uint128;
1394 append_composite_type_field (t, "u", elem);
1396 elem = builtin_type (gdbarch)->builtin_int128;
1397 append_composite_type_field (t, "s", elem);
1402 return tdep->vnq_type;
1405 /* Return the type for an AdvSISD D register. */
1407 static struct type *
1408 aarch64_vnd_type (struct gdbarch *gdbarch)
1410 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1412 if (tdep->vnd_type == NULL)
1417 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1420 elem = builtin_type (gdbarch)->builtin_double;
1421 append_composite_type_field (t, "f", elem);
1423 elem = builtin_type (gdbarch)->builtin_uint64;
1424 append_composite_type_field (t, "u", elem);
1426 elem = builtin_type (gdbarch)->builtin_int64;
1427 append_composite_type_field (t, "s", elem);
1432 return tdep->vnd_type;
1435 /* Return the type for an AdvSISD S register. */
1437 static struct type *
1438 aarch64_vns_type (struct gdbarch *gdbarch)
1440 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1442 if (tdep->vns_type == NULL)
1447 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1450 elem = builtin_type (gdbarch)->builtin_float;
1451 append_composite_type_field (t, "f", elem);
1453 elem = builtin_type (gdbarch)->builtin_uint32;
1454 append_composite_type_field (t, "u", elem);
1456 elem = builtin_type (gdbarch)->builtin_int32;
1457 append_composite_type_field (t, "s", elem);
1462 return tdep->vns_type;
1465 /* Return the type for an AdvSISD H register. */
1467 static struct type *
1468 aarch64_vnh_type (struct gdbarch *gdbarch)
1470 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1472 if (tdep->vnh_type == NULL)
1477 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1480 elem = builtin_type (gdbarch)->builtin_uint16;
1481 append_composite_type_field (t, "u", elem);
1483 elem = builtin_type (gdbarch)->builtin_int16;
1484 append_composite_type_field (t, "s", elem);
1489 return tdep->vnh_type;
1492 /* Return the type for an AdvSISD B register. */
1494 static struct type *
1495 aarch64_vnb_type (struct gdbarch *gdbarch)
1497 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1499 if (tdep->vnb_type == NULL)
1504 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1507 elem = builtin_type (gdbarch)->builtin_uint8;
1508 append_composite_type_field (t, "u", elem);
1510 elem = builtin_type (gdbarch)->builtin_int8;
1511 append_composite_type_field (t, "s", elem);
1516 return tdep->vnb_type;
1519 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1522 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1524 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1525 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1527 if (reg == AARCH64_DWARF_SP)
1528 return AARCH64_SP_REGNUM;
1530 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1531 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1537 /* Implement the "print_insn" gdbarch method. */
1540 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1542 info->symbols = NULL;
1543 return print_insn_aarch64 (memaddr, info);
1546 /* AArch64 BRK software debug mode instruction.
1547 Note that AArch64 code is always little-endian.
1548 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1549 static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1551 /* Implement the "breakpoint_from_pc" gdbarch method. */
1553 static const gdb_byte *
1554 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1557 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1559 *lenptr = sizeof (aarch64_default_breakpoint);
1560 return aarch64_default_breakpoint;
1563 /* Extract from an array REGS containing the (raw) register state a
1564 function return value of type TYPE, and copy that, in virtual
1565 format, into VALBUF. */
1568 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1571 struct gdbarch *gdbarch = get_regcache_arch (regs);
1572 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1574 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1576 bfd_byte buf[V_REGISTER_SIZE];
1577 int len = TYPE_LENGTH (type);
1579 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1580 memcpy (valbuf, buf, len);
1582 else if (TYPE_CODE (type) == TYPE_CODE_INT
1583 || TYPE_CODE (type) == TYPE_CODE_CHAR
1584 || TYPE_CODE (type) == TYPE_CODE_BOOL
1585 || TYPE_CODE (type) == TYPE_CODE_PTR
1586 || TYPE_CODE (type) == TYPE_CODE_REF
1587 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1589 /* If the the type is a plain integer, then the access is
1590 straight-forward. Otherwise we have to play around a bit
1592 int len = TYPE_LENGTH (type);
1593 int regno = AARCH64_X0_REGNUM;
1598 /* By using store_unsigned_integer we avoid having to do
1599 anything special for small big-endian values. */
1600 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1601 store_unsigned_integer (valbuf,
1602 (len > X_REGISTER_SIZE
1603 ? X_REGISTER_SIZE : len), byte_order, tmp);
1604 len -= X_REGISTER_SIZE;
1605 valbuf += X_REGISTER_SIZE;
1608 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1610 int regno = AARCH64_V0_REGNUM;
1611 bfd_byte buf[V_REGISTER_SIZE];
1612 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1613 int len = TYPE_LENGTH (target_type);
1615 regcache_cooked_read (regs, regno, buf);
1616 memcpy (valbuf, buf, len);
1618 regcache_cooked_read (regs, regno + 1, buf);
1619 memcpy (valbuf, buf, len);
1622 else if (is_hfa (type))
1624 int elements = TYPE_NFIELDS (type);
1625 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1626 int len = TYPE_LENGTH (member_type);
1629 for (i = 0; i < elements; i++)
1631 int regno = AARCH64_V0_REGNUM + i;
1632 bfd_byte buf[V_REGISTER_SIZE];
1636 debug_printf ("read HFA return value element %d from %s\n",
1638 gdbarch_register_name (gdbarch, regno));
1640 regcache_cooked_read (regs, regno, buf);
1642 memcpy (valbuf, buf, len);
1648 /* For a structure or union the behaviour is as if the value had
1649 been stored to word-aligned memory and then loaded into
1650 registers with 64-bit load instruction(s). */
1651 int len = TYPE_LENGTH (type);
1652 int regno = AARCH64_X0_REGNUM;
1653 bfd_byte buf[X_REGISTER_SIZE];
1657 regcache_cooked_read (regs, regno++, buf);
1658 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1659 len -= X_REGISTER_SIZE;
1660 valbuf += X_REGISTER_SIZE;
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  /* The unused local "code" (enum type_code) has been removed; the
     visible logic never consulted it.  */
  type = check_typedef (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
	 used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */
      return 1;
    }

  return 0;
}
1700 /* Write into appropriate registers a function return value of type
1701 TYPE, given in virtual format. */
1704 aarch64_store_return_value (struct type *type, struct regcache *regs,
1705 const gdb_byte *valbuf)
1707 struct gdbarch *gdbarch = get_regcache_arch (regs);
1708 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1710 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1712 bfd_byte buf[V_REGISTER_SIZE];
1713 int len = TYPE_LENGTH (type);
1715 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1716 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1718 else if (TYPE_CODE (type) == TYPE_CODE_INT
1719 || TYPE_CODE (type) == TYPE_CODE_CHAR
1720 || TYPE_CODE (type) == TYPE_CODE_BOOL
1721 || TYPE_CODE (type) == TYPE_CODE_PTR
1722 || TYPE_CODE (type) == TYPE_CODE_REF
1723 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1725 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1727 /* Values of one word or less are zero/sign-extended and
1729 bfd_byte tmpbuf[X_REGISTER_SIZE];
1730 LONGEST val = unpack_long (type, valbuf);
1732 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1733 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1737 /* Integral values greater than one word are stored in
1738 consecutive registers starting with r0. This will always
1739 be a multiple of the regiser size. */
1740 int len = TYPE_LENGTH (type);
1741 int regno = AARCH64_X0_REGNUM;
1745 regcache_cooked_write (regs, regno++, valbuf);
1746 len -= X_REGISTER_SIZE;
1747 valbuf += X_REGISTER_SIZE;
1751 else if (is_hfa (type))
1753 int elements = TYPE_NFIELDS (type);
1754 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1755 int len = TYPE_LENGTH (member_type);
1758 for (i = 0; i < elements; i++)
1760 int regno = AARCH64_V0_REGNUM + i;
1761 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
1765 debug_printf ("write HFA return value element %d to %s\n",
1767 gdbarch_register_name (gdbarch, regno));
1770 memcpy (tmpbuf, valbuf, len);
1771 regcache_cooked_write (regs, regno, tmpbuf);
1777 /* For a structure or union the behaviour is as if the value had
1778 been stored to word-aligned memory and then loaded into
1779 registers with 64-bit load instruction(s). */
1780 int len = TYPE_LENGTH (type);
1781 int regno = AARCH64_X0_REGNUM;
1782 bfd_byte tmpbuf[X_REGISTER_SIZE];
1786 memcpy (tmpbuf, valbuf,
1787 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1788 regcache_cooked_write (regs, regno++, tmpbuf);
1789 len -= X_REGISTER_SIZE;
1790 valbuf += X_REGISTER_SIZE;
1795 /* Implement the "return_value" gdbarch method. */
1797 static enum return_value_convention
1798 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
1799 struct type *valtype, struct regcache *regcache,
1800 gdb_byte *readbuf, const gdb_byte *writebuf)
1802 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1804 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
1805 || TYPE_CODE (valtype) == TYPE_CODE_UNION
1806 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
1808 if (aarch64_return_in_memory (gdbarch, valtype))
1811 debug_printf ("return value in memory\n");
1812 return RETURN_VALUE_STRUCT_CONVENTION;
1817 aarch64_store_return_value (valtype, regcache, writebuf);
1820 aarch64_extract_return_value (valtype, regcache, readbuf);
1823 debug_printf ("return value in registers\n");
1825 return RETURN_VALUE_REGISTER_CONVENTION;
1828 /* Implement the "get_longjmp_target" gdbarch method. */
1831 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1834 gdb_byte buf[X_REGISTER_SIZE];
1835 struct gdbarch *gdbarch = get_frame_arch (frame);
1836 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1837 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1839 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
1841 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
1845 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
1849 /* Implement the "gen_return_address" gdbarch method. */
1852 aarch64_gen_return_address (struct gdbarch *gdbarch,
1853 struct agent_expr *ax, struct axs_value *value,
1856 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
1857 value->kind = axs_lvalue_register;
1858 value->u.reg = AARCH64_LR_REGNUM;
1862 /* Return the pseudo register name corresponding to register regnum. */
1865 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
1867 static const char *const q_name[] =
1869 "q0", "q1", "q2", "q3",
1870 "q4", "q5", "q6", "q7",
1871 "q8", "q9", "q10", "q11",
1872 "q12", "q13", "q14", "q15",
1873 "q16", "q17", "q18", "q19",
1874 "q20", "q21", "q22", "q23",
1875 "q24", "q25", "q26", "q27",
1876 "q28", "q29", "q30", "q31",
1879 static const char *const d_name[] =
1881 "d0", "d1", "d2", "d3",
1882 "d4", "d5", "d6", "d7",
1883 "d8", "d9", "d10", "d11",
1884 "d12", "d13", "d14", "d15",
1885 "d16", "d17", "d18", "d19",
1886 "d20", "d21", "d22", "d23",
1887 "d24", "d25", "d26", "d27",
1888 "d28", "d29", "d30", "d31",
1891 static const char *const s_name[] =
1893 "s0", "s1", "s2", "s3",
1894 "s4", "s5", "s6", "s7",
1895 "s8", "s9", "s10", "s11",
1896 "s12", "s13", "s14", "s15",
1897 "s16", "s17", "s18", "s19",
1898 "s20", "s21", "s22", "s23",
1899 "s24", "s25", "s26", "s27",
1900 "s28", "s29", "s30", "s31",
1903 static const char *const h_name[] =
1905 "h0", "h1", "h2", "h3",
1906 "h4", "h5", "h6", "h7",
1907 "h8", "h9", "h10", "h11",
1908 "h12", "h13", "h14", "h15",
1909 "h16", "h17", "h18", "h19",
1910 "h20", "h21", "h22", "h23",
1911 "h24", "h25", "h26", "h27",
1912 "h28", "h29", "h30", "h31",
1915 static const char *const b_name[] =
1917 "b0", "b1", "b2", "b3",
1918 "b4", "b5", "b6", "b7",
1919 "b8", "b9", "b10", "b11",
1920 "b12", "b13", "b14", "b15",
1921 "b16", "b17", "b18", "b19",
1922 "b20", "b21", "b22", "b23",
1923 "b24", "b25", "b26", "b27",
1924 "b28", "b29", "b30", "b31",
1927 regnum -= gdbarch_num_regs (gdbarch);
1929 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1930 return q_name[regnum - AARCH64_Q0_REGNUM];
1932 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1933 return d_name[regnum - AARCH64_D0_REGNUM];
1935 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1936 return s_name[regnum - AARCH64_S0_REGNUM];
1938 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1939 return h_name[regnum - AARCH64_H0_REGNUM];
1941 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1942 return b_name[regnum - AARCH64_B0_REGNUM];
1944 internal_error (__FILE__, __LINE__,
1945 _("aarch64_pseudo_register_name: bad register number %d"),
1949 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
1951 static struct type *
1952 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
1954 regnum -= gdbarch_num_regs (gdbarch);
1956 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1957 return aarch64_vnq_type (gdbarch);
1959 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1960 return aarch64_vnd_type (gdbarch);
1962 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1963 return aarch64_vns_type (gdbarch);
1965 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1966 return aarch64_vnh_type (gdbarch);
1968 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1969 return aarch64_vnb_type (gdbarch);
1971 internal_error (__FILE__, __LINE__,
1972 _("aarch64_pseudo_register_type: bad register number %d"),
1976 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
1979 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
1980 struct reggroup *group)
1982 regnum -= gdbarch_num_regs (gdbarch);
1984 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1985 return group == all_reggroup || group == vector_reggroup;
1986 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1987 return (group == all_reggroup || group == vector_reggroup
1988 || group == float_reggroup);
1989 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1990 return (group == all_reggroup || group == vector_reggroup
1991 || group == float_reggroup);
1992 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1993 return group == all_reggroup || group == vector_reggroup;
1994 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1995 return group == all_reggroup || group == vector_reggroup;
1997 return group == all_reggroup;
2000 /* Implement the "pseudo_register_read_value" gdbarch method. */
2002 static struct value *
2003 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2004 struct regcache *regcache,
2007 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2008 struct value *result_value;
2011 result_value = allocate_value (register_type (gdbarch, regnum));
2012 VALUE_LVAL (result_value) = lval_register;
2013 VALUE_REGNUM (result_value) = regnum;
2014 buf = value_contents_raw (result_value);
2016 regnum -= gdbarch_num_regs (gdbarch);
2018 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2020 enum register_status status;
2023 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2024 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2025 if (status != REG_VALID)
2026 mark_value_bytes_unavailable (result_value, 0,
2027 TYPE_LENGTH (value_type (result_value)));
2029 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2030 return result_value;
2033 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2035 enum register_status status;
2038 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2039 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2040 if (status != REG_VALID)
2041 mark_value_bytes_unavailable (result_value, 0,
2042 TYPE_LENGTH (value_type (result_value)));
2044 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2045 return result_value;
2048 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2050 enum register_status status;
2053 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2054 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2055 if (status != REG_VALID)
2056 mark_value_bytes_unavailable (result_value, 0,
2057 TYPE_LENGTH (value_type (result_value)));
2059 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2060 return result_value;
2063 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2065 enum register_status status;
2068 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2069 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2070 if (status != REG_VALID)
2071 mark_value_bytes_unavailable (result_value, 0,
2072 TYPE_LENGTH (value_type (result_value)));
2074 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2075 return result_value;
2078 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2080 enum register_status status;
2083 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2084 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2085 if (status != REG_VALID)
2086 mark_value_bytes_unavailable (result_value, 0,
2087 TYPE_LENGTH (value_type (result_value)));
2089 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2090 return result_value;
2093 gdb_assert_not_reached ("regnum out of bound");
2096 /* Implement the "pseudo_register_write" gdbarch method. */
2099 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2100 int regnum, const gdb_byte *buf)
2102 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2104 /* Ensure the register buffer is zero, we want gdb writes of the
2105 various 'scalar' pseudo registers to behavior like architectural
2106 writes, register width bytes are written the remainder are set to
2108 memset (reg_buf, 0, sizeof (reg_buf));
2110 regnum -= gdbarch_num_regs (gdbarch);
2112 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2114 /* pseudo Q registers */
2117 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2118 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2119 regcache_raw_write (regcache, v_regnum, reg_buf);
2123 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2125 /* pseudo D registers */
2128 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2129 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2130 regcache_raw_write (regcache, v_regnum, reg_buf);
2134 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2138 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2139 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2140 regcache_raw_write (regcache, v_regnum, reg_buf);
2144 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2146 /* pseudo H registers */
2149 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2150 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2151 regcache_raw_write (regcache, v_regnum, reg_buf);
2155 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2157 /* pseudo B registers */
2160 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2161 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2162 regcache_raw_write (regcache, v_regnum, reg_buf);
2166 gdb_assert_not_reached ("regnum out of bound");
/* Callback function for user_reg_add.  BATON points at the register
   number of the register the alias maps to.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  return value_of_register (*(const int *) baton, frame);
}
2180 /* Implement the "software_single_step" gdbarch method, needed to
2181 single step through atomic sequences on AArch64. */
2184 aarch64_software_single_step (struct frame_info *frame)
2186 struct gdbarch *gdbarch = get_frame_arch (frame);
2187 struct address_space *aspace = get_frame_address_space (frame);
2188 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2189 const int insn_size = 4;
2190 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2191 CORE_ADDR pc = get_frame_pc (frame);
2192 CORE_ADDR breaks[2] = { -1, -1 };
2194 CORE_ADDR closing_insn = 0;
2195 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2196 byte_order_for_code);
2199 int bc_insn_count = 0; /* Conditional branch instruction count. */
2200 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2203 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2206 /* Look for a Load Exclusive instruction which begins the sequence. */
2207 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2210 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2213 insn = read_memory_unsigned_integer (loc, insn_size,
2214 byte_order_for_code);
2216 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2218 /* Check if the instruction is a conditional branch. */
2219 if (inst.opcode->iclass == condbranch)
2221 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2223 if (bc_insn_count >= 1)
2226 /* It is, so we'll try to set a breakpoint at the destination. */
2227 breaks[1] = loc + inst.operands[0].imm.value;
2233 /* Look for the Store Exclusive which closes the atomic sequence. */
2234 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2241 /* We didn't find a closing Store Exclusive instruction, fall back. */
2245 /* Insert breakpoint after the end of the atomic sequence. */
2246 breaks[0] = loc + insn_size;
2248 /* Check for duplicated breakpoints, and also check that the second
2249 breakpoint is not within the atomic sequence. */
2251 && (breaks[1] == breaks[0]
2252 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2253 last_breakpoint = 0;
2255 /* Insert the breakpoint at the end of the sequence, and one at the
2256 destination of the conditional branch, if it exists. */
2257 for (index = 0; index <= last_breakpoint; index++)
2258 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2263 struct displaced_step_closure
2265 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2266 is being displaced stepping. */
2269 /* PC adjustment offset after displaced stepping. */
2273 /* Data when visiting instructions for displaced stepping. */
2275 struct aarch64_displaced_step_data
2277 struct aarch64_insn_data base;
2279 /* The address where the instruction will be executed at. */
2281 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2282 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2283 /* Number of instructions in INSN_BUF. */
2284 unsigned insn_count;
2285 /* Registers when doing displaced stepping. */
2286 struct regcache *regs;
2288 struct displaced_step_closure *dsc;
2291 /* Implementation of aarch64_insn_visitor method "b". */
2294 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2295 struct aarch64_insn_data *data)
2297 struct aarch64_displaced_step_data *dsd
2298 = (struct aarch64_displaced_step_data *) data;
2299 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2301 if (can_encode_int32 (new_offset, 28))
2303 /* Emit B rather than BL, because executing BL on a new address
2304 will get the wrong address into LR. In order to avoid this,
2305 we emit B, and update LR if the instruction is BL. */
2306 emit_b (dsd->insn_buf, 0, new_offset);
2312 emit_nop (dsd->insn_buf);
2314 dsd->dsc->pc_adjust = offset;
2320 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2321 data->insn_addr + 4);
2325 /* Implementation of aarch64_insn_visitor method "b_cond". */
2328 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2329 struct aarch64_insn_data *data)
2331 struct aarch64_displaced_step_data *dsd
2332 = (struct aarch64_displaced_step_data *) data;
2333 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2335 /* GDB has to fix up PC after displaced step this instruction
2336 differently according to the condition is true or false. Instead
2337 of checking COND against conditional flags, we can use
2338 the following instructions, and GDB can tell how to fix up PC
2339 according to the PC value.
2341 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2347 emit_bcond (dsd->insn_buf, cond, 8);
2349 dsd->dsc->pc_adjust = offset;
2350 dsd->insn_count = 1;
2353 /* Dynamically allocate a new register. If we know the register
2354 statically, we should make it a global as above instead of using this
2357 static struct aarch64_register
2358 aarch64_register (unsigned num, int is64)
2360 return (struct aarch64_register) { num, is64 };
2363 /* Implementation of aarch64_insn_visitor method "cb". */
2366 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2367 const unsigned rn, int is64,
2368 struct aarch64_insn_data *data)
2370 struct aarch64_displaced_step_data *dsd
2371 = (struct aarch64_displaced_step_data *) data;
2372 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2374 /* The offset is out of range for a compare and branch
2375 instruction. We can use the following instructions instead:
2377 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2382 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2383 dsd->insn_count = 1;
2385 dsd->dsc->pc_adjust = offset;
2388 /* Implementation of aarch64_insn_visitor method "tb". */
2391 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2392 const unsigned rt, unsigned bit,
2393 struct aarch64_insn_data *data)
2395 struct aarch64_displaced_step_data *dsd
2396 = (struct aarch64_displaced_step_data *) data;
2397 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2399 /* The offset is out of range for a test bit and branch
2400 instruction We can use the following instructions instead:
2402 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2408 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2409 dsd->insn_count = 1;
2411 dsd->dsc->pc_adjust = offset;
2414 /* Implementation of aarch64_insn_visitor method "adr". */
2417 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2418 const int is_adrp, struct aarch64_insn_data *data)
2420 struct aarch64_displaced_step_data *dsd
2421 = (struct aarch64_displaced_step_data *) data;
2422 /* We know exactly the address the ADR{P,} instruction will compute.
2423 We can just write it to the destination register. */
2424 CORE_ADDR address = data->insn_addr + offset;
2428 /* Clear the lower 12 bits of the offset to get the 4K page. */
2429 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2433 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2436 dsd->dsc->pc_adjust = 4;
2437 emit_nop (dsd->insn_buf);
2438 dsd->insn_count = 1;
2441 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2444 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2445 const unsigned rt, const int is64,
2446 struct aarch64_insn_data *data)
2448 struct aarch64_displaced_step_data *dsd
2449 = (struct aarch64_displaced_step_data *) data;
2450 CORE_ADDR address = data->insn_addr + offset;
2451 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2453 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2457 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2458 aarch64_register (rt, 1), zero);
2460 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2461 aarch64_register (rt, 1), zero);
2463 dsd->dsc->pc_adjust = 4;
2466 /* Implementation of aarch64_insn_visitor method "others". */
2469 aarch64_displaced_step_others (const uint32_t insn,
2470 struct aarch64_insn_data *data)
2472 struct aarch64_displaced_step_data *dsd
2473 = (struct aarch64_displaced_step_data *) data;
2475 aarch64_emit_insn (dsd->insn_buf, insn);
2476 dsd->insn_count = 1;
2478 if ((insn & 0xfffffc1f) == 0xd65f0000)
2481 dsd->dsc->pc_adjust = 0;
2484 dsd->dsc->pc_adjust = 4;
2487 static const struct aarch64_insn_visitor visitor =
2489 aarch64_displaced_step_b,
2490 aarch64_displaced_step_b_cond,
2491 aarch64_displaced_step_cb,
2492 aarch64_displaced_step_tb,
2493 aarch64_displaced_step_adr,
2494 aarch64_displaced_step_ldr_literal,
2495 aarch64_displaced_step_others,
/* Implement the "displaced_step_copy_insn" gdbarch method.

   Read the 4-byte instruction at FROM, relocate it (possibly into
   several instructions) and write the result to the scratch pad at TO.
   Returns the closure describing the copied insn, or NULL-ish paths
   when the insn cannot be displaced-stepped (e.g. atomic sequences).  */

struct displaced_step_closure *
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
                                  CORE_ADDR from, CORE_ADDR to,
                                  struct regcache *regs)
  struct displaced_step_closure *dsc = NULL;
  /* Instructions are always little-endian regardless of data endianness.  */
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  struct aarch64_displaced_step_data dsd;

  if (aarch64_decode_insn (insn, &inst, 1) != 0)

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    /* We can't displaced step atomic sequences.  */

  dsc = XCNEW (struct displaced_step_closure);
  dsd.base.insn_addr = from;
  /* Dispatch to the per-class visitor; it fills dsd.insn_buf and
     dsd.insn_count and records the required PC adjustment.  */
  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
      /* Instruction can be relocated to scratch pad.  Copy
         relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
          if (debug_displaced)
              debug_printf ("displaced: writing insn ");
              debug_printf ("%.8x", dsd.insn_buf[i]);
              debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
          write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
                                         (ULONGEST) dsd.insn_buf[i]);
/* Implement the "displaced_step_fixup" gdbarch method.

   Called after the relocated instruction(s) at TO have executed; fix
   up the PC so execution resumes relative to the original address
   FROM, using the pc_adjust recorded by the copy phase.  */

aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
                              struct displaced_step_closure *dsc,
                              CORE_ADDR from, CORE_ADDR to,
                              struct regcache *regs)
  regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
    /* Condition is true.  */
  else if (pc - to == 4)
    /* Condition is false.  */
    gdb_assert_not_reached ("Unexpected PC value after displaced stepping");

  if (dsc->pc_adjust != 0)
      if (debug_displaced)
          debug_printf ("displaced: fixup: set PC to %s:%d\n",
                        paddress (gdbarch, from), dsc->pc_adjust);
      /* Resume at the original insn's address plus the adjustment.  */
      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
                                      from + dsc->pc_adjust);
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   CLOSURE is the copy-insn closure; body elided in this view.  */

aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
                                      struct displaced_step_closure *closure)
/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  struct tdesc_arch_data *tdesc_data = NULL;
  const struct target_desc *tdesc = info.target_desc;
  int have_fpa_registers = 1;
  const struct tdesc_feature *feature;
  int num_pseudo_regs = 0;

  /* Ensure we always have a target descriptor.  */
  if (!tdesc_has_registers (tdesc))
    tdesc = tdesc_aarch64;

  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");

  if (feature == NULL)

  tdesc_data = tdesc_data_alloc ();

  /* Validate the descriptor provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
                             aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Look for the V registers.  */
  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
      /* Validate the descriptor provides the mandatory V registers
         and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
        tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
                                 aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;
      /* Qn/Dn/Sn/Hn/Bn views of the V registers are pseudo registers.  */
      num_pseudo_regs += 32;    /* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;    /* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;    /* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;    /* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;    /* add the Bn scalar register pseudos */

      /* Error path: release the tdesc data when validation fails.  */
      tdesc_data_cleanup (tdesc_data);

  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    /* Found a match.  */

  if (best_arch != NULL)
      if (tdesc_data != NULL)
        tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;

  tdep = XCNEW (struct gdbarch_tdep);
  gdbarch = gdbarch_alloc (&info, tdep);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;             /* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;

  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);

  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
                                        aarch64_pseudo_register_reggroup_p);

  /* Fundamental type sizes and formats for the LP64 AAPCS64 ABI.  */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  /* AArch64 long double is IEEE binary128, same layout as IA64 quad.  */
  set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, aarch64_return_value);

  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdep_info = (void *) tdesc_data;
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);

  tdesc_use_registers (gdbarch, tdesc, tdesc_data);

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
                  value_of_aarch64_user_reg,
                  &aarch64_register_aliases[i].regnum);
/* Dump this file's tdep state to FILE for "maint print architecture".  */
aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
                      paddress (gdbarch, tdep->lowest_pc));
/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initializer: register the architecture, its target
   description, and the "set/show debug aarch64" maintenance command.  */
_initialize_aarch64_tdep (void)
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
                           &setdebuglist, &showdebuglist);
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate REGS as an array of LENGTH uint32_t register numbers and
   copy them from RECORD_BUF.  No-op when LENGTH is zero.  Caller owns
   the allocation (freed by deallocate_reg_mem).

   Note: the original text had "&REGS" mangled to the "registered"
   sign by an HTML-entity pass; restored here.  Wrapped in
   do { } while (0) so the macro behaves as a single statement.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate MEMS as an array of LENGTH aarch64_mem_r records and copy
   them from RECORD_BUF (laid out as len/addr pairs).  No-op when
   LENGTH is zero.  Caller owns the allocation.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
                memcpy(&MEMS->len, &RECORD_BUF[0], \
                       sizeof(struct aarch64_mem_r) * LENGTH); \
              } \
          } \
        while (0)
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory access: a length/address pair.  */
struct aarch64_mem_r
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */

/* Result codes returned by the per-class record handlers.  */
enum aarch64_record_result
  AARCH64_RECORD_SUCCESS,       /* Insn recorded successfully.  */
  AARCH64_RECORD_FAILURE,       /* Recording failed.  */
  AARCH64_RECORD_UNSUPPORTED,   /* Insn class not supported by record.  */
  AARCH64_RECORD_UNKNOWN        /* Insn could not be decoded.  */
/* Working state for recording a single instruction: the decoded insn,
   its address, and the register/memory side effects discovered so far.  */
typedef struct insn_decode_record_t
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
/* Record handler for data processing - register instructions.

   Decodes bit 28 to split the register group into the shifted/extended
   register forms versus the remaining DP forms, then records the
   destination register and, where flags are set, CPSR.  */

aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
        setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract (shifted/extended register): bit 29 is S.  */
      else if (insn_bits24_27 == 0x0b)
        setflags = bit (aarch64_insn_r->aarch64_insn, 29);
        return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
        /* Flag-setting forms also clobber CPSR.  */
        record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;

      if (insn_bits24_27 == 0x0b)
          /* Data-processing (3 source).  */
          record_buf[0] = reg_rd;
          aarch64_insn_r->reg_rec_count = 1;
      else if (insn_bits24_27 == 0x0a)
          if (insn_bits21_23 == 0x00)
              /* Add/subtract (with carry).  */
              record_buf[0] = reg_rd;
              aarch64_insn_r->reg_rec_count = 1;
              if (bit (aarch64_insn_r->aarch64_insn, 29))
                  record_buf[1] = AARCH64_CPSR_REGNUM;
                  aarch64_insn_r->reg_rec_count = 2;
          else if (insn_bits21_23 == 0x02)
              /* Conditional compare (register) and conditional compare
                 (immediate) instructions.  */
              record_buf[0] = AARCH64_CPSR_REGNUM;
              aarch64_insn_r->reg_rec_count = 1;
          else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
              /* Conditional select.  */
              /* Data-processing (2 source).  */
              /* Data-processing (1 source).  */
              record_buf[0] = reg_rd;
              aarch64_insn_r->reg_rec_count = 1;
            return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
  return AARCH64_RECORD_SUCCESS;
/* Record handler for data processing - immediate instructions.

   Covers PC-relative addressing, add/subtract (immediate), logical
   (immediate), move wide (immediate) and bitfield/extract.  Records
   Rd plus CPSR for the flag-setting forms.  */

aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
  uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);

  if (insn_bits24_27 == 0x00                     /* PC rel addressing.  */
      || insn_bits24_27 == 0x03                  /* Bitfield and Extract.  */
      || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate).  */
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
  else if (insn_bits24_27 == 0x01)
      /* Add/Subtract (immediate): bit 29 is the S (set flags) bit.  */
      setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
        record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
  else if (insn_bits24_27 == 0x02 && !insn_bit23)
      /* Logical (immediate): opc == 0b11 (ANDS) sets the flags.  */
      setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
        record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
  return AARCH64_RECORD_SUCCESS;
/* Record handler for branch, exception generation and system instructions.

   SVC is delegated to the OS-ABI syscall recorder (x8 holds the
   syscall number on Linux); system insns record Rt or CPSR; branch
   forms record PC and, for the linking variants, LR.  */

aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
      /* Exception generation instructions.  */
      if (insn_bits24_27 == 0x04)
          /* SVC: opc bits [1:0] == 0b01 with the other opcode bits clear.  */
          if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
              && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
              && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
              ULONGEST svc_number;

              /* Register 8 (x8) carries the syscall number.  */
              regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
              return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
            return AARCH64_RECORD_UNSUPPORTED;
      /* System instructions.  */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
          uint32_t reg_rt, reg_crn;

          reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
          reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

          /* Record rt in case of sysl and mrs instructions.  */
          if (bit (aarch64_insn_r->aarch64_insn, 21))
              record_buf[0] = reg_rt;
              aarch64_insn_r->reg_rec_count = 1;
          /* Record cpsr for hint and msr(immediate) instructions.  */
          else if (reg_crn == 0x02 || reg_crn == 0x04)
              record_buf[0] = AARCH64_CPSR_REGNUM;
              aarch64_insn_r->reg_rec_count = 1;
      /* Unconditional branch (register).  */
      else if((insn_bits24_27 & 0x0e) == 0x06)
          record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
          /* BLR (bits [22:21] == 0b01) also writes the link register.  */
          if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
            record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
        return AARCH64_RECORD_UNKNOWN;
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* Bit 31 distinguishes BL from B; BL writes LR.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
        record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
  return AARCH64_RECORD_SUCCESS;
/* Record handler for advanced SIMD load and store instructions.

   Bit 24 splits single-structure from multiple-structure forms.
   For stores, record_buf_mem collects (size, address) pairs; for loads
   (bit 22 set) the destination V registers are recorded instead.
   Bit 23 set means writeback, so the base register Rn is recorded too.  */

aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* Base address comes from Rn.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      /* selem = number of structure elements (1..4).  */
      selem = ((opcode_bits & 0x02) |
               bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
          if (size_bits & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          if ((size_bits >> 1) & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          if (size_bits & 0x01)
              if (!((opcode_bits >> 1) & 0x01))
                return AARCH64_RECORD_UNKNOWN;
          if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
            return AARCH64_RECORD_UNKNOWN;
          /* Replicating load (LD1R etc.): record whole V registers.  */
          for (sindex = 0; sindex < selem; sindex++)
              record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
              reg_rt = (reg_rt + 1) % 32;
        for (sindex = 0; sindex < selem; sindex++)
            if (bit (aarch64_insn_r->aarch64_insn, 22))
              record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
                record_buf_mem[mem_index++] = esize / 8;
                record_buf_mem[mem_index++] = address + addr_offset;
            addr_offset = addr_offset + (esize / 8);
            reg_rt = (reg_rt + 1) % 32;
  /* Load/store multiple structure.  */
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Bit 30 (Q) selects 128-bit vs 64-bit vectors.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
        elements = 128 / esize;
        elements = 64 / esize;

      switch (opcode_bits)
        /* LD/ST4 (4 Registers).  */
        /* LD/ST1 (4 Registers).  */
        /* LD/ST3 (3 Registers).  */
        /* LD/ST1 (3 Registers).  */
        /* LD/ST1 (1 Register).  */
        /* LD/ST2 (2 Registers).  */
        /* LD/ST1 (2 Registers).  */
          return AARCH64_RECORD_UNSUPPORTED;

      for (rindex = 0; rindex < rpt; rindex++)
        for (eindex = 0; eindex < elements; eindex++)
            uint8_t reg_tt, sindex;
            reg_tt = (reg_rt + rindex) % 32;
            for (sindex = 0; sindex < selem; sindex++)
                if (bit (aarch64_insn_r->aarch64_insn, 22))
                  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
                    record_buf_mem[mem_index++] = esize / 8;
                    record_buf_mem[mem_index++] = address + addr_offset;
                addr_offset = addr_offset + (esize / 8);
                reg_tt = (reg_tt + 1) % 32;

  /* Writeback forms update the base register.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  /* record_buf_mem holds (len, addr) pairs, hence the / 2.  */
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
  return AARCH64_RECORD_SUCCESS;
/* Record handler for load and store instructions.

   Dispatches on bits [29:24] over the load/store encoding groups:
   exclusives, literal loads, pairs, and the immediate/register-offset
   single-register forms.  Stores record (size, address) memory
   entries; loads record the destination register(s); writeback forms
   additionally record the base register Rn.  SIMD forms are handed
   off to aarch64_record_asimd_load_store.  */

aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
        debug_printf ("Process record: load/store exclusive\n");

          record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
            record_buf[1] = reg_rt2;
            aarch64_insn_r->reg_rec_count = 2;
            /* Pair forms access twice the element size.  */
            datasize = (8 << size_bits) * 2;
            datasize = (8 << size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
          /* Save register rs.  */
          record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
          aarch64_insn_r->reg_rec_count = 1;
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
        debug_printf ("Process record: load register (literal)\n");
        record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
        record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
        debug_printf ("Process record: load/store pair\n");

              record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
              record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
              record_buf[0] = reg_rt;
              record_buf[1] = reg_rt2;
          aarch64_insn_r->reg_rec_count = 2;

          /* imm7 is a signed, scaled pair offset.  */
          imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
            size_bits = size_bits >> 1;
          datasize = 8 << (2 + size_bits);
          /* Sign-extend imm7 via two's complement on bit 6.  */
          offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
          offset = offset << (2 + size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
          if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
              if (imm7_off & 0x40)
                address = address - offset;
                address = address + offset;

          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          record_buf_mem[2] = datasize / 8;
          record_buf_mem[3] = address + (datasize / 8);
          aarch64_insn_r->mem_rec_count = 2;
      /* Writeback: base register is updated.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
          if (size_bits != 0x03)
            return AARCH64_RECORD_UNKNOWN;

        debug_printf ("Process record: load/store (unsigned immediate):"
                      " size %x V %d opc %x\n", size_bits, vector_flag,

          offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
          /* imm12 is scaled by the access size.  */
          offset = offset << size_bits;
          address = address + offset;

          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && insn_bits10_11 == 0x02 && insn_bit21)
        debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
          if (size_bits != 0x03)
            return AARCH64_RECORD_UNKNOWN;

          uint64_t reg_rm_val;
          /* NOTE(review): "®_rm_val" below is mojibake -- an HTML-entity
             pass corrupted "&reg_rm_val"; restore the ampersand form.  */
          regcache_raw_read_unsigned (aarch64_insn_r->regcache,
                     bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
          /* Bit 12 (S) scales the register offset by the access size.  */
          if (bit (aarch64_insn_r->aarch64_insn, 12))
            offset = reg_rm_val << size_bits;
            offset = reg_rm_val;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
          address = address + offset;
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
        debug_printf ("Process record: load/store "
                      "(immediate and unprivileged)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
          if (size_bits != 0x03)
            return AARCH64_RECORD_UNKNOWN;

          /* imm9 is a signed 9-bit byte offset.  */
          imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
          offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
          /* Post-indexed (bits10_11 == 0x01) uses the unmodified base.  */
          if (insn_bits10_11 != 0x01)
              if (imm9_off & 0x0100)
                address = address - offset;
                address = address + offset;
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
      /* Pre/post-indexed forms write back to the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
  /* Advanced SIMD load/store instructions.  */
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
  return AARCH64_RECORD_SUCCESS;
/* Record handler for data processing SIMD and floating point instructions.

   Exactly one destination register is recorded per insn: an X register
   for FP-to-integer moves/conversions, CPSR for compares, otherwise a
   V register.  The trailing gdb_assert checks that invariant.  */

aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);

    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
      /* Floating point - fixed point conversion instructions.  */
          debug_printf ("FP - fixed point conversion");

      /* FCVTZ* to general register: destination is an X register.  */
      if ((opcode >> 1) == 0x0 && rmode == 0x03)
        record_buf[0] = reg_rd;
        record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
      /* Floating point - conditional compare instructions.  */
      else if (insn_bits10_11 == 0x01)
            debug_printf ("FP - conditional compare");

          record_buf[0] = AARCH64_CPSR_REGNUM;
      /* Floating point - data processing (2-source) and
         conditional select instructions.  */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
            debug_printf ("FP - DP (2-source)");

          record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
      else if (insn_bits10_11 == 0x00)
          /* Floating point - immediate instructions.  */
          if ((insn_bits12_15 & 0x01) == 0x01
              || (insn_bits12_15 & 0x07) == 0x04)
                debug_printf ("FP - immediate");
              record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
          /* Floating point - compare instructions.  */
          else if ((insn_bits12_15 & 0x03) == 0x02)
                /* NOTE(review): debug string says "FP - immediate" but this
                   is the compare branch -- likely a copy/paste slip.  */
                debug_printf ("FP - immediate");
              record_buf[0] = AARCH64_CPSR_REGNUM;
          /* Floating point - integer conversions instructions.  */
          else if (insn_bits12_15 == 0x00)
              /* Convert float to integer instruction.  */
              if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
                    debug_printf ("float to int conversion");

                  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
              /* Convert integer to float instruction.  */
              else if ((opcode >> 1) == 0x01 && !rmode)
                    debug_printf ("int to float conversion");

                  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
              /* Move float to integer instruction.  */
              else if ((opcode >> 1) == 0x03)
                    debug_printf ("move float to int");

                  /* Bit 0 of opcode: direction (to general vs to vector).  */
                  if (!(opcode & 0x01))
                    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                return AARCH64_RECORD_UNKNOWN;
            return AARCH64_RECORD_UNKNOWN;
        return AARCH64_RECORD_UNKNOWN;
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
        debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
          && !bit (aarch64_insn_r->aarch64_insn, 15)
          && bit (aarch64_insn_r->aarch64_insn, 10))
          /* SMOV/UMOV move to a general (X) register.  */
          if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
            record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
  /* All remaining floating point or advanced SIMD instructions.  */
        debug_printf ("all remain");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;

    debug_printf ("\n");

  aarch64_insn_r->reg_rec_count++;
  /* Exactly one register record is expected for this class.  */
  gdb_assert (aarch64_insn_r->reg_rec_count == 1);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
  return AARCH64_RECORD_SUCCESS;
/* Decodes insns type and invokes its record handler.

   Top-level dispatch on bits 25-28, which partition the A64 encoding
   space into the major instruction groups (see the ARMv8 ARM, A64
   top-level encoding table).  Returns an aarch64_record_result.  */

aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
  uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;

  ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);

  /* Data processing - immediate instructions.  */
  if (!ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_data_proc_imm (aarch64_insn_r);

  /* Branch, exception generation and system instructions.  */
  if (ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_branch_except_sys (aarch64_insn_r);

  /* Load and store instructions.  */
  if (!ins_bit25 && ins_bit27)
    return aarch64_record_load_store (aarch64_insn_r);

  /* Data processing - register instructions.  */
  if (ins_bit25 && !ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_reg (aarch64_insn_r);

  /* Data processing - SIMD and floating point instructions.  */
  if (ins_bit25 && ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_simd_fp (aarch64_insn_r);

  /* Anything else is not something we can record.  */
  return AARCH64_RECORD_UNSUPPORTED;
/* Cleans up local record registers and memory allocations.
   Frees the buffers allocated by REG_ALLOC / MEM_ALLOC for RECORD.  */

deallocate_reg_mem (insn_decode_record *record)
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
3707 /* Parse the current instruction and record the values of the registers and
3708 memory that will be changed in current instruction to record_arch_list
3709 return -1 if something is wrong. */
3712 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3713 CORE_ADDR insn_addr)
3715 uint32_t rec_no = 0;
3716 uint8_t insn_size = 4;
3718 ULONGEST t_bit = 0, insn_id = 0;
3719 gdb_byte buf[insn_size];
3720 insn_decode_record aarch64_record;
3722 memset (&buf[0], 0, insn_size);
3723 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3724 target_read_memory (insn_addr, &buf[0], insn_size);
3725 aarch64_record.aarch64_insn
3726 = (uint32_t) extract_unsigned_integer (&buf[0],
3728 gdbarch_byte_order (gdbarch));
3729 aarch64_record.regcache = regcache;
3730 aarch64_record.this_addr = insn_addr;
3731 aarch64_record.gdbarch = gdbarch;
3733 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3734 if (ret == AARCH64_RECORD_UNSUPPORTED)
3736 printf_unfiltered (_("Process record does not support instruction "
3737 "0x%0x at address %s.\n"),
3738 aarch64_record.aarch64_insn,
3739 paddress (gdbarch, insn_addr));
3745 /* Record registers. */
3746 record_full_arch_list_add_reg (aarch64_record.regcache,
3748 /* Always record register CPSR. */
3749 record_full_arch_list_add_reg (aarch64_record.regcache,
3750 AARCH64_CPSR_REGNUM);
3751 if (aarch64_record.aarch64_regs)
3752 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3753 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3754 aarch64_record.aarch64_regs[rec_no]))
3757 /* Record memories. */
3758 if (aarch64_record.aarch64_mems)
3759 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3760 if (record_full_arch_list_add_mem
3761 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3762 aarch64_record.aarch64_mems[rec_no].len))
3765 if (record_full_arch_list_add_end ())
3769 deallocate_reg_mem (&aarch64_record);