1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
58 #include "features/aarch64.c"
60 #include "arch/aarch64-insn.h"
62 #include "opcode/aarch64.h"
/* Bit-field extraction helpers for 32-bit AArch64 instruction words.

   Use "1ULL" rather than "1L" so the mask is computed in a type that
   is at least 64 bits wide on every host; with a 32-bit "long" the
   original shift was undefined for fields of 31 or more bits.
   NOTE: submask (63) would still shift by 64 and remains undefined;
   the visible callers (bits () on 4-byte instructions) never reach
   that.  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
/* Extract bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Extract bits ST..FN (inclusive) of OBJ.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Pseudo register base numbers.  Each bank below is 32 registers wide
   and immediately follows the previous bank, giving the pseudo
   register numbering Q0..Q31, D0..D31, S0..S31, H0..H31, B0..B31.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
/* The standard register names, and all the valid aliases for them.
   Each entry maps an alias string onto the raw register number it
   refers to.  */
const char *const name;
} aarch64_register_aliases[] =
/* 64-bit register names.  */
{"fp", AARCH64_FP_REGNUM},
{"lr", AARCH64_LR_REGNUM},
{"sp", AARCH64_SP_REGNUM},
/* 32-bit register names.  Each wN alias maps onto the raw xN
   register with the same index.  */
{"w0", AARCH64_X0_REGNUM + 0},
{"w1", AARCH64_X0_REGNUM + 1},
{"w2", AARCH64_X0_REGNUM + 2},
{"w3", AARCH64_X0_REGNUM + 3},
{"w4", AARCH64_X0_REGNUM + 4},
{"w5", AARCH64_X0_REGNUM + 5},
{"w6", AARCH64_X0_REGNUM + 6},
{"w7", AARCH64_X0_REGNUM + 7},
{"w8", AARCH64_X0_REGNUM + 8},
{"w9", AARCH64_X0_REGNUM + 9},
{"w10", AARCH64_X0_REGNUM + 10},
{"w11", AARCH64_X0_REGNUM + 11},
{"w12", AARCH64_X0_REGNUM + 12},
{"w13", AARCH64_X0_REGNUM + 13},
{"w14", AARCH64_X0_REGNUM + 14},
{"w15", AARCH64_X0_REGNUM + 15},
{"w16", AARCH64_X0_REGNUM + 16},
{"w17", AARCH64_X0_REGNUM + 17},
{"w18", AARCH64_X0_REGNUM + 18},
{"w19", AARCH64_X0_REGNUM + 19},
{"w20", AARCH64_X0_REGNUM + 20},
{"w21", AARCH64_X0_REGNUM + 21},
{"w22", AARCH64_X0_REGNUM + 22},
{"w23", AARCH64_X0_REGNUM + 23},
{"w24", AARCH64_X0_REGNUM + 24},
{"w25", AARCH64_X0_REGNUM + 25},
{"w26", AARCH64_X0_REGNUM + 26},
{"w27", AARCH64_X0_REGNUM + 27},
{"w28", AARCH64_X0_REGNUM + 28},
{"w29", AARCH64_X0_REGNUM + 29},
{"w30", AARCH64_X0_REGNUM + 30},
/* The intra-procedure-call scratch aliases ip0/ip1 name x16/x17.  */
{"ip0", AARCH64_X0_REGNUM + 16},
{"ip1", AARCH64_X0_REGNUM + 17}
/* The required core 'R' registers.  One name per raw register, in raw
   register number order.  */
static const char *const aarch64_r_register_names[] =
/* These registers must appear in consecutive RAW register number
   order and they must begin with AARCH64_X0_REGNUM!  */
"x0", "x1", "x2", "x3",
"x4", "x5", "x6", "x7",
"x8", "x9", "x10", "x11",
"x12", "x13", "x14", "x15",
"x16", "x17", "x18", "x19",
"x20", "x21", "x22", "x23",
"x24", "x25", "x26", "x27",
"x28", "x29", "x30", "sp",
/* The FP/SIMD 'V' registers, in raw register number order.  */
static const char *const aarch64_v_register_names[] =
/* These registers must appear in consecutive RAW register number
   order and they must begin with AARCH64_V0_REGNUM!  */
"v0", "v1", "v2", "v3",
"v4", "v5", "v6", "v7",
"v8", "v9", "v10", "v11",
"v12", "v13", "v14", "v15",
"v16", "v17", "v18", "v19",
"v20", "v21", "v22", "v23",
"v24", "v25", "v26", "v27",
"v28", "v29", "v30", "v31",
/* AArch64 prologue cache structure.  Filled in by the prologue
   analyzer and consumed by the frame unwinders below.  */
struct aarch64_prologue_cache
/* The program counter at the start of the function.  It is used to
   identify this frame as a prologue frame.  */
/* The program counter at the time this frame was created; i.e. where
   this function was called from.  It is used to identify this frame as a
   prologue frame.  */
/* The stack pointer at the time this frame was created; i.e. the
   caller's stack pointer when this function was called.  It is used
   to identify this frame.  */
/* Is the target available to read from?  */
/* The frame base for this frame is just prev_sp - frame size.
   FRAMESIZE is the distance from the frame pointer to the
   initial stack pointer.  */
/* The register used to hold the frame pointer for this frame.  */
/* Saved register offsets.  Indexed by raw register number; holds the
   stack address (once relocated) where each register was saved.  */
struct trad_frame_saved_reg *saved_regs;
/* "show debug aarch64" command callback: print the current state of
   the aarch64 debug flag.  */
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   Walks the instruction stream from START up to (but not including)
   LIMIT, tracking the abstract (prologue-value) contents of the X
   registers and the stack.  If CACHE is non-NULL, fills in its
   framereg/framesize and saved-register offsets.  */
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Abstract value tracked for each X register during the scan.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;
  /* Each register initially holds its own entry value.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);
  /* AArch64 instructions are fixed 4 bytes wide.  */
  for (; start < limit; start += 4)
      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
      if (aarch64_decode_insn (insn, &inst, 1) != 0)
      /* ADD/SUB immediate: tracked as constant adjustment of the
	 source register (typically SP/FP frame setup).  */
      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
	  if (inst.opcode->op == OP_ADD)
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
      /* ADRP produces a PC-relative value we cannot track.  */
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
      else if (inst.opcode->iclass == branch_imm)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == condbranch)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == branch_reg)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == compbranch)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->op == OP_MOVZ)
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
      /* ORR with zero shift: a plain register move ("mov").  */
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	      debug_printf ("aarch64: prologue analysis gave up "
			    "addr=%s opcode=0x%x (orr x register)\n",
			    core_addr_to_string_nz (start), insn);
      /* STUR: single-register store at an unscaled offset.  */
      else if (inst.opcode->op == OP_STUR)
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);
	  pv_area_store (stack, pv_add_constant (regs[rn],
						 inst.operands[1].addr.offset.imm),
			 is64 ? 8 : 4, regs[rt]);
      /* STP: store-pair, the usual way prologues save FP/LR and
	 callee-saved register pairs.  */
      else if ((inst.opcode->iclass == ldstpair_off
		|| inst.opcode->iclass == ldstpair_indexed)
	       && inst.operands[2].addr.preind
	       && strcmp ("stp", inst.opcode->name) == 0)
	  unsigned rt1 = inst.operands[0].reg.regno;
	  unsigned rt2 = inst.operands[1].reg.regno;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);
	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
	  /* Pre/post-indexed forms also update the base register.  */
	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
      else if (inst.opcode->iclass == testbranch)
	  /* Stop analysis on branch.  */
	  debug_printf ("aarch64: prologue analysis gave up addr=%s"
			core_addr_to_string_nz (start), insn);
      do_cleanups (back_to);
  /* Decide which register anchors the frame, preferring FP when it
     was set up as SP plus a constant.  */
  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
  /* Record where each register was saved, as an offset relative to
     the entry stack pointer; relocated later against prev_sp.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      if (pv_area_find_reg (stack, gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
  do_cleanups (back_to);
/* Implement the "skip_prologue" gdbarch method.  Return the address
   of the first instruction past the prologue of the function
   containing PC.  */
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  CORE_ADDR func_addr, limit_pc;
  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);
      if (post_prologue_pc != 0)
	return max (pc, post_prologue_pc);
  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */
  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     fall-back.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
    limit_pc = pc + 128;	/* Magic.  */
  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  cache->prev_pc = prev_pc;
  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;
  /* Use symbol/line info to bound the prologue when available.  */
  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
      else if (sal.end < prologue_end)
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
      /* Never scan past the PC we are actually stopped at.  */
      prologue_end = min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
      /* No symbol info: fall back to assuming a standard frame with
	 FP as frame register and x29/x30 saved at the frame base.  */
      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
  CORE_ADDR unwound_fp;
  aarch64_scan_prologue (this_frame, cache);
  /* No usable frame was found by the scan; leave the cache empty.  */
  if (cache->framereg == -1)
  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  cache->prev_sp = unwound_fp + cache->framesize;
  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;
  cache->func = get_frame_func (this_frame);
  cache->available_p = 1;
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */
static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache;
  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;
  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  aarch64_make_prologue_cache_1 (this_frame, cache);
  /* Unavailability is recorded in the cache; anything else is
     re-thrown to the caller.  */
  CATCH (ex, RETURN_MASK_ERROR)
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
/* Implement the "stop_reason" frame_unwind method.  */
static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;
  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;
  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;
  return UNWIND_NO_REASON;
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
    *this_id = frame_id_build (cache->prev_sp, cache->func);
/* Implement the "prev_register" frame_unwind method.  Return the
   caller-frame value of PREV_REGNUM.  */
static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
  /* Anything else comes from the saved-register table.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
/* AArch64 prologue unwinder.  Vtable of the frame_unwind callbacks
   defined above.  */
struct frame_unwind aarch64_prologue_unwind =
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  default_frame_sniffer
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */
static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache;
  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;
  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  /* Stub frames have no prologue to scan; the current SP and PC are
     used directly.  */
  cache->prev_sp = get_frame_register_unsigned (this_frame,
  cache->prev_pc = get_frame_pc (this_frame);
  cache->available_p = 1;
  CATCH (ex, RETURN_MASK_ERROR)
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
/* Implement the "stop_reason" frame_unwind method.  */
static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);
  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;
  return UNWIND_NO_REASON;
/* Our frame ID for a stub frame is the current SP and LR.  */
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);
  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
/* Implement the "sniffer" frame_unwind method.  Accept the frame when
   it is in a PLT stub or its code is unreadable.  */
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
  CORE_ADDR addr_in_block;
  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
/* AArch64 stub unwinder.  Note it shares prev_register with the
   prologue unwinder above.  */
struct frame_unwind aarch64_stub_unwind =
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  aarch64_stub_unwind_sniffer
/* Return the frame base address of *THIS_FRAME.  The base is the
   reconstructed caller SP minus the frame size.  */
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  return cache->prev_sp - cache->framesize;
/* AArch64 default frame base information.  The same address is used
   for the frame, locals and args bases.  */
struct frame_base aarch64_normal_base =
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */
static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
  return frame_id_build (get_frame_register_unsigned (this_frame,
			 get_frame_pc (this_frame));
/* Implement the "unwind_pc" gdbarch method: the previous frame's PC.  */
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
/* Implement the "unwind_sp" gdbarch method: the previous frame's SP.  */
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
/* Return the value of the REGNUM register in the previous frame of
   THIS_FRAME.  Only the PC is handled here; any other register is a
   bug in the caller.  */
static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
    case AARCH64_PC_REGNUM:
      /* The saved PC of the caller is the current LR.  */
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
/* Implement the "init_reg" dwarf2_frame_ops method.  Set up the
   default DWARF unwind rules for PC (via LR) and SP (the CFA).  */
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */
/* Value to pass on stack.  It can be NULL if this item is for stack
   padding.  */
const gdb_byte *data;
/* Size in bytes of value to pass on stack.  */
DEF_VEC_O (stack_item_t);
/* Return the alignment (in bytes) of the given type.  */
aarch64_type_align (struct type *t)
  t = check_typedef (t);
  switch (TYPE_CODE (t))
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
      /* Scalar-like types are aligned to their own size.  */
      return TYPE_LENGTH (t);
    case TYPE_CODE_ARRAY:
	  /* Use the natural alignment for vector types (the same for
	     scalar type), but the maximum alignment is 128-bit.  */
	  if (TYPE_LENGTH (t) > 16)
	  return TYPE_LENGTH (t);
	/* Non-vector arrays align like their element type.  */
	return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* Aggregates take the largest alignment among their members.  */
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */
is_hfa_or_hva (struct type *ty)
  switch (TYPE_CODE (ty))
    case TYPE_CODE_ARRAY:
	struct type *target_ty = TYPE_TARGET_TYPE (ty);
	/* A vector type itself is not an HFA/HVA.  */
	if (TYPE_VECTOR (ty))
	if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
	    && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
		|| (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
		    && TYPE_VECTOR (target_ty))))
    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
	/* HFA or HVA has at most four members.  */
	if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
	    struct type *member0_type;
	    member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
	    /* The first member must be float or vector...  */
	    if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
		|| (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
		    && TYPE_VECTOR (member0_type)))
		/* ...and every other member must have the same type
		   code and length as the first.  */
		for (i = 0; i < TYPE_NFIELDS (ty); i++)
		    struct type *member1_type;
		    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
		    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
			|| (TYPE_LENGTH (member0_type)
			    != TYPE_LENGTH (member1_type)))
/* AArch64 function call information structure.  Tracks the argument
   marshalling state while setting up a dummy call.  */
struct aarch64_call_info
/* The current argument number.  */
/* The next general purpose register number, equivalent to NGRN as
   described in the AArch64 Procedure Call Standard.  */
/* The next SIMD and floating point register number, equivalent to
   NSRN as described in the AArch64 Procedure Call Standard.  */
/* The next stacked argument address, equivalent to NSAA as
   described in the AArch64 Procedure Call Standard.  */
/* Stack item vector.  Items pushed here are written to memory last,
   in reverse order.  */
VEC(stack_item_t) *si;
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);
      /* Copy at most one X register's worth per iteration.  */
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
      regcache_cooked_write_unsigned (regcache, regnum, regval);
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
  int regnum = AARCH64_V0_REGNUM + info->nsrn;
  gdb_byte reg[V_REGISTER_SIZE];
  /* Zero the whole register so the unused high bytes are defined.  */
  memset (reg, 0, sizeof (reg));
  /* PCS C.1, the argument is allocated to the least significant
     bits of V register.  */
  memcpy (reg, buf, len);
  regcache_cooked_write (regcache, regnum, reg);
      debug_printf ("arg %d in %s\n", info->argnum,
		    gdbarch_register_name (gdbarch, regnum));
/* Marshall an argument onto the stack (appended to the stack item
   vector; actual memory writes happen later).  */
pass_on_stack (struct aarch64_call_info *info, struct type *type,
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  align = aarch64_type_align (type);
  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);
  /* The AArch64 PCS requires at most doubleword alignment.  */
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
  VEC_safe_push (stack_item_t, info->si, &item);
  if (info->nsaa & (align - 1))
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));
      VEC_safe_push (stack_item_t, info->si, &item);
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
  int len = TYPE_LENGTH (type);
  /* Number of X registers the value would occupy (rounded up).  */
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
      pass_on_stack (info, type, arg);
/* Pass a value in a V register, or on the stack if insufficient are
   available.  */
pass_in_v_or_stack (struct gdbarch *gdbarch,
		    struct regcache *regcache,
		    struct aarch64_call_info *info,
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
		  value_contents (arg)))
    pass_on_stack (info, type, arg);
/* Implement the "push_dummy_call" gdbarch method.  Marshall ARGS per
   the AAPCS64 into registers/stack, set up LR and the struct-return
   pointer, and return the final SP.  */
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 struct value **args, CORE_ADDR sp, int struct_return,
			 CORE_ADDR struct_addr)
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;
  memset (&info, 0, sizeof (info));
  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the targets implementation of push_dummy_call.
     Rather that change the target interface we call the language code
     directly ourselves.  */
  func_type = check_typedef (value_type (function));
  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);
  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
	      || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);
  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
	  debug_printf ("struct return in %s = 0x%s\n",
			gdbarch_register_name (gdbarch,
					       AARCH64_STRUCT_RETURN_REGNUM),
			paddress (gdbarch, struct_addr));
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
  /* Marshall each argument in turn.  */
  for (argnum = 0; argnum < nargs; argnum++)
      struct value *arg = args[argnum];
      struct type *arg_type;
      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);
      switch (TYPE_CODE (arg_type))
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	      /* Promote to 32 bit integer.  */
	      if (TYPE_UNSIGNED (arg_type))
		arg_type = builtin_type (gdbarch)->builtin_uint32;
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	case TYPE_CODE_COMPLEX:
	      /* Complex values go in two consecutive V registers:
		 real part first, imaginary part second.  */
	      const bfd_byte *buf = value_contents (arg);
	      struct type *target_type =
		check_typedef (TYPE_TARGET_TYPE (arg_type));
	      pass_in_v (gdbarch, regcache, &info,
			 TYPE_LENGTH (target_type), buf);
	      pass_in_v (gdbarch, regcache, &info,
			 TYPE_LENGTH (target_type),
			 buf + TYPE_LENGTH (target_type));
	    pass_on_stack (&info, arg_type, arg);
	  pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (is_hfa_or_hva (arg_type))
	      int elements = TYPE_NFIELDS (arg_type);
	      /* Homogeneous Aggregates.  */
	      if (info.nsrn + elements < 8)
		  for (i = 0; i < elements; i++)
		      /* We know that we have sufficient registers
			 available therefore this will never fallback
			 to the stack.  */
		      struct value *field =
			value_primitive_field (arg, 0, i, arg_type);
		      struct type *field_type =
			check_typedef (value_type (field));
		      pass_in_v_or_stack (gdbarch, regcache, &info,
		  pass_on_stack (&info, arg_type, arg);
	  else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
		   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
	      /* Short vector types are passed in V registers.  */
	      pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */
	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);
	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg), len);
	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	      /* PCS C.15 / C.18 multiple values pass.  */
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
  /* Make sure stack retains 16 byte alignment.  */
    sp -= 16 - (info.nsaa & 15);
  /* Pop queued stack items (pushed in argument order) so they land in
     reverse, i.e. last argument deepest.  */
  while (!VEC_empty (stack_item_t, info.si))
      stack_item_t *si = VEC_last (stack_item_t, info.si);
      /* A NULL data pointer marks pure padding; skip the write.  */
      if (si->data != NULL)
	write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
  VEC_free (stack_item_t, info.si);
  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1387 /* Implement the "frame_align" gdbarch method. */
/* Round SP down to a 16-byte boundary, per the AArch64 AAPCS64 requirement
   that SP be 16-byte aligned at any public interface.  */
1390 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1392 /* Align the stack to sixteen bytes. */
1393 return sp & ~(CORE_ADDR) 15;
1396 /* Return the type for an AdvSISD Q register. */
/* Lazily build a composite view type for the 128-bit Q registers, with an
   unsigned ("u") and signed ("s") 128-bit member, caching it on the
   per-gdbarch tdep so it is constructed only once.  */
1398 static struct type *
1399 aarch64_vnq_type (struct gdbarch *gdbarch)
1401 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1403 if (tdep->vnq_type == NULL)
1408 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1411 elem = builtin_type (gdbarch)->builtin_uint128;
1412 append_composite_type_field (t, "u", elem);
1414 elem = builtin_type (gdbarch)->builtin_int128;
1415 append_composite_type_field (t, "s", elem);
/* NOTE(review): the assignment of T into tdep->vnq_type is elided in this
   excerpt; presumably it happens before the return — confirm upstream.  */
1420 return tdep->vnq_type;
1423 /* Return the type for an AdvSISD D register. */
/* As aarch64_vnq_type, but for the 64-bit D view: members are a double
   ("f"), an unsigned 64-bit ("u") and a signed 64-bit ("s") integer.  */
1425 static struct type *
1426 aarch64_vnd_type (struct gdbarch *gdbarch)
1428 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1430 if (tdep->vnd_type == NULL)
1435 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1438 elem = builtin_type (gdbarch)->builtin_double;
1439 append_composite_type_field (t, "f", elem);
1441 elem = builtin_type (gdbarch)->builtin_uint64;
1442 append_composite_type_field (t, "u", elem);
1444 elem = builtin_type (gdbarch)->builtin_int64;
1445 append_composite_type_field (t, "s", elem);
1450 return tdep->vnd_type;
1453 /* Return the type for an AdvSISD S register. */
/* 32-bit S view: single-precision float ("f"), uint32 ("u"), int32 ("s"),
   cached on tdep like the other vector view types.  */
1455 static struct type *
1456 aarch64_vns_type (struct gdbarch *gdbarch)
1458 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1460 if (tdep->vns_type == NULL)
1465 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1468 elem = builtin_type (gdbarch)->builtin_float;
1469 append_composite_type_field (t, "f", elem);
1471 elem = builtin_type (gdbarch)->builtin_uint32;
1472 append_composite_type_field (t, "u", elem);
1474 elem = builtin_type (gdbarch)->builtin_int32;
1475 append_composite_type_field (t, "s", elem);
1480 return tdep->vns_type;
1483 /* Return the type for an AdvSISD H register. */
/* 16-bit H view: unsigned ("u") and signed ("s") 16-bit members only —
   no half-float member here, unlike the S/D views which carry "f".  */
1485 static struct type *
1486 aarch64_vnh_type (struct gdbarch *gdbarch)
1488 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1490 if (tdep->vnh_type == NULL)
1495 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1498 elem = builtin_type (gdbarch)->builtin_uint16;
1499 append_composite_type_field (t, "u", elem);
1501 elem = builtin_type (gdbarch)->builtin_int16;
1502 append_composite_type_field (t, "s", elem);
1507 return tdep->vnh_type;
1510 /* Return the type for an AdvSISD B register. */
/* 8-bit B view: unsigned ("u") and signed ("s") byte members, cached on
   the per-gdbarch tdep.  */
1512 static struct type *
1513 aarch64_vnb_type (struct gdbarch *gdbarch)
1515 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1517 if (tdep->vnb_type == NULL)
1522 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1525 elem = builtin_type (gdbarch)->builtin_uint8;
1526 append_composite_type_field (t, "u", elem);
1528 elem = builtin_type (gdbarch)->builtin_int8;
1529 append_composite_type_field (t, "s", elem);
1534 return tdep->vnb_type;
1537 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
/* Map DWARF register numbers (X0..X30, SP, V0..V31) onto GDB's internal
   register numbering.  NOTE(review): the fall-through return for unmapped
   DWARF numbers is elided in this excerpt — presumably returns -1.  */
1540 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1542 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1543 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1545 if (reg == AARCH64_DWARF_SP)
1546 return AARCH64_SP_REGNUM;
1548 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1549 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1555 /* Implement the "print_insn" gdbarch method. */
/* Clear INFO->symbols before delegating to the opcodes disassembler so it
   does not try to interpret symbol data, then disassemble one insn.  */
1558 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1560 info->symbols = NULL;
1561 return print_insn_aarch64 (memaddr, info);
1564 /* AArch64 BRK software debug mode instruction.
1565 Note that AArch64 code is always little-endian.
1566 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1567 static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1569 /* Implement the "breakpoint_from_pc" gdbarch method. */
/* Report the fixed 4-byte BRK encoding; PCPTR is never adjusted since all
   AArch64 instructions are 4-byte aligned.  */
1571 static const gdb_byte *
1572 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1575 *lenptr = sizeof (aarch64_default_breakpoint);
1576 return aarch64_default_breakpoint;
1579 /* Extract from an array REGS containing the (raw) register state a
1580 function return value of type TYPE, and copy that, in virtual
1581 format, into VALBUF. */
/* Dispatches on TYPE per AAPCS64 return conventions: floats and short
   vectors come back in V0, scalars/pointers in X0+, complex in V0/V1,
   HFA/HVA members in consecutive V registers, and small aggregates in
   consecutive X registers.  */
1584 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1587 struct gdbarch *gdbarch = get_regcache_arch (regs);
1588 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1590 if (TYPE_CODE (type) == TYPE_CODE_FLT)
/* Scalar float: the value occupies the low LEN bytes of V0.  */
1592 bfd_byte buf[V_REGISTER_SIZE];
1593 int len = TYPE_LENGTH (type);
1595 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1596 memcpy (valbuf, buf, len);
1598 else if (TYPE_CODE (type) == TYPE_CODE_INT
1599 || TYPE_CODE (type) == TYPE_CODE_CHAR
1600 || TYPE_CODE (type) == TYPE_CODE_BOOL
1601 || TYPE_CODE (type) == TYPE_CODE_PTR
1602 || TYPE_CODE (type) == TYPE_CODE_REF
1603 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1605 /* If the the type is a plain integer, then the access is
1606 straight-forward. Otherwise we have to play around a bit
1608 int len = TYPE_LENGTH (type);
1609 int regno = AARCH64_X0_REGNUM;
/* NOTE(review): the enclosing loop header (while len > 0) is elided in
   this excerpt; the body below consumes X registers in sequence.  */
1614 /* By using store_unsigned_integer we avoid having to do
1615 anything special for small big-endian values. */
1616 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1617 store_unsigned_integer (valbuf,
1618 (len > X_REGISTER_SIZE
1619 ? X_REGISTER_SIZE : len), byte_order, tmp);
1620 len -= X_REGISTER_SIZE;
1621 valbuf += X_REGISTER_SIZE;
1624 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
/* Complex: real part in V0, imaginary part in V1.  NOTE(review): the
   "valbuf += len;" advances between the two copies are elided here.  */
1626 int regno = AARCH64_V0_REGNUM;
1627 bfd_byte buf[V_REGISTER_SIZE];
1628 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1629 int len = TYPE_LENGTH (target_type);
1631 regcache_cooked_read (regs, regno, buf);
1632 memcpy (valbuf, buf, len);
1634 regcache_cooked_read (regs, regno + 1, buf);
1635 memcpy (valbuf, buf, len);
1638 else if (is_hfa_or_hva (type))
/* Homogeneous floating-point/vector aggregate: one V register per
   member, starting at V0.  */
1640 int elements = TYPE_NFIELDS (type);
1641 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1642 int len = TYPE_LENGTH (member_type);
1645 for (i = 0; i < elements; i++)
1647 int regno = AARCH64_V0_REGNUM + i;
1648 bfd_byte buf[V_REGISTER_SIZE];
1652 debug_printf ("read HFA or HVA return value element %d from %s\n",
1654 gdbarch_register_name (gdbarch, regno));
1656 regcache_cooked_read (regs, regno, buf);
1658 memcpy (valbuf, buf, len);
1662 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1663 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1665 /* Short vector is returned in V register. */
1666 gdb_byte buf[V_REGISTER_SIZE];
1668 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1669 memcpy (valbuf, buf, TYPE_LENGTH (type));
1673 /* For a structure or union the behaviour is as if the value had
1674 been stored to word-aligned memory and then loaded into
1675 registers with 64-bit load instruction(s). */
1676 int len = TYPE_LENGTH (type);
1677 int regno = AARCH64_X0_REGNUM;
1678 bfd_byte buf[X_REGISTER_SIZE];
1682 regcache_cooked_read (regs, regno++, buf);
1683 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1684 len -= X_REGISTER_SIZE;
1685 valbuf += X_REGISTER_SIZE;
1691 /* Will a function return an aggregate type in memory or in a
1692 register? Return 0 if an aggregate type can be returned in a
1693 register, 1 if it must be returned in memory. */
/* AAPCS64: HFA/HVA values (at most four members) fit in V0-V7; other
   aggregates over 16 bytes go by invisible reference.  NOTE(review): the
   actual return statements are elided in this excerpt.  */
1696 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1698 type = check_typedef (type);
1700 if (is_hfa_or_hva (type))
1702 /* v0-v7 are used to return values and one register is allocated
1703 for one member. However, HFA or HVA has at most four members. */
1707 if (TYPE_LENGTH (type) > 16)
1709 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1710 invisible reference. */
1718 /* Write into appropriate registers a function return value of type
1719 TYPE, given in virtual format. */
/* Mirror of aarch64_extract_return_value: place VALBUF's bytes into the
   registers the callee would have used, per AAPCS64.  */
1722 aarch64_store_return_value (struct type *type, struct regcache *regs,
1723 const gdb_byte *valbuf)
1725 struct gdbarch *gdbarch = get_regcache_arch (regs);
1726 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1728 if (TYPE_CODE (type) == TYPE_CODE_FLT)
/* Scalar float goes into the low bytes of V0.  */
1730 bfd_byte buf[V_REGISTER_SIZE];
1731 int len = TYPE_LENGTH (type);
1733 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1734 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1736 else if (TYPE_CODE (type) == TYPE_CODE_INT
1737 || TYPE_CODE (type) == TYPE_CODE_CHAR
1738 || TYPE_CODE (type) == TYPE_CODE_BOOL
1739 || TYPE_CODE (type) == TYPE_CODE_PTR
1740 || TYPE_CODE (type) == TYPE_CODE_REF
1741 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1743 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1745 /* Values of one word or less are zero/sign-extended and
1747 bfd_byte tmpbuf[X_REGISTER_SIZE];
1748 LONGEST val = unpack_long (type, valbuf);
/* unpack_long already applied the type's extension semantics, so the
   full X register width is stored signed here.  */
1750 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1751 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1755 /* Integral values greater than one word are stored in
1756 consecutive registers starting with r0. This will always
1757 be a multiple of the regiser size. */
1758 int len = TYPE_LENGTH (type);
1759 int regno = AARCH64_X0_REGNUM;
/* NOTE(review): the loop header consuming X registers is elided in
   this excerpt.  */
1763 regcache_cooked_write (regs, regno++, valbuf);
1764 len -= X_REGISTER_SIZE;
1765 valbuf += X_REGISTER_SIZE;
1769 else if (is_hfa_or_hva (type))
/* Homogeneous aggregate: one member per V register, starting at V0.
   NOTE(review): the per-iteration "valbuf += len;" is elided here.  */
1771 int elements = TYPE_NFIELDS (type);
1772 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1773 int len = TYPE_LENGTH (member_type);
1776 for (i = 0; i < elements; i++)
1778 int regno = AARCH64_V0_REGNUM + i;
1779 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
1783 debug_printf ("write HFA or HVA return value element %d to %s\n",
1785 gdbarch_register_name (gdbarch, regno));
1788 memcpy (tmpbuf, valbuf, len);
1789 regcache_cooked_write (regs, regno, tmpbuf);
1793 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1794 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
/* Short vector (8 or 16 bytes) is returned in V0.  */
1797 gdb_byte buf[V_REGISTER_SIZE];
1799 memcpy (buf, valbuf, TYPE_LENGTH (type));
1800 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1804 /* For a structure or union the behaviour is as if the value had
1805 been stored to word-aligned memory and then loaded into
1806 registers with 64-bit load instruction(s). */
1807 int len = TYPE_LENGTH (type);
1808 int regno = AARCH64_X0_REGNUM;
1809 bfd_byte tmpbuf[X_REGISTER_SIZE];
1813 memcpy (tmpbuf, valbuf,
1814 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1815 regcache_cooked_write (regs, regno++, tmpbuf);
1816 len -= X_REGISTER_SIZE;
1817 valbuf += X_REGISTER_SIZE;
1822 /* Implement the "return_value" gdbarch method. */
/* Decide the convention for VALTYPE, then read/write the value through the
   helpers above.  Aggregates that must live in memory report
   RETURN_VALUE_STRUCT_CONVENTION and are otherwise untouched here.  */
1824 static enum return_value_convention
1825 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
1826 struct type *valtype, struct regcache *regcache,
1827 gdb_byte *readbuf, const gdb_byte *writebuf)
1830 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
1831 || TYPE_CODE (valtype) == TYPE_CODE_UNION
1832 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
1834 if (aarch64_return_in_memory (gdbarch, valtype))
1837 debug_printf ("return value in memory\n");
1838 return RETURN_VALUE_STRUCT_CONVENTION;
/* NOTE(review): the null checks guarding WRITEBUF/READBUF are elided in
   this excerpt — each helper call is conditional upstream.  */
1843 aarch64_store_return_value (valtype, regcache, writebuf);
1846 aarch64_extract_return_value (valtype, regcache, readbuf);
1849 debug_printf ("return value in registers\n");
1851 return RETURN_VALUE_REGISTER_CONVENTION;
1854 /* Implement the "get_longjmp_target" gdbarch method. */
/* Recover the longjmp destination PC from the jmp_buf whose address is in
   X0, using the per-OS jb_pc/jb_elt_size layout stored in tdep.  */
1857 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1860 gdb_byte buf[X_REGISTER_SIZE];
1861 struct gdbarch *gdbarch = get_frame_arch (frame);
1862 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1863 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1865 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
/* On a failed memory read the function bails out (failure return elided
   in this excerpt).  */
1867 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
1871 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
1875 /* Implement the "gen_return_address" gdbarch method. */
/* For agent expressions, the return address is simply the LR register —
   express it as an lvalue register reference.  */
1878 aarch64_gen_return_address (struct gdbarch *gdbarch,
1879 struct agent_expr *ax, struct axs_value *value,
1882 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
1883 value->kind = axs_lvalue_register;
1884 value->u.reg = AARCH64_LR_REGNUM;
1888 /* Return the pseudo register name corresponding to register regnum. */
/* REGNUM arrives as an absolute GDB register number; after subtracting
   gdbarch_num_regs it indexes the pseudo ranges Q0.., D0.., S0.., H0..,
   B0.. (32 each).  An out-of-range number is an internal error.  */
1891 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
1893 static const char *const q_name[] =
1895 "q0", "q1", "q2", "q3",
1896 "q4", "q5", "q6", "q7",
1897 "q8", "q9", "q10", "q11",
1898 "q12", "q13", "q14", "q15",
1899 "q16", "q17", "q18", "q19",
1900 "q20", "q21", "q22", "q23",
1901 "q24", "q25", "q26", "q27",
1902 "q28", "q29", "q30", "q31",
1905 static const char *const d_name[] =
1907 "d0", "d1", "d2", "d3",
1908 "d4", "d5", "d6", "d7",
1909 "d8", "d9", "d10", "d11",
1910 "d12", "d13", "d14", "d15",
1911 "d16", "d17", "d18", "d19",
1912 "d20", "d21", "d22", "d23",
1913 "d24", "d25", "d26", "d27",
1914 "d28", "d29", "d30", "d31",
1917 static const char *const s_name[] =
1919 "s0", "s1", "s2", "s3",
1920 "s4", "s5", "s6", "s7",
1921 "s8", "s9", "s10", "s11",
1922 "s12", "s13", "s14", "s15",
1923 "s16", "s17", "s18", "s19",
1924 "s20", "s21", "s22", "s23",
1925 "s24", "s25", "s26", "s27",
1926 "s28", "s29", "s30", "s31",
1929 static const char *const h_name[] =
1931 "h0", "h1", "h2", "h3",
1932 "h4", "h5", "h6", "h7",
1933 "h8", "h9", "h10", "h11",
1934 "h12", "h13", "h14", "h15",
1935 "h16", "h17", "h18", "h19",
1936 "h20", "h21", "h22", "h23",
1937 "h24", "h25", "h26", "h27",
1938 "h28", "h29", "h30", "h31",
1941 static const char *const b_name[] =
1943 "b0", "b1", "b2", "b3",
1944 "b4", "b5", "b6", "b7",
1945 "b8", "b9", "b10", "b11",
1946 "b12", "b13", "b14", "b15",
1947 "b16", "b17", "b18", "b19",
1948 "b20", "b21", "b22", "b23",
1949 "b24", "b25", "b26", "b27",
1950 "b28", "b29", "b30", "b31",
1953 regnum -= gdbarch_num_regs (gdbarch);
1955 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1956 return q_name[regnum - AARCH64_Q0_REGNUM];
1958 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1959 return d_name[regnum - AARCH64_D0_REGNUM];
1961 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1962 return s_name[regnum - AARCH64_S0_REGNUM];
1964 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1965 return h_name[regnum - AARCH64_H0_REGNUM];
1967 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1968 return b_name[regnum - AARCH64_B0_REGNUM];
1970 internal_error (__FILE__, __LINE__,
1971 _("aarch64_pseudo_register_name: bad register number %d"),
1975 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
/* Map a pseudo register number to the matching composite view type built
   by the aarch64_vn?_type helpers above.  */
1977 static struct type *
1978 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
1980 regnum -= gdbarch_num_regs (gdbarch);
1982 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1983 return aarch64_vnq_type (gdbarch);
1985 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1986 return aarch64_vnd_type (gdbarch);
1988 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1989 return aarch64_vns_type (gdbarch);
1991 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1992 return aarch64_vnh_type (gdbarch);
1994 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1995 return aarch64_vnb_type (gdbarch);
1997 internal_error (__FILE__, __LINE__,
1998 _("aarch64_pseudo_register_type: bad register number %d"),
2002 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
/* Classify pseudo registers into register groups: all pseudos are vector
   registers; D and S views additionally belong to the float group.  */
2005 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2006 struct reggroup *group)
2008 regnum -= gdbarch_num_regs (gdbarch);
2010 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2011 return group == all_reggroup || group == vector_reggroup;
2012 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2013 return (group == all_reggroup || group == vector_reggroup
2014 || group == float_reggroup);
2015 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2016 return (group == all_reggroup || group == vector_reggroup
2017 || group == float_reggroup)
2018 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2019 return group == all_reggroup || group == vector_reggroup;
2020 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2021 return group == all_reggroup || group == vector_reggroup;
2023 return group == all_reggroup;
2026 /* Implement the "pseudo_register_read_value" gdbarch method. */
/* Each pseudo (Q/D/S/H/B) aliases the low bytes of the corresponding raw
   V register: read the full V register, then copy the view-sized prefix
   into the result value.  An invalid raw read marks the whole value
   unavailable.  */
2028 static struct value *
2029 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2030 struct regcache *regcache,
2033 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2034 struct value *result_value;
2037 result_value = allocate_value (register_type (gdbarch, regnum));
2038 VALUE_LVAL (result_value) = lval_register;
2039 VALUE_REGNUM (result_value) = regnum;
2040 buf = value_contents_raw (result_value);
2042 regnum -= gdbarch_num_regs (gdbarch);
2044 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2046 enum register_status status;
2049 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2050 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2051 if (status != REG_VALID)
2052 mark_value_bytes_unavailable (result_value, 0,
2053 TYPE_LENGTH (value_type (result_value)));
2055 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2056 return result_value;
2059 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2061 enum register_status status;
2064 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2065 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2066 if (status != REG_VALID)
2067 mark_value_bytes_unavailable (result_value, 0,
2068 TYPE_LENGTH (value_type (result_value)));
2070 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2071 return result_value;
2074 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2076 enum register_status status;
2079 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2080 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2081 if (status != REG_VALID)
2082 mark_value_bytes_unavailable (result_value, 0,
2083 TYPE_LENGTH (value_type (result_value)));
2085 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2086 return result_value;
2089 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2091 enum register_status status;
2094 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2095 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2096 if (status != REG_VALID)
2097 mark_value_bytes_unavailable (result_value, 0,
2098 TYPE_LENGTH (value_type (result_value)));
2100 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2101 return result_value;
2104 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2106 enum register_status status;
2109 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2110 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2111 if (status != REG_VALID)
2112 mark_value_bytes_unavailable (result_value, 0,
2113 TYPE_LENGTH (value_type (result_value)));
2115 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2116 return result_value;
2119 gdb_assert_not_reached ("regnum out of bound");
2122 /* Implement the "pseudo_register_write" gdbarch method. */
/* Writing a narrow pseudo (D/S/H/B) behaves like an architectural write:
   the view-sized prefix comes from BUF and the remaining bytes of the
   underlying V register are zeroed (reg_buf is pre-cleared).  */
2125 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2126 int regnum, const gdb_byte *buf)
2128 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2130 /* Ensure the register buffer is zero, we want gdb writes of the
2131 various 'scalar' pseudo registers to behavior like architectural
2132 writes, register width bytes are written the remainder are set to
2134 memset (reg_buf, 0, sizeof (reg_buf));
2136 regnum -= gdbarch_num_regs (gdbarch);
2138 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2140 /* pseudo Q registers */
2143 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2144 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2145 regcache_raw_write (regcache, v_regnum, reg_buf);
2149 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2151 /* pseudo D registers */
2154 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2155 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2156 regcache_raw_write (regcache, v_regnum, reg_buf);
2160 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
/* pseudo S registers.  */
2164 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2165 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2166 regcache_raw_write (regcache, v_regnum, reg_buf);
2170 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2172 /* pseudo H registers */
2175 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2176 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2177 regcache_raw_write (regcache, v_regnum, reg_buf);
2181 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2183 /* pseudo B registers */
2186 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2187 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2188 regcache_raw_write (regcache, v_regnum, reg_buf);
2192 gdb_assert_not_reached ("regnum out of bound");
2195 /* Callback function for user_reg_add. */
/* BATON is a pointer to the register number captured at registration time;
   simply fetch that register's value in FRAME.  */
2197 static struct value *
2198 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2200 const int *reg_p = (const int *) baton;
2202 return value_of_register (*reg_p, frame);
2206 /* Implement the "software_single_step" gdbarch method, needed to
2207 single step through atomic sequences on AArch64. */
/* A LDXR/STXR atomic sequence must not be interrupted or the reservation
   is lost and the loop never terminates.  Detect a Load-Exclusive at PC,
   scan forward (bounded) for the closing Store-Exclusive, and place
   breakpoints past the sequence and at the target of at most one
   conditional branch inside it.  */
2210 aarch64_software_single_step (struct frame_info *frame)
2212 struct gdbarch *gdbarch = get_frame_arch (frame);
2213 struct address_space *aspace = get_frame_address_space (frame);
2214 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2215 const int insn_size = 4;
2216 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2217 CORE_ADDR pc = get_frame_pc (frame);
2218 CORE_ADDR breaks[2] = { -1, -1 };
2220 CORE_ADDR closing_insn = 0;
2221 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2222 byte_order_for_code);
2225 int bc_insn_count = 0; /* Conditional branch instruction count. */
2226 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
/* Bail out (returning 0, elided here) if the insn cannot be decoded.  */
2229 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2232 /* Look for a Load Exclusive instruction which begins the sequence. */
/* Bit 22 distinguishes loads (1) from stores (0) in the ldstexcl class.  */
2233 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2236 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2239 insn = read_memory_unsigned_integer (loc, insn_size,
2240 byte_order_for_code);
2242 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2244 /* Check if the instruction is a conditional branch. */
2245 if (inst.opcode->iclass == condbranch)
2247 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
/* Only one conditional branch is supported inside the sequence.  */
2249 if (bc_insn_count >= 1)
2252 /* It is, so we'll try to set a breakpoint at the destination. */
2253 breaks[1] = loc + inst.operands[0].imm.value;
2259 /* Look for the Store Exclusive which closes the atomic sequence. */
2260 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2267 /* We didn't find a closing Store Exclusive instruction, fall back. */
2271 /* Insert breakpoint after the end of the atomic sequence. */
2272 breaks[0] = loc + insn_size;
2274 /* Check for duplicated breakpoints, and also check that the second
2275 breakpoint is not within the atomic sequence. */
2277 && (breaks[1] == breaks[0]
2278 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2279 last_breakpoint = 0;
2281 /* Insert the breakpoint at the end of the sequence, and one at the
2282 destination of the conditional branch, if it exists. */
2283 for (index = 0; index <= last_breakpoint; index++)
2284 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
/* Per-step state for displaced stepping (executing a copied instruction
   out of line, then fixing up the PC).  */
2289 struct displaced_step_closure
2291 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2292 is being displaced stepping. */
2295 /* PC adjustment offset after displaced stepping. */
2299 /* Data when visiting instructions for displaced stepping. */
2301 struct aarch64_displaced_step_data
2303 struct aarch64_insn_data base;
2305 /* The address where the instruction will be executed at. */
2307 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2308 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2309 /* Number of instructions in INSN_BUF. */
2310 unsigned insn_count;
2311 /* Registers when doing displaced stepping. */
2312 struct regcache *regs;
/* Closure shared with the fixup phase; visitors record PC adjustments
   and condition flags here.  */
2314 struct displaced_step_closure *dsc;
2317 /* Implementation of aarch64_insn_visitor method "b". */
/* Relocate an unconditional B/BL.  If the branch target is still reachable
   from the scratch pad, emit a plain B; BL's LR side effect is simulated
   by writing insn_addr + 4 into LR directly.  If unreachable, emit a NOP
   and let pc_adjust perform the jump at fixup time.  */
2320 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2321 struct aarch64_insn_data *data)
2323 struct aarch64_displaced_step_data *dsd
2324 = (struct aarch64_displaced_step_data *) data;
2325 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2327 if (can_encode_int32 (new_offset, 28))
2329 /* Emit B rather than BL, because executing BL on a new address
2330 will get the wrong address into LR. In order to avoid this,
2331 we emit B, and update LR if the instruction is BL. */
2332 emit_b (dsd->insn_buf, 0, new_offset);
2338 emit_nop (dsd->insn_buf);
2340 dsd->dsc->pc_adjust = offset;
/* NOTE(review): the "if (is_bl)" guard around the LR update is elided
   in this excerpt.  */
2346 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2347 data->insn_addr + 4);
2351 /* Implementation of aarch64_insn_visitor method "b_cond". */
/* Replace B.COND with a local conditional skip in the scratch pad; the
   fixup phase inspects which path the PC took to decide whether the
   branch was taken, then applies pc_adjust accordingly.  */
2354 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2355 struct aarch64_insn_data *data)
2357 struct aarch64_displaced_step_data *dsd
2358 = (struct aarch64_displaced_step_data *) data;
2360 /* GDB has to fix up PC after displaced step this instruction
2361 differently according to the condition is true or false. Instead
2362 of checking COND against conditional flags, we can use
2363 the following instructions, and GDB can tell how to fix up PC
2364 according to the PC value.
2366 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2372 emit_bcond (dsd->insn_buf, cond, 8);
2374 dsd->dsc->pc_adjust = offset;
2375 dsd->insn_count = 1;
2378 /* Dynamically allocate a new register. If we know the register
2379 statically, we should make it a global as above instead of using this
/* Convenience constructor for a struct aarch64_register value: register
   number NUM, 64-bit when IS64 is non-zero.  */
2382 static struct aarch64_register
2383 aarch64_register (unsigned num, int is64)
2385 return (struct aarch64_register) { num, is64 };
2388 /* Implementation of aarch64_insn_visitor method "cb". */
/* Relocate CBZ/CBNZ: emit a local compare-and-branch with a fixed +8
   skip in the scratch pad; the original OFFSET is applied at fixup time
   based on which path the PC took (same scheme as b_cond).  */
2391 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2392 const unsigned rn, int is64,
2393 struct aarch64_insn_data *data)
2395 struct aarch64_displaced_step_data *dsd
2396 = (struct aarch64_displaced_step_data *) data;
2398 /* The offset is out of range for a compare and branch
2399 instruction. We can use the following instructions instead:
2401 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2406 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2407 dsd->insn_count = 1;
2409 dsd->dsc->pc_adjust = offset;
2412 /* Implementation of aarch64_insn_visitor method "tb". */
/* Relocate TBZ/TBNZ analogously to CBZ/CBNZ: local test-bit-and-branch
   with a +8 skip; actual branch offset resolved during fixup.  */
2415 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2416 const unsigned rt, unsigned bit,
2417 struct aarch64_insn_data *data)
2419 struct aarch64_displaced_step_data *dsd
2420 = (struct aarch64_displaced_step_data *) data;
2422 /* The offset is out of range for a test bit and branch
2423 instruction We can use the following instructions instead:
2425 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2431 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2432 dsd->insn_count = 1;
2434 dsd->dsc->pc_adjust = offset;
2437 /* Implementation of aarch64_insn_visitor method "adr". */
/* ADR/ADRP is PC-relative, so its result at the scratch address would be
   wrong.  Compute the correct address from the original insn_addr, write
   it straight into Xd, and execute a NOP in the scratch pad.  */
2440 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2441 const int is_adrp, struct aarch64_insn_data *data)
2443 struct aarch64_displaced_step_data *dsd
2444 = (struct aarch64_displaced_step_data *) data;
2445 /* We know exactly the address the ADR{P,} instruction will compute.
2446 We can just write it to the destination register. */
2447 CORE_ADDR address = data->insn_addr + offset;
/* NOTE(review): the "if (is_adrp)"/else structure is elided in this
   excerpt; ADRP masks the low 12 bits below.  */
2451 /* Clear the lower 12 bits of the offset to get the 4K page. */
2452 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2456 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2459 dsd->dsc->pc_adjust = 4;
2460 emit_nop (dsd->insn_buf);
2461 dsd->insn_count = 1;
2464 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
/* PC-relative literal load: materialize the literal's absolute address in
   Rt, then emit LDR/LDRSW Rt, [Rt, #0] so the scratch-pad copy loads from
   the correct original location.  */
2467 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2468 const unsigned rt, const int is64,
2469 struct aarch64_insn_data *data)
2471 struct aarch64_displaced_step_data *dsd
2472 = (struct aarch64_displaced_step_data *) data;
2473 CORE_ADDR address = data->insn_addr + offset;
2474 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2476 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
/* LDRSW for the sign-extending 32->64 form; plain LDR otherwise.  */
2480 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2481 aarch64_register (rt, 1), zero);
2483 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2484 aarch64_register (rt, 1), zero);
2486 dsd->dsc->pc_adjust = 4;
2489 /* Implementation of aarch64_insn_visitor method "others". */
/* Any instruction with no PC-relative semantics is copied verbatim.  A
   RET (encoding 0xd65f0000 | Rn<<5) already sets the PC itself, so no
   post-step adjustment is needed; everything else advances PC by 4.  */
2492 aarch64_displaced_step_others (const uint32_t insn,
2493 struct aarch64_insn_data *data)
2495 struct aarch64_displaced_step_data *dsd
2496 = (struct aarch64_displaced_step_data *) data;
2498 aarch64_emit_insn (dsd->insn_buf, insn);
2499 dsd->insn_count = 1;
2501 if ((insn & 0xfffffc1f) == 0xd65f0000)
2504 dsd->dsc->pc_adjust = 0;
2507 dsd->dsc->pc_adjust = 4;
/* Visitor table wiring the displaced-stepping handlers above into
   aarch64_relocate_instruction's dispatch.  Order must match the
   aarch64_insn_visitor member layout.  */
2510 static const struct aarch64_insn_visitor visitor =
2512 aarch64_displaced_step_b,
2513 aarch64_displaced_step_b_cond,
2514 aarch64_displaced_step_cb,
2515 aarch64_displaced_step_tb,
2516 aarch64_displaced_step_adr,
2517 aarch64_displaced_step_ldr_literal,
2518 aarch64_displaced_step_others,
2521 /* Implement the "displaced_step_copy_insn" gdbarch method. */
/* Decode the instruction at FROM, relocate it via the visitor table into
   the scratch pad at TO, and return a closure for the later fixup.
   Returns NULL (falling back to in-line stepping) for undecodable
   instructions and for Load-Exclusive, which must not be displaced.  */
2523 struct displaced_step_closure *
2524 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2525 CORE_ADDR from, CORE_ADDR to,
2526 struct regcache *regs)
2528 struct displaced_step_closure *dsc = NULL;
2529 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2530 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2531 struct aarch64_displaced_step_data dsd;
2534 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2537 /* Look for a Load Exclusive instruction which begins the sequence. */
2538 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2540 /* We can't displaced step atomic sequences. */
2544 dsc = XCNEW (struct displaced_step_closure)
2545 dsd.base.insn_addr = from;
2550 aarch64_relocate_instruction (insn, &visitor,
2551 (struct aarch64_insn_data *) &dsd);
2552 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2554 if (dsd.insn_count != 0)
2558 /* Instruction can be relocated to scratch pad. Copy
2559 relocated instruction(s) there. */
2560 for (i = 0; i < dsd.insn_count; i++)
2562 if (debug_displaced)
2564 debug_printf ("displaced: writing insn ");
2565 debug_printf ("%.8x", dsd.insn_buf[i]);
2566 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2568 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2569 (ULONGEST) dsd.insn_buf[i]);
2581 /* Implement the "displaced_step_fixup" gdbarch method.

   After the relocated copy has executed at TO, repair the thread's PC
   so execution continues as if the original instruction had run at
   FROM.  */
2584 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2585 struct displaced_step_closure *dsc,
2586 CORE_ADDR from, CORE_ADDR to,
2587 struct regcache *regs)
2593 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2596 /* Condition is true. */
/* PC stopped one insn past the scratch copy: branch not taken.  */
2598 else if (pc - to == 4)
2600 /* Condition is false. */
/* Any other PC means the step went somewhere unexpected.  */
2604 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2607 if (dsc->pc_adjust != 0)
2609 if (debug_displaced)
2611 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2612 paddress (gdbarch, from), dsc->pc_adjust);
/* Point the PC back into the original instruction stream,
   pc_adjust bytes past the original instruction's address.  */
2614 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2615 from + dsc->pc_adjust);
2619 /* Implement the "displaced_step_hw_singlestep" gdbarch method.

   NOTE(review): the body is not visible in this chunk; upstream this
   simply returns 1 to request a hardware single-step over the
   scratch-pad copy — confirm against the full file.  */
2622 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2623 struct displaced_step_closure *closure)
2628 /* Initialize the current architecture based on INFO. If possible,
2629 re-use an architecture from ARCHES, which is a list of
2630 architectures already created during this debugging session.
2632 Called e.g. at program startup, when reading a core file, and when
2633 reading a binary file. */
2635 static struct gdbarch *
2636 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2638 struct gdbarch_tdep *tdep;
2639 struct gdbarch *gdbarch;
2640 struct gdbarch_list *best_arch;
2641 struct tdesc_arch_data *tdesc_data = NULL;
2642 const struct target_desc *tdesc = info.target_desc;
2645 const struct tdesc_feature *feature;
2647 int num_pseudo_regs = 0;
2649 /* Ensure we always have a target descriptor. */
2650 if (!tdesc_has_registers (tdesc))
/* Fall back to the built-in description from features/aarch64.c.  */
2651 tdesc = tdesc_aarch64;
/* The core feature (X0-X30, SP, PC, CPSR) is mandatory.  */
2655 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2657 if (feature == NULL)
2660 tdesc_data = tdesc_data_alloc ();
2662 /* Validate the descriptor provides the mandatory core R registers
2663 and allocate their numbers. */
2664 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2666 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2667 aarch64_r_register_names[i]);
2669 num_regs = AARCH64_X0_REGNUM + i;
2671 /* Look for the V registers. */
2672 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2675 /* Validate the descriptor provides the mandatory V registers
2676 and allocate their numbers. */
2677 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2679 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2680 aarch64_v_register_names[i]);
2682 num_regs = AARCH64_V0_REGNUM + i;
/* Each V register is viewable as five scalar pseudo registers.  */
2684 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2685 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2686 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2687 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2688 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
/* Validation failed: release the partially-built register data.  */
2693 tdesc_data_cleanup (tdesc_data);
2697 /* AArch64 code is always little-endian. */
2698 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2700 /* If there is already a candidate, use it. */
2701 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2703 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2705 /* Found a match. */
2709 if (best_arch != NULL)
/* Reusing an existing gdbarch: our tdesc_data is no longer needed.  */
2711 if (tdesc_data != NULL)
2712 tdesc_data_cleanup (tdesc_data);
2713 return best_arch->gdbarch;
/* No match: build a fresh gdbarch + tdep pair.  */
2716 tdep = XCNEW (struct gdbarch_tdep);
2717 gdbarch = gdbarch_alloc (&info, tdep);
2719 /* This should be low enough for everything. */
2720 tdep->lowest_pc = 0x20;
2721 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2722 tdep->jb_elt_size = 8;
2724 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2725 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2727 /* Frame handling. */
2728 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2729 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2730 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2732 /* Advance PC across function entry code. */
2733 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2735 /* The stack grows downward. */
2736 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2738 /* Breakpoint manipulation. */
2739 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2740 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2741 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2743 /* Information about registers, etc. */
2744 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2745 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2746 set_gdbarch_num_regs (gdbarch, num_regs);
2748 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2749 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2750 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2751 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2752 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2753 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2754 aarch64_pseudo_register_reggroup_p);
/* LP64 ABI type sizes and formats.  */
2757 set_gdbarch_short_bit (gdbarch, 16);
2758 set_gdbarch_int_bit (gdbarch, 32);
2759 set_gdbarch_float_bit (gdbarch, 32);
2760 set_gdbarch_double_bit (gdbarch, 64);
2761 set_gdbarch_long_double_bit (gdbarch, 128);
2762 set_gdbarch_long_bit (gdbarch, 64);
2763 set_gdbarch_long_long_bit (gdbarch, 64);
2764 set_gdbarch_ptr_bit (gdbarch, 64);
/* Plain char is unsigned in the AArch64 ABI.  */
2765 set_gdbarch_char_signed (gdbarch, 0);
2766 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2767 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2768 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2770 /* Internal <-> external register number maps. */
2771 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2773 /* Returning results. */
2774 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2777 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2779 /* Virtual tables. */
2780 set_gdbarch_vbit_in_delta (gdbarch, 1);
2782 /* Hook in the ABI-specific overrides, if they have been registered. */
2783 info.target_desc = tdesc;
2784 info.tdep_info = (void *) tdesc_data;
2785 gdbarch_init_osabi (info, gdbarch);
2787 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2789 /* Add some default predicates. */
2790 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2791 dwarf2_append_unwinders (gdbarch);
2792 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2794 frame_base_set_default (gdbarch, &aarch64_normal_base);
2796 /* Now we have tuned the configuration, set a few final things,
2797 based on what the OS ABI has told us. */
2799 if (tdep->jb_pc >= 0)
2800 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2802 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
/* Bind the (possibly OS-ABI-adjusted) description to the gdbarch.  */
2804 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2806 /* Add standard register aliases. */
2807 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2808 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2809 value_of_aarch64_user_reg,
2810 &aarch64_register_aliases[i].regnum);
/* Implement the "dump_tdep" gdbarch method: print this target's tdep
   state for "maint print architecture".  */
2816 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2818 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2823 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2824 paddress (gdbarch, tdep->lowest_pc));
2827 /* Suppress warning from -Wmissing-prototypes. */
2828 extern initialize_file_ftype _initialize_aarch64_tdep;
/* Module initializer: register the AArch64 gdbarch constructor and the
   built-in target description, and install the "set/show debug aarch64"
   maintenance commands.  */
2831 _initialize_aarch64_tdep (void)
2833 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2836 initialize_tdesc_aarch64 ();
2838 /* Debug this file's internals. */
2839 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2840 Set AArch64 debugging."), _("\
2841 Show AArch64 debugging."), _("\
2842 When on, AArch64 specific debugging is enabled."),
2845 &setdebuglist, &showdebuglist);
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate an array of LENGTH register numbers owned by the
   insn_decode_record (REGS) and copy the collected numbers in from
   RECORD_BUF.  No-op when LENGTH is zero.  The array is later released
   by deallocate_reg_mem.

   Note: the extracted original read "memcpy(®S[0], ...)" — the '®'
   is an encoding corruption of "&RE" (HTML entity "&reg;"), restored
   here to "&REGS[0]".  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
              } \
          } \
        while (0)

/* Likewise for memory records: allocate LENGTH aarch64_mem_r entries
   (MEMS) and copy them in from RECORD_BUF.  No-op when LENGTH is
   zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
                memcpy (&MEMS->len, &RECORD_BUF[0], \
                        sizeof (struct aarch64_mem_r) * LENGTH); \
              } \
          } \
        while (0)
2875 /* AArch64 record/replay structures and enumerations. */
/* One recorded memory access: the byte length and target address of a
   store that replay must be able to undo.  */
2877 struct aarch64_mem_r
2879 uint64_t len; /* Record length. */
2880 uint64_t addr; /* Memory address. */
/* Result codes returned by the per-class record handlers below.  */
2883 enum aarch64_record_result
2885 AARCH64_RECORD_SUCCESS,
2886 AARCH64_RECORD_FAILURE,
2887 AARCH64_RECORD_UNSUPPORTED,
2888 AARCH64_RECORD_UNKNOWN
/* Working state for recording a single instruction: the decoded insn,
   its address, and the register/memory side effects collected by the
   record handlers.  aarch64_regs/aarch64_mems are heap arrays owned by
   this record and freed by deallocate_reg_mem.  */
2891 typedef struct insn_decode_record_t
2893 struct gdbarch *gdbarch;
2894 struct regcache *regcache;
2895 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2896 uint32_t aarch64_insn; /* Insn to be recorded. */
2897 uint32_t mem_rec_count; /* Count of memory records. */
2898 uint32_t reg_rec_count; /* Count of register records. */
2899 uint32_t *aarch64_regs; /* Registers to be recorded. */
2900 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2901 } insn_decode_record;
2903 /* Record handler for data processing - register instructions.

   Records the destination register (and CPSR when the instruction sets
   flags) for the data-processing (register) instruction class.  */
2906 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2908 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2909 uint32_t record_buf[4];
2911 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2912 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2913 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
/* Bit 28 clear: logical / add-subtract (shifted or extended reg).  */
2915 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2919 /* Logical (shifted register). */
2920 if (insn_bits24_27 == 0x0a)
/* opc == 0b11 (ANDS/BICS) is the flag-setting form.  */
2921 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03)
2923 else if (insn_bits24_27 == 0x0b)
/* Add/subtract: the S bit (29) selects the flag-setting form.  */
2924 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2926 return AARCH64_RECORD_UNKNOWN;
2928 record_buf[0] = reg_rd;
2929 aarch64_insn_r->reg_rec_count = 1;
2931 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
/* Bit 28 set: the remaining data-processing (register) groups.  */
2935 if (insn_bits24_27 == 0x0b)
2937 /* Data-processing (3 source). */
2938 record_buf[0] = reg_rd;
2939 aarch64_insn_r->reg_rec_count = 1;
2941 else if (insn_bits24_27 == 0x0a)
2943 if (insn_bits21_23 == 0x00)
2945 /* Add/subtract (with carry). */
2946 record_buf[0] = reg_rd;
2947 aarch64_insn_r->reg_rec_count = 1;
2948 if (bit (aarch64_insn_r->aarch64_insn, 29))
2950 record_buf[1] = AARCH64_CPSR_REGNUM;
2951 aarch64_insn_r->reg_rec_count = 2;
2954 else if (insn_bits21_23 == 0x02)
2956 /* Conditional compare (register) and conditional compare
2957 (immediate) instructions. */
2958 record_buf[0] = AARCH64_CPSR_REGNUM;
2959 aarch64_insn_r->reg_rec_count = 1;
2961 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2963 /* Conditional select. */
2964 /* Data-processing (2 source). */
2965 /* Data-processing (1 source). */
2966 record_buf[0] = reg_rd;
2967 aarch64_insn_r->reg_rec_count = 1;
2970 return AARCH64_RECORD_UNKNOWN;
2974 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2976 return AARCH64_RECORD_SUCCESS;
2979 /* Record handler for data processing - immediate instructions.

   Records the destination register (and CPSR for flag-setting forms)
   for the data-processing (immediate) instruction class.  */
2982 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2984 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
2985 uint32_t record_buf[4];
2987 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2988 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
2989 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2990 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
/* Groups that only ever write Rd, never the flags.  */
2992 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2993 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2994 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2996 record_buf[0] = reg_rd;
2997 aarch64_insn_r->reg_rec_count = 1;
2999 else if (insn_bits24_27 == 0x01)
3001 /* Add/Subtract (immediate). */
/* The S bit (29) selects the flag-setting ADDS/SUBS form.  */
3002 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3003 record_buf[0] = reg_rd;
3004 aarch64_insn_r->reg_rec_count = 1;
3006 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3008 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3010 /* Logical (immediate). */
/* opc == 0b11 (ANDS immediate) sets flags.  */
3011 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3012 record_buf[0] = reg_rd;
3013 aarch64_insn_r->reg_rec_count = 1;
3015 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3018 return AARCH64_RECORD_UNKNOWN;
3020 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3022 return AARCH64_RECORD_SUCCESS;
3025 /* Record handler for branch, exception generation and system instructions.

   Records PC (and LR for branch-and-link forms), CPSR or Rt for system
   instructions, and delegates SVC to the OS-ABI syscall recorder.  */
3028 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3030 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3031 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3032 uint32_t record_buf[4];
3034 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3035 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3036 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3038 if (insn_bits28_31 == 0x0d)
3040 /* Exception generation instructions. */
3041 if (insn_bits24_27 == 0x04)
/* SVC: opc2 == 0, LL == 0b01.  Hand off to the OS-ABI-specific
   syscall recorder with the syscall number read from X8.  */
3043 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3044 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3045 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3047 ULONGEST svc_number;
3049 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3051 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
/* HLT/BRK/DCPS etc. are not supported for recording.  */
3055 return AARCH64_RECORD_UNSUPPORTED;
3057 /* System instructions. */
3058 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3060 uint32_t reg_rt, reg_crn;
3062 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3063 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3065 /* Record rt in case of sysl and mrs instructions. */
3066 if (bit (aarch64_insn_r->aarch64_insn, 21))
3068 record_buf[0] = reg_rt;
3069 aarch64_insn_r->reg_rec_count = 1;
3071 /* Record cpsr for hint and msr(immediate) instructions. */
3072 else if (reg_crn == 0x02 || reg_crn == 0x04)
3074 record_buf[0] = AARCH64_CPSR_REGNUM;
3075 aarch64_insn_r->reg_rec_count = 1;
3078 /* Unconditional branch (register). */
3079 else if((insn_bits24_27 & 0x0e) == 0x06)
3081 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
/* BLR also clobbers the link register.  */
3082 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3083 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3086 return AARCH64_RECORD_UNKNOWN;
3088 /* Unconditional branch (immediate). */
3089 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3091 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
/* BL (bit 31 set) also writes the link register.  */
3092 if (bit (aarch64_insn_r->aarch64_insn, 31))
3093 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3096 /* Compare & branch (immediate), Test & branch (immediate) and
3097 Conditional branch (immediate). */
3098 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3100 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3102 return AARCH64_RECORD_SUCCESS;
3105 /* Record handler for advanced SIMD load and store instructions.

   Handles both the "single structure" (bit 24 set) and "multiple
   structures" forms; collects the V registers written by loads, the
   memory ranges written by stores, and the base register when
   writeback (bit 23) is used.  */
3108 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3111 uint64_t addr_offset = 0;
3112 uint32_t record_buf[24];
3113 uint64_t record_buf_mem[24];
3114 uint32_t reg_rn, reg_rt;
3115 uint32_t reg_index = 0, mem_index = 0;
3116 uint8_t opcode_bits, size_bits;
3118 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3119 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3120 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3121 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
/* Base address for the memory side of the transfer.  */
3122 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3125 debug_printf ("Process record: Advanced SIMD load/store\n");
3127 /* Load/store single structure. */
3128 if (bit (aarch64_insn_r->aarch64_insn, 24))
3130 uint8_t sindex, scale, selem, esize, replicate = 0;
3131 scale = opcode_bits >> 2;
/* selem = number of structure elements: opcode bit 1 and insn
   bit 21 (S) combine to give 1..4.  */
3132 selem = ((opcode_bits & 0x02) |
3133 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
/* Validate the element-size encoding for each scale.  */
3137 if (size_bits & 0x01)
3138 return AARCH64_RECORD_UNKNOWN;
3141 if ((size_bits >> 1) & 0x01)
3142 return AARCH64_RECORD_UNKNOWN;
3143 if (size_bits & 0x01)
3145 if (!((opcode_bits >> 1) & 0x01))
3148 return AARCH64_RECORD_UNKNOWN;
/* LD<n>R (load and replicate) form.  */
3152 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3159 return AARCH64_RECORD_UNKNOWN;
/* Replicating load: each of the selem registers is written.  */
3165 for (sindex = 0; sindex < selem; sindex++)
3167 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3168 reg_rt = (reg_rt + 1) % 32;
/* Non-replicating single structure: loads record V registers,
   stores record the touched memory range per element.  */
3172 for (sindex = 0; sindex < selem; sindex++)
3174 if (bit (aarch64_insn_r->aarch64_insn, 22))
3175 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3178 record_buf_mem[mem_index++] = esize / 8;
3179 record_buf_mem[mem_index++] = address + addr_offset;
3181 addr_offset = addr_offset + (esize / 8);
3182 reg_rt = (reg_rt + 1) % 32;
3186 /* Load/store multiple structure. */
3189 uint8_t selem, esize, rpt, elements;
3190 uint8_t eindex, rindex;
3192 esize = 8 << size_bits;
/* Q bit selects 128- vs 64-bit vector length.  */
3193 if (bit (aarch64_insn_r->aarch64_insn, 30))
3194 elements = 128 / esize;
3196 elements = 64 / esize;
/* Decode repeat count (rpt) and elements-per-structure (selem)
   from the opcode field.  */
3198 switch (opcode_bits)
3200 /*LD/ST4 (4 Registers). */
3205 /*LD/ST1 (4 Registers). */
3210 /*LD/ST3 (3 Registers). */
3215 /*LD/ST1 (3 Registers). */
3220 /*LD/ST1 (1 Register). */
3225 /*LD/ST2 (2 Registers). */
3230 /*LD/ST1 (2 Registers). */
3236 return AARCH64_RECORD_UNSUPPORTED;
3239 for (rindex = 0; rindex < rpt; rindex++)
3240 for (eindex = 0; eindex < elements; eindex++)
3242 uint8_t reg_tt, sindex;
3243 reg_tt = (reg_rt + rindex) % 32;
3244 for (sindex = 0; sindex < selem; sindex++)
3246 if (bit (aarch64_insn_r->aarch64_insn, 22))
3247 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3250 record_buf_mem[mem_index++] = esize / 8;
3251 record_buf_mem[mem_index++] = address + addr_offset;
3253 addr_offset = addr_offset + (esize / 8);
3254 reg_tt = (reg_tt + 1) % 32;
/* Writeback forms also modify the base register.  */
3259 if (bit (aarch64_insn_r->aarch64_insn, 23))
3260 record_buf[reg_index++] = reg_rn;
3262 aarch64_insn_r->reg_rec_count = reg_index;
/* record_buf_mem holds (len, addr) pairs, hence the divide by 2.  */
3263 aarch64_insn_r->mem_rec_count = mem_index / 2;
3264 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3266 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3268 return AARCH64_RECORD_SUCCESS;
3271 /* Record handler for load and store instructions.

   Dispatches over the load/store encoding groups (exclusive, literal,
   pair, unsigned-immediate, register-offset, immediate/unprivileged,
   advanced SIMD) and records the registers written by loads and the
   memory ranges written by stores.  */
3274 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3276 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3277 uint8_t insn_bit23, insn_bit21;
3278 uint8_t opc, size_bits, ld_flag, vector_flag;
3279 uint32_t reg_rn, reg_rt, reg_rt2;
3280 uint64_t datasize, offset;
3281 uint32_t record_buf[8];
3282 uint64_t record_buf_mem[8];
3285 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3286 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3287 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3288 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3289 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
/* Bit 22: load (1) vs store (0).  Bit 26: SIMD&FP register file.  */
3290 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3291 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3292 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3293 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3294 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3295 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3297 /* Load/store exclusive. */
3298 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3301 debug_printf ("Process record: load/store exclusive\n");
/* Loads write Rt (and Rt2 for the pair form).  */
3305 record_buf[0] = reg_rt;
3306 aarch64_insn_r->reg_rec_count = 1;
3309 record_buf[1] = reg_rt2;
3310 aarch64_insn_r->reg_rec_count = 2;
/* Stores: pair forms write twice the element size.  */
3316 datasize = (8 << size_bits) * 2;
3318 datasize = (8 << size_bits);
3319 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3321 record_buf_mem[0] = datasize / 8;
3322 record_buf_mem[1] = address;
3323 aarch64_insn_r->mem_rec_count = 1;
/* STXR-style stores also write the status register Rs.  */
3326 /* Save register rs. */
3327 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3328 aarch64_insn_r->reg_rec_count = 1;
3332 /* Load register (literal) instructions decoding. */
3333 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3336 debug_printf ("Process record: load register (literal)\n")
3338 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3340 record_buf[0] = reg_rt;
3341 aarch64_insn_r->reg_rec_count = 1;
3343 /* All types of load/store pair instructions decoding. */
3344 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3347 debug_printf ("Process record: load/store pair\n");
/* Load pair: both destination registers are clobbered.  */
3353 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3354 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3358 record_buf[0] = reg_rt;
3359 record_buf[1] = reg_rt2;
3361 aarch64_insn_r->reg_rec_count = 2;
/* Store pair: compute the effective address from the scaled,
   sign-extended 7-bit immediate.  */
3366 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3368 size_bits = size_bits >> 1;
3369 datasize = 8 << (2 + size_bits);
/* Sign-extend the 7-bit immediate (bit 6 is the sign).  */
3370 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3371 offset = offset << (2 + size_bits);
3372 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
/* Post-indexed forms use the unmodified base address.  */
3374 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3376 if (imm7_off & 0x40)
3377 address = address - offset;
3379 address = address + offset;
3382 record_buf_mem[0] = datasize / 8;
3383 record_buf_mem[1] = address;
3384 record_buf_mem[2] = datasize / 8;
3385 record_buf_mem[3] = address + (datasize / 8);
3386 aarch64_insn_r->mem_rec_count = 2;
/* Writeback forms also modify the base register.  */
3388 if (bit (aarch64_insn_r->aarch64_insn, 23))
3389 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3391 /* Load/store register (unsigned immediate) instructions. */
3392 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3394 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
/* PRFM and reserved encodings: only 64-bit size is valid here.  */
3401 if (size_bits != 0x03)
3404 return AARCH64_RECORD_UNKNOWN;
3408 debug_printf ("Process record: load/store (unsigned immediate):"
3409 " size %x V %d opc %x\n", size_bits, vector_flag,
/* Store: record the target memory range.  */
3415 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3416 datasize = 8 << size_bits;
3417 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3419 offset = offset << size_bits;
3420 address = address + offset;
3422 record_buf_mem[0] = datasize >> 3;
3423 record_buf_mem[1] = address;
3424 aarch64_insn_r->mem_rec_count = 1;
/* Load: record the destination register.  */
3429 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3431 record_buf[0] = reg_rt;
3432 aarch64_insn_r->reg_rec_count = 1;
3435 /* Load/store register (register offset) instructions. */
3436 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3437 && insn_bits10_11 == 0x02 && insn_bit21)
3440 debug_printf ("Process record: load/store (register offset)\n");
3441 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3448 if (size_bits != 0x03)
3451 return AARCH64_RECORD_UNKNOWN;
3455 ULONGEST reg_rm_val;
/* NOTE(review): "®_rm_val" below is mojibake for "&reg_rm_val"
   ("&reg" was collapsed into the HTML entity for '®' by an
   encoding pass) — restore when re-encoding the file.  */
3457 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3458 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
/* S bit (12): scale the register offset by the element size.  */
3459 if (bit (aarch64_insn_r->aarch64_insn, 12))
3460 offset = reg_rm_val << size_bits;
3462 offset = reg_rm_val;
3463 datasize = 8 << size_bits;
3464 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3466 address = address + offset;
3467 record_buf_mem[0] = datasize >> 3;
3468 record_buf_mem[1] = address;
3469 aarch64_insn_r->mem_rec_count = 1;
3474 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3476 record_buf[0] = reg_rt;
3477 aarch64_insn_r->reg_rec_count = 1;
3480 /* Load/store register (immediate and unprivileged) instructions. */
3481 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3486 debug_printf ("Process record: load/store "
3487 "(immediate and unprivileged)\n");
3489 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3496 if (size_bits != 0x03)
3499 return AARCH64_RECORD_UNKNOWN;
/* Sign-extend the 9-bit immediate (bit 8 is the sign).  */
3504 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3505 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3506 datasize = 8 << size_bits;
3507 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
/* Post-indexed (10:11 == 01) uses the unmodified base.  */
3509 if (insn_bits10_11 != 0x01)
3511 if (imm9_off & 0x0100)
3512 address = address - offset;
3514 address = address + offset;
3516 record_buf_mem[0] = datasize >> 3;
3517 record_buf_mem[1] = address;
3518 aarch64_insn_r->mem_rec_count = 1;
3523 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3525 record_buf[0] = reg_rt;
3526 aarch64_insn_r->reg_rec_count = 1;
/* Pre/post-indexed forms also write back the base register.  */
3528 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3529 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3531 /* Advanced SIMD load/store instructions. */
3533 return aarch64_record_asimd_load_store (aarch64_insn_r);
3535 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3537 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3539 return AARCH64_RECORD_SUCCESS;
3542 /* Record handler for data processing SIMD and floating point instructions.

   Exactly one destination register is recorded: an X register for
   FP-to-integer conversions and moves, a V register otherwise, or
   CPSR for the compare forms.  */
3545 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3547 uint8_t insn_bit21, opcode, rmode, reg_rd;
3548 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3549 uint8_t insn_bits11_14;
3550 uint32_t record_buf[2];
3552 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3553 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3554 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3555 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3556 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3557 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3558 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3559 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3560 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3563 debug_printf ("Process record: data processing SIMD/FP: ");
/* Scalar floating-point group.  */
3565 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3567 /* Floating point - fixed point conversion instructions. */
3571 debug_printf ("FP - fixed point conversion");
/* FCVTZ* to a general register writes Xd; otherwise Vd.  */
3573 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3574 record_buf[0] = reg_rd;
3576 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3578 /* Floating point - conditional compare instructions. */
3579 else if (insn_bits10_11 == 0x01)
3582 debug_printf ("FP - conditional compare");
3584 record_buf[0] = AARCH64_CPSR_REGNUM;
3586 /* Floating point - data processing (2-source) and
3587 conditional select instructions. */
3588 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3591 debug_printf ("FP - DP (2-source)");
3593 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3595 else if (insn_bits10_11 == 0x00)
3597 /* Floating point - immediate instructions. */
3598 if ((insn_bits12_15 & 0x01) == 0x01
3599 || (insn_bits12_15 & 0x07) == 0x04)
3602 debug_printf ("FP - immediate");
3603 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3605 /* Floating point - compare instructions. */
3606 else if ((insn_bits12_15 & 0x03) == 0x02)
/* NOTE(review): this debug string duplicates the "FP - immediate"
   text above although this is the compare branch — presumably a
   copy-paste slip; the recorded register (CPSR) is correct.  */
3609 debug_printf ("FP - immediate");
3610 record_buf[0] = AARCH64_CPSR_REGNUM;
3612 /* Floating point - integer conversions instructions. */
3613 else if (insn_bits12_15 == 0x00)
3615 /* Convert float to integer instruction. */
3616 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3619 debug_printf ("float to int conversion");
3621 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3623 /* Convert integer to float instruction. */
3624 else if ((opcode >> 1) == 0x01 && !rmode)
3627 debug_printf ("int to float conversion");
3629 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3631 /* Move float to integer instruction. */
3632 else if ((opcode >> 1) == 0x03)
3635 debug_printf ("move float to int");
/* FMOV direction: opcode bit 0 clear means FP -> integer.  */
3637 if (!(opcode & 0x01))
3638 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3640 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3643 return AARCH64_RECORD_UNKNOWN;
3646 return AARCH64_RECORD_UNKNOWN;
3649 return AARCH64_RECORD_UNKNOWN;
/* Advanced SIMD group.  */
3651 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3654 debug_printf ("SIMD copy");
3656 /* Advanced SIMD copy instructions. */
3657 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3658 && !bit (aarch64_insn_r->aarch64_insn, 15)
3659 && bit (aarch64_insn_r->aarch64_insn, 10))
/* SMOV/UMOV extract to a general register.  */
3661 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3662 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3664 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3667 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3669 /* All remaining floating point or advanced SIMD instructions. */
3673 debug_printf ("all remain");
3675 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3679 debug_printf ("\n");
/* Exactly one destination register was selected above.  */
3681 aarch64_insn_r->reg_rec_count++;
3682 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3683 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3685 return AARCH64_RECORD_SUCCESS;
3688 /* Decodes insns type and invokes its record handler.

   Top-level dispatch keyed on instruction bits 25-28, mirroring the
   A64 top-level encoding table.  */
3691 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3693 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3695 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3696 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3697 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3698 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3700 /* Data processing - immediate instructions. */
3701 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3702 return aarch64_record_data_proc_imm (aarch64_insn_r);
3704 /* Branch, exception generation and system instructions. */
3705 if (ins_bit26 && !ins_bit27 && ins_bit28)
3706 return aarch64_record_branch_except_sys (aarch64_insn_r);
3708 /* Load and store instructions. */
3709 if (!ins_bit25 && ins_bit27)
3710 return aarch64_record_load_store (aarch64_insn_r);
3712 /* Data processing - register instructions. */
3713 if (ins_bit25 && !ins_bit26 && ins_bit27)
3714 return aarch64_record_data_proc_reg (aarch64_insn_r);
3716 /* Data processing - SIMD and floating point instructions. */
3717 if (ins_bit25 && ins_bit26 && ins_bit27)
3718 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
/* Anything else (e.g. the unallocated top-level groups).  */
3720 return AARCH64_RECORD_UNSUPPORTED;
3723 /* Cleans up local record registers and memory allocations.

   Frees the arrays allocated by REG_ALLOC/MEM_ALLOC; xfree tolerates
   NULL, so records with no entries are safe.  */
3726 deallocate_reg_mem (insn_decode_record *record)
3728 xfree (record->aarch64_regs);
3729 xfree (record->aarch64_mems);
3732 /* Parse the current instruction and record the values of the registers and
3733 memory that will be changed in current instruction to record_arch_list
3734 return -1 if something is wrong. */
3737 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3738 CORE_ADDR insn_addr)
3740 uint32_t rec_no = 0;
3741 uint8_t insn_size = 4;
3743 gdb_byte buf[insn_size];
3744 insn_decode_record aarch64_record;
3746 memset (&buf[0], 0, insn_size);
3747 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3748 target_read_memory (insn_addr, &buf[0], insn_size);
3749 aarch64_record.aarch64_insn
3750 = (uint32_t) extract_unsigned_integer (&buf[0],
3752 gdbarch_byte_order (gdbarch));
3753 aarch64_record.regcache = regcache;
3754 aarch64_record.this_addr = insn_addr;
3755 aarch64_record.gdbarch = gdbarch;
3757 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3758 if (ret == AARCH64_RECORD_UNSUPPORTED)
3760 printf_unfiltered (_("Process record does not support instruction "
3761 "0x%0x at address %s.\n"),
3762 aarch64_record.aarch64_insn,
3763 paddress (gdbarch, insn_addr));
3769 /* Record registers. */
3770 record_full_arch_list_add_reg (aarch64_record.regcache,
3772 /* Always record register CPSR. */
3773 record_full_arch_list_add_reg (aarch64_record.regcache,
3774 AARCH64_CPSR_REGNUM);
3775 if (aarch64_record.aarch64_regs)
3776 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3777 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3778 aarch64_record.aarch64_regs[rec_no]))
3781 /* Record memories. */
3782 if (aarch64_record.aarch64_mems)
3783 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3784 if (record_full_arch_list_add_mem
3785 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3786 aarch64_record.aarch64_mems[rec_no].len))
3789 if (record_full_arch_list_add_end ())
3793 deallocate_reg_mem (&aarch64_record);