1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
58 #include "features/aarch64.c"
60 #include "arch/aarch64-insn.h"
62 /* Pseudo register base numbers. */
/* Each group below is 32 registers wide (note the "+ 32" steps).
   NOTE(review): Q/D/S/H/B are presumably successively narrower views of
   the 32 V registers exposed as GDB pseudo registers — confirm against
   the pseudo-register read/write implementations elsewhere in the file.  */
63 #define AARCH64_Q0_REGNUM 0
64 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
65 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
66 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
67 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
69 /* The standard register names, and all the valid aliases for them. */
/* NOTE(review): this listing has gaps — the struct's opening brace, its
   register-number field declaration, and the array's delimiters are
   missing from the extraction.  Each entry maps an alias string to a raw
   register number.  */
72 const char *const name;
74 } aarch64_register_aliases[] =
76 /* 64-bit register names. */
77 {"fp", AARCH64_FP_REGNUM},
78 {"lr", AARCH64_LR_REGNUM},
79 {"sp", AARCH64_SP_REGNUM},
81 /* 32-bit register names. */
/* wN aliases the low half of xN; they share a raw register number.  */
82 {"w0", AARCH64_X0_REGNUM + 0},
83 {"w1", AARCH64_X0_REGNUM + 1},
84 {"w2", AARCH64_X0_REGNUM + 2},
85 {"w3", AARCH64_X0_REGNUM + 3},
86 {"w4", AARCH64_X0_REGNUM + 4},
87 {"w5", AARCH64_X0_REGNUM + 5},
88 {"w6", AARCH64_X0_REGNUM + 6},
89 {"w7", AARCH64_X0_REGNUM + 7},
90 {"w8", AARCH64_X0_REGNUM + 8},
91 {"w9", AARCH64_X0_REGNUM + 9},
92 {"w10", AARCH64_X0_REGNUM + 10},
93 {"w11", AARCH64_X0_REGNUM + 11},
94 {"w12", AARCH64_X0_REGNUM + 12},
95 {"w13", AARCH64_X0_REGNUM + 13},
96 {"w14", AARCH64_X0_REGNUM + 14},
97 {"w15", AARCH64_X0_REGNUM + 15},
98 {"w16", AARCH64_X0_REGNUM + 16},
99 {"w17", AARCH64_X0_REGNUM + 17},
100 {"w18", AARCH64_X0_REGNUM + 18},
101 {"w19", AARCH64_X0_REGNUM + 19},
102 {"w20", AARCH64_X0_REGNUM + 20},
103 {"w21", AARCH64_X0_REGNUM + 21},
104 {"w22", AARCH64_X0_REGNUM + 22},
105 {"w23", AARCH64_X0_REGNUM + 23},
106 {"w24", AARCH64_X0_REGNUM + 24},
107 {"w25", AARCH64_X0_REGNUM + 25},
108 {"w26", AARCH64_X0_REGNUM + 26},
109 {"w27", AARCH64_X0_REGNUM + 27},
110 {"w28", AARCH64_X0_REGNUM + 28},
111 {"w29", AARCH64_X0_REGNUM + 29},
112 {"w30", AARCH64_X0_REGNUM + 30},
/* AAPCS64 intra-procedure-call scratch register names for x16/x17.  */
115 {"ip0", AARCH64_X0_REGNUM + 16},
116 {"ip1", AARCH64_X0_REGNUM + 17}
119 /* The required core 'R' registers. */
120 static const char *const aarch64_r_register_names[] =
122 /* These registers must appear in consecutive RAW register number
123 order and they must begin with AARCH64_X0_REGNUM! */
/* NOTE(review): the array's opening brace and the trailing entries
   ("pc", "cpsr" in upstream GDB) are missing from this extraction —
   confirm against the original file.  */
124 "x0", "x1", "x2", "x3",
125 "x4", "x5", "x6", "x7",
126 "x8", "x9", "x10", "x11",
127 "x12", "x13", "x14", "x15",
128 "x16", "x17", "x18", "x19",
129 "x20", "x21", "x22", "x23",
130 "x24", "x25", "x26", "x27",
131 "x28", "x29", "x30", "sp",
135 /* The FP/SIMD 'V' registers. */
136 static const char *const aarch64_v_register_names[] =
138 /* These registers must appear in consecutive RAW register number
139 order and they must begin with AARCH64_V0_REGNUM! */
/* NOTE(review): the array's opening brace and trailing entries
   ("fpsr", "fpcr" in upstream GDB) are missing from this extraction.  */
140 "v0", "v1", "v2", "v3",
141 "v4", "v5", "v6", "v7",
142 "v8", "v9", "v10", "v11",
143 "v12", "v13", "v14", "v15",
144 "v16", "v17", "v18", "v19",
145 "v20", "v21", "v22", "v23",
146 "v24", "v25", "v26", "v27",
147 "v28", "v29", "v30", "v31",
152 /* AArch64 prologue cache structure. */
/* NOTE(review): the member declarations themselves (func, prev_pc,
   prev_sp, available_p, framesize, framereg) are missing from this
   extraction — only their documentation comments survive.  Restore the
   fields from the original file.  */
153 struct aarch64_prologue_cache
155 /* The program counter at the start of the function. It is used to
156 identify this frame as a prologue frame. */
159 /* The program counter at the time this frame was created; i.e. where
160 this function was called from. It is used to identify this frame as a
164 /* The stack pointer at the time this frame was created; i.e. the
165 caller's stack pointer when this function was called. It is used
166 to identify this frame. */
169 /* Is the target available to read from? */
172 /* The frame base for this frame is just prev_sp - frame size.
173 FRAMESIZE is the distance from the frame pointer to the
174 initial stack pointer. */
177 /* The register used to hold the frame pointer for this frame. */
180 /* Saved register offsets. */
181 struct trad_frame_saved_reg *saved_regs;
/* Show hook for the "show debug aarch64" command: report the current
   value of the aarch64 debugging flag.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

  /* Perform the left shift on the unsigned type: left-shifting a
     signed int32_t so that set bits reach or cross the sign bit is
     undefined behaviour in C.  The subsequent conversion to int32_t
     followed by an arithmetic right shift sign-extends the field.  */
  return ((int32_t) (insn << shift_l)) >> shift_r;
}
/* Determine if specified bits within an instruction opcode matches a
   specific pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against for a match with PATTERN.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  return (insn & mask) == pattern;
}
224 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
226 ADDR specifies the address of the opcode.
227 INSN specifies the opcode to test.
228 RD receives the 'rd' field from the decoded instruction.
229 RN receives the 'rn' field from the decoded instruction.
231 Return 1 if the opcodes matches and is decoded, otherwise 0. */
233 aarch64_decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd,
234 unsigned *rn, int32_t *imm)
236 if ((insn & 0x9f000000) == 0x91000000)
241 *rd = (insn >> 0) & 0x1f;
242 *rn = (insn >> 5) & 0x1f;
243 *imm = (insn >> 10) & 0xfff;
244 shift = (insn >> 22) & 0x3;
245 op_is_sub = (insn >> 30) & 0x1;
264 debug_printf ("decode: 0x%s 0x%x add x%u, x%u, #%d\n",
265 core_addr_to_string_nz (addr), insn, *rd, *rn,
273 /* Decode an opcode if it represents a branch via register instruction.
275 ADDR specifies the address of the opcode.
276 INSN specifies the opcode to test.
277 IS_BLR receives the 'op' bit from the decoded instruction.
278 RN receives the 'rn' field from the decoded instruction.
280 Return 1 if the opcodes matches and is decoded, otherwise 0. */
283 aarch64_decode_br (CORE_ADDR addr, uint32_t insn, int *is_blr,
286 /* 8 4 0 6 2 8 4 0 */
287 /* blr 110101100011111100000000000rrrrr */
288 /* br 110101100001111100000000000rrrrr */
289 if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
291 *is_blr = (insn >> 21) & 1;
292 *rn = (insn >> 5) & 0x1f;
296 debug_printf ("decode: 0x%s 0x%x %s 0x%x\n",
297 core_addr_to_string_nz (addr), insn,
298 *is_blr ? "blr" : "br", *rn);
306 /* Decode an opcode if it represents a ERET instruction.
308 ADDR specifies the address of the opcode.
309 INSN specifies the opcode to test.
311 Return 1 if the opcodes matches and is decoded, otherwise 0. */
314 aarch64_decode_eret (CORE_ADDR addr, uint32_t insn)
316 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
317 if (insn == 0xd69f03e0)
321 debug_printf ("decode: 0x%s 0x%x eret\n",
322 core_addr_to_string_nz (addr), insn);
329 /* Decode an opcode if it represents a MOVZ instruction.
331 ADDR specifies the address of the opcode.
332 INSN specifies the opcode to test.
333 RD receives the 'rd' field from the decoded instruction.
335 Return 1 if the opcodes matches and is decoded, otherwise 0. */
338 aarch64_decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
340 if (decode_masked_match (insn, 0xff800000, 0x52800000))
342 *rd = (insn >> 0) & 0x1f;
346 debug_printf ("decode: 0x%s 0x%x movz x%u, #?\n",
347 core_addr_to_string_nz (addr), insn, *rd);
354 /* Decode an opcode if it represents a ORR (shifted register)
357 ADDR specifies the address of the opcode.
358 INSN specifies the opcode to test.
359 RD receives the 'rd' field from the decoded instruction.
360 RN receives the 'rn' field from the decoded instruction.
361 RM receives the 'rm' field from the decoded instruction.
362 IMM receives the 'imm6' field from the decoded instruction.
364 Return 1 if the opcodes matches and is decoded, otherwise 0. */
367 aarch64_decode_orr_shifted_register_x (CORE_ADDR addr, uint32_t insn,
368 unsigned *rd, unsigned *rn,
369 unsigned *rm, int32_t *imm)
371 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
373 *rd = (insn >> 0) & 0x1f;
374 *rn = (insn >> 5) & 0x1f;
375 *rm = (insn >> 16) & 0x1f;
376 *imm = (insn >> 10) & 0x3f;
380 debug_printf ("decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
381 core_addr_to_string_nz (addr), insn, *rd, *rn,
389 /* Decode an opcode if it represents a RET instruction.
391 ADDR specifies the address of the opcode.
392 INSN specifies the opcode to test.
393 RN receives the 'rn' field from the decoded instruction.
395 Return 1 if the opcodes matches and is decoded, otherwise 0. */
398 aarch64_decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
400 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
402 *rn = (insn >> 5) & 0x1f;
405 debug_printf ("decode: 0x%s 0x%x ret x%u\n",
406 core_addr_to_string_nz (addr), insn, *rn);
413 /* Decode an opcode if it represents the following instruction:
414 STP rt, rt2, [rn, #imm]
416 ADDR specifies the address of the opcode.
417 INSN specifies the opcode to test.
418 RT1 receives the 'rt' field from the decoded instruction.
419 RT2 receives the 'rt2' field from the decoded instruction.
420 RN receives the 'rn' field from the decoded instruction.
421 IMM receives the 'imm' field from the decoded instruction.
423 Return 1 if the opcodes matches and is decoded, otherwise 0. */
426 aarch64_decode_stp_offset (CORE_ADDR addr, uint32_t insn, unsigned *rt1,
427 unsigned *rt2, unsigned *rn, int32_t *imm)
429 if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
431 *rt1 = (insn >> 0) & 0x1f;
432 *rn = (insn >> 5) & 0x1f;
433 *rt2 = (insn >> 10) & 0x1f;
434 *imm = extract_signed_bitfield (insn, 7, 15);
439 debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
440 core_addr_to_string_nz (addr), insn, *rt1, *rt2,
448 /* Decode an opcode if it represents the following instruction:
449 STP rt, rt2, [rn, #imm]!
451 ADDR specifies the address of the opcode.
452 INSN specifies the opcode to test.
453 RT1 receives the 'rt' field from the decoded instruction.
454 RT2 receives the 'rt2' field from the decoded instruction.
455 RN receives the 'rn' field from the decoded instruction.
456 IMM receives the 'imm' field from the decoded instruction.
458 Return 1 if the opcodes matches and is decoded, otherwise 0. */
461 aarch64_decode_stp_offset_wb (CORE_ADDR addr, uint32_t insn, unsigned *rt1,
462 unsigned *rt2, unsigned *rn, int32_t *imm)
464 if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
466 *rt1 = (insn >> 0) & 0x1f;
467 *rn = (insn >> 5) & 0x1f;
468 *rt2 = (insn >> 10) & 0x1f;
469 *imm = extract_signed_bitfield (insn, 7, 15);
474 debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
475 core_addr_to_string_nz (addr), insn, *rt1, *rt2,
483 /* Decode an opcode if it represents the following instruction:
486 ADDR specifies the address of the opcode.
487 INSN specifies the opcode to test.
488 IS64 receives size field from the decoded instruction.
489 RT receives the 'rt' field from the decoded instruction.
490 RN receives the 'rn' field from the decoded instruction.
491 IMM receives the 'imm' field from the decoded instruction.
493 Return 1 if the opcodes matches and is decoded, otherwise 0. */
496 aarch64_decode_stur (CORE_ADDR addr, uint32_t insn, int *is64,
497 unsigned *rt, unsigned *rn, int32_t *imm)
499 if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
501 *is64 = (insn >> 30) & 1;
502 *rt = (insn >> 0) & 0x1f;
503 *rn = (insn >> 5) & 0x1f;
504 *imm = extract_signed_bitfield (insn, 9, 12);
508 debug_printf ("decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
509 core_addr_to_string_nz (addr), insn,
510 *is64 ? 'x' : 'w', *rt, *rn, *imm);
517 /* Analyze a prologue, looking for a recognizable stack frame
518 and frame pointer. Scan until we encounter a store that could
519 clobber the stack frame unexpectedly, or an unknown instruction. */
/* NOTE(review): many lines of this function are missing from the
   extraction (local declarations, braces, break/return statements, and
   several call-argument continuation lines).  Tokens below are kept
   verbatim; restore the gaps from the original file before compiling.  */
522 aarch64_analyze_prologue (struct gdbarch *gdbarch,
523 CORE_ADDR start, CORE_ADDR limit,
524 struct aarch64_prologue_cache *cache)
526 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
528 pv_t regs[AARCH64_X_REGISTER_COUNT];
529 struct pv_area *stack;
530 struct cleanup *back_to;
/* Start with each X register holding its own entry value.  */
532 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
533 regs[i] = pv_register (i, 0);
534 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
535 back_to = make_cleanup_free_pv_area (stack);
/* Walk the prologue one 4-byte instruction at a time, simulating the
   effect of each recognized instruction on the register/stack model.  */
537 for (; start < limit; start += 4)
557 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
559 if (aarch64_decode_add_sub_imm (start, insn, &rd, &rn, &imm))
560 regs[rd] = pv_add_constant (regs[rn], imm);
561 else if (aarch64_decode_adr (start, insn, &is_adrp, &rd, &offset)
563 regs[rd] = pv_unknown ();
564 else if (aarch64_decode_b (start, insn, &is_link, &offset))
566 /* Stop analysis on branch. */
569 else if (aarch64_decode_bcond (start, insn, &cond, &offset))
571 /* Stop analysis on branch. */
574 else if (aarch64_decode_br (start, insn, &is_link, &rn))
576 /* Stop analysis on branch. */
579 else if (aarch64_decode_cb (start, insn, &is64, &is_cbnz, &rn,
582 /* Stop analysis on branch. */
585 else if (aarch64_decode_eret (start, insn))
587 /* Stop analysis on branch. */
590 else if (aarch64_decode_movz (start, insn, &rd))
591 regs[rd] = pv_unknown ();
592 else if (aarch64_decode_orr_shifted_register_x (start, insn, &rd,
/* An unshifted ORR from SP (register 31) is a register-to-register
   move of the stack pointer; anything else ends the analysis.  */
595 if (imm == 0 && rn == 31)
601 debug_printf ("aarch64: prologue analysis gave up "
602 "addr=0x%s opcode=0x%x (orr x register)\n",
603 core_addr_to_string_nz (start), insn);
608 else if (aarch64_decode_ret (start, insn, &rn))
610 /* Stop analysis on branch. */
613 else if (aarch64_decode_stur (start, insn, &is64, &rt, &rn, &offset))
615 pv_area_store (stack, pv_add_constant (regs[rn], offset),
616 is64 ? 8 : 4, regs[rt]);
618 else if (aarch64_decode_stp_offset (start, insn, &rt1, &rt2, &rn,
621 /* If recording this store would invalidate the store area
622 (perhaps because rn is not known) then we should abandon
623 further prologue analysis. */
624 if (pv_area_store_would_trash (stack,
625 pv_add_constant (regs[rn], imm)))
628 if (pv_area_store_would_trash (stack,
629 pv_add_constant (regs[rn], imm + 8)))
632 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
634 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
637 else if (aarch64_decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn,
640 /* If recording this store would invalidate the store area
641 (perhaps because rn is not known) then we should abandon
642 further prologue analysis. */
643 if (pv_area_store_would_trash (stack,
644 pv_add_constant (regs[rn], imm)))
647 if (pv_area_store_would_trash (stack,
648 pv_add_constant (regs[rn], imm + 8)))
651 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
653 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
/* Pre-indexed STP writes the new address back into rn.  */
655 regs[rn] = pv_add_constant (regs[rn], imm);
657 else if (aarch64_decode_tb (start, insn, &is_tbnz, &bit, &rn,
660 /* Stop analysis on branch. */
667 debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
669 core_addr_to_string_nz (start), insn);
677 do_cleanups (back_to);
/* Decide which register serves as the frame base.  */
681 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
683 /* Frame pointer is fp. Frame size is constant. */
684 cache->framereg = AARCH64_FP_REGNUM;
685 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
687 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
689 /* Try the stack pointer. */
690 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
691 cache->framereg = AARCH64_SP_REGNUM;
695 /* We're just out of luck. We don't know where the frame is. */
696 cache->framereg = -1;
697 cache->framesize = 0;
/* Record the stack offsets at which registers were saved.  */
700 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
704 if (pv_area_find_reg (stack, gdbarch, i, &offset))
705 cache->saved_regs[i].addr = offset;
708 do_cleanups (back_to);
712 /* Implement the "skip_prologue" gdbarch method. */
715 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
719 CORE_ADDR func_addr, limit_pc;
720 struct symtab_and_line sal;
722 /* See if we can determine the end of the prologue via the symbol
723 table. If so, then return either PC, or the PC after the
724 prologue, whichever is greater. */
725 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
727 CORE_ADDR post_prologue_pc
728 = skip_prologue_using_sal (gdbarch, func_addr);
730 if (post_prologue_pc != 0)
731 return max (pc, post_prologue_pc);
734 /* Can't determine prologue from the symbol table, need to examine
737 /* Find an upper limit on the function prologue using the debug
738 information. If the debug information could not be used to
739 provide that bound, then use an arbitrary large number as the
741 limit_pc = skip_prologue_using_sal (gdbarch, pc);
743 limit_pc = pc + 128; /* Magic. */
745 /* Try disassembling prologue. */
746 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
749 /* Scan the function prologue for THIS_FRAME and populate the prologue
753 aarch64_scan_prologue (struct frame_info *this_frame,
754 struct aarch64_prologue_cache *cache)
756 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
757 CORE_ADDR prologue_start;
758 CORE_ADDR prologue_end;
759 CORE_ADDR prev_pc = get_frame_pc (this_frame);
760 struct gdbarch *gdbarch = get_frame_arch (this_frame);
762 cache->prev_pc = prev_pc;
764 /* Assume we do not find a frame. */
765 cache->framereg = -1;
766 cache->framesize = 0;
768 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
771 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
775 /* No line info so use the current PC. */
776 prologue_end = prev_pc;
778 else if (sal.end < prologue_end)
780 /* The next line begins after the function end. */
781 prologue_end = sal.end;
784 prologue_end = min (prologue_end, prev_pc);
785 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
792 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
794 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
798 cache->framereg = AARCH64_FP_REGNUM;
799 cache->framesize = 16;
800 cache->saved_regs[29].addr = 0;
801 cache->saved_regs[30].addr = 8;
805 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
806 function may throw an exception if the inferior's registers or memory is
810 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
811 struct aarch64_prologue_cache *cache)
813 CORE_ADDR unwound_fp;
816 aarch64_scan_prologue (this_frame, cache);
818 if (cache->framereg == -1)
821 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
825 cache->prev_sp = unwound_fp + cache->framesize;
827 /* Calculate actual addresses of saved registers using offsets
828 determined by aarch64_analyze_prologue. */
829 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
830 if (trad_frame_addr_p (cache->saved_regs, reg))
831 cache->saved_regs[reg].addr += cache->prev_sp;
833 cache->func = get_frame_func (this_frame);
835 cache->available_p = 1;
838 /* Allocate and fill in *THIS_CACHE with information about the prologue of
839 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
840 Return a pointer to the current aarch64_prologue_cache in
843 static struct aarch64_prologue_cache *
844 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
846 struct aarch64_prologue_cache *cache;
848 if (*this_cache != NULL)
851 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
852 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
857 aarch64_make_prologue_cache_1 (this_frame, cache);
859 CATCH (ex, RETURN_MASK_ERROR)
861 if (ex.error != NOT_AVAILABLE_ERROR)
862 throw_exception (ex);
869 /* Implement the "stop_reason" frame_unwind method. */
871 static enum unwind_stop_reason
872 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
875 struct aarch64_prologue_cache *cache
876 = aarch64_make_prologue_cache (this_frame, this_cache);
878 if (!cache->available_p)
879 return UNWIND_UNAVAILABLE;
881 /* Halt the backtrace at "_start". */
882 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
883 return UNWIND_OUTERMOST;
885 /* We've hit a wall, stop. */
886 if (cache->prev_sp == 0)
887 return UNWIND_OUTERMOST;
889 return UNWIND_NO_REASON;
892 /* Our frame ID for a normal frame is the current function's starting
893 PC and the caller's SP when we were called. */
896 aarch64_prologue_this_id (struct frame_info *this_frame,
897 void **this_cache, struct frame_id *this_id)
899 struct aarch64_prologue_cache *cache
900 = aarch64_make_prologue_cache (this_frame, this_cache);
902 if (!cache->available_p)
903 *this_id = frame_id_build_unavailable_stack (cache->func);
905 *this_id = frame_id_build (cache->prev_sp, cache->func);
908 /* Implement the "prev_register" frame_unwind method. */
910 static struct value *
911 aarch64_prologue_prev_register (struct frame_info *this_frame,
912 void **this_cache, int prev_regnum)
914 struct gdbarch *gdbarch = get_frame_arch (this_frame);
915 struct aarch64_prologue_cache *cache
916 = aarch64_make_prologue_cache (this_frame, this_cache);
918 /* If we are asked to unwind the PC, then we need to return the LR
919 instead. The prologue may save PC, but it will point into this
920 frame's prologue, not the next frame's resume location. */
921 if (prev_regnum == AARCH64_PC_REGNUM)
925 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
926 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
929 /* SP is generally not saved to the stack, but this frame is
930 identified by the next frame's stack pointer at the time of the
931 call. The value was already reconstructed into PREV_SP. */
944 if (prev_regnum == AARCH64_SP_REGNUM)
945 return frame_unwind_got_constant (this_frame, prev_regnum,
948 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
952 /* AArch64 prologue unwinder. */
953 struct frame_unwind aarch64_prologue_unwind =
956 aarch64_prologue_frame_unwind_stop_reason,
957 aarch64_prologue_this_id,
958 aarch64_prologue_prev_register,
960 default_frame_sniffer
963 /* Allocate and fill in *THIS_CACHE with information about the prologue of
964 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
965 Return a pointer to the current aarch64_prologue_cache in
968 static struct aarch64_prologue_cache *
969 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
971 struct aarch64_prologue_cache *cache;
973 if (*this_cache != NULL)
976 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
977 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
982 cache->prev_sp = get_frame_register_unsigned (this_frame,
984 cache->prev_pc = get_frame_pc (this_frame);
985 cache->available_p = 1;
987 CATCH (ex, RETURN_MASK_ERROR)
989 if (ex.error != NOT_AVAILABLE_ERROR)
990 throw_exception (ex);
997 /* Implement the "stop_reason" frame_unwind method. */
999 static enum unwind_stop_reason
1000 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1003 struct aarch64_prologue_cache *cache
1004 = aarch64_make_stub_cache (this_frame, this_cache);
1006 if (!cache->available_p)
1007 return UNWIND_UNAVAILABLE;
1009 return UNWIND_NO_REASON;
1012 /* Our frame ID for a stub frame is the current SP and LR. */
1015 aarch64_stub_this_id (struct frame_info *this_frame,
1016 void **this_cache, struct frame_id *this_id)
1018 struct aarch64_prologue_cache *cache
1019 = aarch64_make_stub_cache (this_frame, this_cache);
1021 if (cache->available_p)
1022 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1024 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1027 /* Implement the "sniffer" frame_unwind method. */
1030 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1031 struct frame_info *this_frame,
1032 void **this_prologue_cache)
1034 CORE_ADDR addr_in_block;
1037 addr_in_block = get_frame_address_in_block (this_frame);
1038 if (in_plt_section (addr_in_block)
1039 /* We also use the stub winder if the target memory is unreadable
1040 to avoid having the prologue unwinder trying to read it. */
1041 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1047 /* AArch64 stub unwinder. */
1048 struct frame_unwind aarch64_stub_unwind =
1051 aarch64_stub_frame_unwind_stop_reason,
1052 aarch64_stub_this_id,
1053 aarch64_prologue_prev_register,
1055 aarch64_stub_unwind_sniffer
1058 /* Return the frame base address of *THIS_FRAME. */
1061 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1063 struct aarch64_prologue_cache *cache
1064 = aarch64_make_prologue_cache (this_frame, this_cache);
1066 return cache->prev_sp - cache->framesize;
1069 /* AArch64 default frame base information. */
1070 struct frame_base aarch64_normal_base =
1072 &aarch64_prologue_unwind,
1073 aarch64_normal_frame_base,
1074 aarch64_normal_frame_base,
1075 aarch64_normal_frame_base
1078 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1079 dummy frame. The frame ID's base needs to match the TOS value
1080 saved by save_dummy_frame_tos () and returned from
1081 aarch64_push_dummy_call, and the PC needs to match the dummy
1082 frame's breakpoint. */
1084 static struct frame_id
1085 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1087 return frame_id_build (get_frame_register_unsigned (this_frame,
1089 get_frame_pc (this_frame));
1092 /* Implement the "unwind_pc" gdbarch method. */
1095 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1098 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1103 /* Implement the "unwind_sp" gdbarch method. */
1106 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1108 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1111 /* Return the value of the REGNUM register in the previous frame of
1114 static struct value *
1115 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1116 void **this_cache, int regnum)
1118 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1123 case AARCH64_PC_REGNUM:
1124 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1125 return frame_unwind_got_constant (this_frame, regnum, lr);
1128 internal_error (__FILE__, __LINE__,
1129 _("Unexpected register %d"), regnum);
1133 /* Implement the "init_reg" dwarf2_frame_ops method. */
1136 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1137 struct dwarf2_frame_state_reg *reg,
1138 struct frame_info *this_frame)
1142 case AARCH64_PC_REGNUM:
1143 reg->how = DWARF2_FRAME_REG_FN;
1144 reg->loc.fn = aarch64_dwarf2_prev_register;
1146 case AARCH64_SP_REGNUM:
1147 reg->how = DWARF2_FRAME_REG_CFA;
1152 /* When arguments must be pushed onto the stack, they go on in reverse
1153 order. The code below implements a FILO (stack) to do this. */
/* NOTE(review): the typedef's opening and its member declarations
   (a data pointer and a length, per the comments below) are missing
   from this extraction — restore them from the original file.  */
1157 /* Value to pass on stack. */
1160 /* Size in bytes of value to pass on stack. */
/* Vector of stack items, used to accumulate pending stack words.  */
1164 DEF_VEC_O (stack_item_t);
1166 /* Return the alignment (in bytes) of the given type. */
1169 aarch64_type_align (struct type *t)
1175 t = check_typedef (t);
1176 switch (TYPE_CODE (t))
1179 /* Should never happen. */
1180 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1184 case TYPE_CODE_ENUM:
1188 case TYPE_CODE_RANGE:
1189 case TYPE_CODE_BITSTRING:
1191 case TYPE_CODE_CHAR:
1192 case TYPE_CODE_BOOL:
1193 return TYPE_LENGTH (t);
1195 case TYPE_CODE_ARRAY:
1196 case TYPE_CODE_COMPLEX:
1197 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1199 case TYPE_CODE_STRUCT:
1200 case TYPE_CODE_UNION:
1202 for (n = 0; n < TYPE_NFIELDS (t); n++)
1204 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1212 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1213 defined in the AAPCS64 ABI document; otherwise return 0. */
1216 is_hfa (struct type *ty)
1218 switch (TYPE_CODE (ty))
1220 case TYPE_CODE_ARRAY:
1222 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1223 if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
1228 case TYPE_CODE_UNION:
1229 case TYPE_CODE_STRUCT:
1231 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1233 struct type *member0_type;
1235 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1236 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
1240 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1242 struct type *member1_type;
1244 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1245 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1246 || (TYPE_LENGTH (member0_type)
1247 != TYPE_LENGTH (member1_type)))
1263 /* AArch64 function call information structure. */
/* NOTE(review): the struct's opening brace and the member declarations
   (argnum, ngrn, nsrn, nsaa — per the surviving comments) are missing
   from this extraction — restore them from the original file.  */
1264 struct aarch64_call_info
1266 /* the current argument number. */
1269 /* The next general purpose register number, equivalent to NGRN as
1270 described in the AArch64 Procedure Call Standard. */
1273 /* The next SIMD and floating point register number, equivalent to
1274 NSRN as described in the AArch64 Procedure Call Standard. */
1277 /* The next stacked argument address, equivalent to NSAA as
1278 described in the AArch64 Procedure Call Standard. */
1281 /* Stack item vector. */
1282 VEC(stack_item_t) *si;
1285 /* Pass a value in a sequence of consecutive X registers. The caller
1286 is responsbile for ensuring sufficient registers are available. */
1289 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1290 struct aarch64_call_info *info, struct type *type,
1291 const bfd_byte *buf)
1293 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1294 int len = TYPE_LENGTH (type);
1295 enum type_code typecode = TYPE_CODE (type);
1296 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1302 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1303 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1307 /* Adjust sub-word struct/union args when big-endian. */
1308 if (byte_order == BFD_ENDIAN_BIG
1309 && partial_len < X_REGISTER_SIZE
1310 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1311 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1315 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1316 gdbarch_register_name (gdbarch, regnum),
1317 phex (regval, X_REGISTER_SIZE));
1319 regcache_cooked_write_unsigned (regcache, regnum, regval);
1326 /* Attempt to marshall a value in a V register. Return 1 if
1327 successful, or 0 if insufficient registers are available. This
1328 function, unlike the equivalent pass_in_x() function does not
1329 handle arguments spread across multiple registers. */
1332 pass_in_v (struct gdbarch *gdbarch,
1333 struct regcache *regcache,
1334 struct aarch64_call_info *info,
1335 const bfd_byte *buf)
1339 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1340 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1345 regcache_cooked_write (regcache, regnum, buf);
1348 debug_printf ("arg %d in %s\n", info->argnum,
1349 gdbarch_register_name (gdbarch, regnum));
1357 /* Marshall an argument onto the stack.
   TYPE/BUF describe the value; INFO->si accumulates stack_item_t
   entries that are written out later by push_dummy_call, and
   INFO->nsaa tracks the next stacked argument address.  */
1360 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1361 const bfd_byte *buf)
1363 int len = TYPE_LENGTH (type);
1369 align = aarch64_type_align (type);
1371 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1372 Natural alignment of the argument's type. */
1373 align = align_up (align, 8);
1375 /* The AArch64 PCS requires at most doubleword alignment. */
1381 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1387 VEC_safe_push (stack_item_t, info->si, &item);
/* If the stacked-argument offset is now misaligned for this type,
   queue a padding item to restore the required alignment.  */
1390 if (info->nsaa & (align - 1))
1392 /* Push stack alignment padding. */
1393 int pad = align - (info->nsaa & (align - 1));
1398 VEC_safe_push (stack_item_t, info->si, &item);
1403 /* Marshall an argument into a sequence of one or more consecutive X
1404 registers or, if insufficient X registers are available then onto
   the stack (PCS C.13).  NREGS is the number of X registers the
   value would occupy; only 8 general argument registers exist.  */
1408 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1409 struct aarch64_call_info *info, struct type *type,
1410 const bfd_byte *buf)
1412 int len = TYPE_LENGTH (type);
1413 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1415 /* PCS C.13 - Pass in registers if we have enough spare */
1416 if (info->ngrn + nregs <= 8)
1418 pass_in_x (gdbarch, regcache, info, type, buf);
1419 info->ngrn += nregs;
/* Otherwise fall back to the stack (the ngrn exhaustion on the
   elided else-branch forces all later args to the stack too).  */
1424 pass_on_stack (info, type, buf);
1428 /* Pass a value in a V register, or on the stack if insufficient are
   available.  NOTE(review): the `struct type *type' parameter line
   is elided from this listing but is referenced below — confirm
   against upstream.  */
1432 pass_in_v_or_stack (struct gdbarch *gdbarch,
1433 struct regcache *regcache,
1434 struct aarch64_call_info *info,
1436 const bfd_byte *buf)
1438 if (!pass_in_v (gdbarch, regcache, info, buf))
1439 pass_on_stack (info, type, buf);
1442 /* Implement the "push_dummy_call" gdbarch method.
   Marshals ARGS into registers/stack per the AArch64 PCS, sets LR to
   BP_ADDR, handles the hidden struct-return pointer in X8, flushes
   queued stack items below SP, and updates the SP register.
   NOTE(review): many interior lines (braces, declarations, break
   statements, final return) are elided from this listing.  */
1445 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1446 struct regcache *regcache, CORE_ADDR bp_addr,
1448 struct value **args, CORE_ADDR sp, int struct_return,
1449 CORE_ADDR struct_addr)
1455 struct aarch64_call_info info;
1456 struct type *func_type;
1457 struct type *return_type;
1458 int lang_struct_return;
1460 memset (&info, 0, sizeof (info));
1462 /* We need to know what the type of the called function is in order
1463 to determine the number of named/anonymous arguments for the
1464 actual argument placement, and the return type in order to handle
1465 return value correctly.
1467 The generic code above us views the decision of return in memory
1468 or return in registers as a two stage processes. The language
1469 handler is consulted first and may decide to return in memory (eg
1470 class with copy constructor returned by value), this will cause
1471 the generic code to allocate space AND insert an initial leading
1474 If the language code does not decide to pass in memory then the
1475 target code is consulted.
1477 If the language code decides to pass in memory we want to move
1478 the pointer inserted as the initial argument from the argument
1479 list and into X8, the conventional AArch64 struct return pointer
1482 This is slightly awkward, ideally the flag "lang_struct_return"
1483 would be passed to the targets implementation of push_dummy_call.
1484 Rather that change the target interface we call the language code
1485 directly ourselves. */
1487 func_type = check_typedef (value_type (function));
1489 /* Dereference function pointer types. */
1490 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1491 func_type = TYPE_TARGET_TYPE (func_type);
1493 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1494 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1496 /* If language_pass_by_reference () returned true we will have been
1497 given an additional initial argument, a hidden pointer to the
1498 return slot in memory. */
1499 return_type = TYPE_TARGET_TYPE (func_type);
1500 lang_struct_return = language_pass_by_reference (return_type);
1502 /* Set the return address. For the AArch64, the return breakpoint
1503 is always at BP_ADDR. */
1504 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1506 /* If we were given an initial argument for the return slot because
1507 lang_struct_return was true, lose it. */
1508 if (lang_struct_return)
1514 /* The struct_return pointer occupies X8. */
1515 if (struct_return || lang_struct_return)
1519 debug_printf ("struct return in %s = 0x%s\n",
1520 gdbarch_register_name (gdbarch,
1521 AARCH64_STRUCT_RETURN_REGNUM),
1522 paddress (gdbarch, struct_addr));
1524 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
/* Place each argument according to its (typedef-resolved) type.  */
1528 for (argnum = 0; argnum < nargs; argnum++)
1530 struct value *arg = args[argnum];
1531 struct type *arg_type;
1534 arg_type = check_typedef (value_type (arg));
1535 len = TYPE_LENGTH (arg_type);
1537 switch (TYPE_CODE (arg_type))
1540 case TYPE_CODE_BOOL:
1541 case TYPE_CODE_CHAR:
1542 case TYPE_CODE_RANGE:
1543 case TYPE_CODE_ENUM:
1546 /* Promote to 32 bit integer. */
1547 if (TYPE_UNSIGNED (arg_type))
1548 arg_type = builtin_type (gdbarch)->builtin_uint32;
1550 arg_type = builtin_type (gdbarch)->builtin_int32;
1551 arg = value_cast (arg_type, arg);
1553 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1554 value_contents (arg));
1557 case TYPE_CODE_COMPLEX:
/* A complex value is its real and imaginary parts in two
   consecutive V registers (sufficient-register check elided).  */
1560 const bfd_byte *buf = value_contents (arg);
1561 struct type *target_type =
1562 check_typedef (TYPE_TARGET_TYPE (arg_type));
1564 pass_in_v (gdbarch, regcache, &info, buf);
1565 pass_in_v (gdbarch, regcache, &info,
1566 buf + TYPE_LENGTH (target_type));
1571 pass_on_stack (&info, arg_type, value_contents (arg));
1575 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1576 value_contents (arg));
1579 case TYPE_CODE_STRUCT:
1580 case TYPE_CODE_ARRAY:
1581 case TYPE_CODE_UNION:
1582 if (is_hfa (arg_type))
1584 int elements = TYPE_NFIELDS (arg_type);
1586 /* Homogeneous Aggregates */
1587 if (info.nsrn + elements < 8)
1591 for (i = 0; i < elements; i++)
1593 /* We know that we have sufficient registers
1594 available therefore this will never fallback
1596 struct value *field =
1597 value_primitive_field (arg, 0, i, arg_type);
1598 struct type *field_type =
1599 check_typedef (value_type (field));
1601 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1602 value_contents_writeable (field));
1608 pass_on_stack (&info, arg_type, value_contents (arg));
1613 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1614 invisible reference. */
1616 /* Allocate aligned storage. */
1617 sp = align_down (sp - len, 16);
1619 /* Write the real data into the stack. */
1620 write_memory (sp, value_contents (arg), len);
1622 /* Construct the indirection. */
1623 arg_type = lookup_pointer_type (arg_type);
1624 arg = value_from_pointer (arg_type, sp);
1625 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1626 value_contents (arg));
1629 /* PCS C.15 / C.18 multiple values pass. */
1630 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1631 value_contents (arg));
1635 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1636 value_contents (arg));
1641 /* Make sure stack retains 16 byte alignment. */
1643 sp -= 16 - (info.nsaa & 15);
/* Pop the queued stack items (last first, so the final layout is in
   argument order growing upward from SP).  */
1645 while (!VEC_empty (stack_item_t, info.si))
1647 stack_item_t *si = VEC_last (stack_item_t, info.si);
1650 write_memory (sp, si->data, si->len);
1651 VEC_pop (stack_item_t, info.si);
1654 VEC_free (stack_item_t, info.si);
1656 /* Finally, update the SP register. */
1657 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1662 /* Implement the "frame_align" gdbarch method.
   Rounds SP down to the 16-byte boundary the AArch64 ABI requires.  */
1665 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1667 /* Align the stack to sixteen bytes. */
1668 return sp & ~(CORE_ADDR) 15;
1671 /* Return the type for an AdvSISD Q register.
   Lazily builds (and caches in TDEP) a union of 128-bit views
   ("u" unsigned, "s" signed).  */
1673 static struct type *
1674 aarch64_vnq_type (struct gdbarch *gdbarch)
1676 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1678 if (tdep->vnq_type == NULL)
1683 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1686 elem = builtin_type (gdbarch)->builtin_uint128;
1687 append_composite_type_field (t, "u", elem);
1689 elem = builtin_type (gdbarch)->builtin_int128;
1690 append_composite_type_field (t, "s", elem);
1695 return tdep->vnq_type;
1698 /* Return the type for an AdvSISD D register.
   Lazily builds (and caches in TDEP) a union of 64-bit views
   ("f" double, "u" unsigned, "s" signed).  */
1700 static struct type *
1701 aarch64_vnd_type (struct gdbarch *gdbarch)
1703 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1705 if (tdep->vnd_type == NULL)
1710 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1713 elem = builtin_type (gdbarch)->builtin_double;
1714 append_composite_type_field (t, "f", elem);
1716 elem = builtin_type (gdbarch)->builtin_uint64;
1717 append_composite_type_field (t, "u", elem);
1719 elem = builtin_type (gdbarch)->builtin_int64;
1720 append_composite_type_field (t, "s", elem);
1725 return tdep->vnd_type;
1728 /* Return the type for an AdvSISD S register.
   Lazily builds (and caches in TDEP) a union of 32-bit views
   ("f" float, "u" unsigned, "s" signed).  */
1730 static struct type *
1731 aarch64_vns_type (struct gdbarch *gdbarch)
1733 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1735 if (tdep->vns_type == NULL)
1740 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1743 elem = builtin_type (gdbarch)->builtin_float;
1744 append_composite_type_field (t, "f", elem);
1746 elem = builtin_type (gdbarch)->builtin_uint32;
1747 append_composite_type_field (t, "u", elem);
1749 elem = builtin_type (gdbarch)->builtin_int32;
1750 append_composite_type_field (t, "s", elem);
1755 return tdep->vns_type;
1758 /* Return the type for an AdvSISD H register.
   Lazily builds (and caches in TDEP) a union of 16-bit integer views
   ("u" unsigned, "s" signed).  */
1760 static struct type *
1761 aarch64_vnh_type (struct gdbarch *gdbarch)
1763 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1765 if (tdep->vnh_type == NULL)
1770 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1773 elem = builtin_type (gdbarch)->builtin_uint16;
1774 append_composite_type_field (t, "u", elem);
1776 elem = builtin_type (gdbarch)->builtin_int16;
1777 append_composite_type_field (t, "s", elem);
1782 return tdep->vnh_type;
1785 /* Return the type for an AdvSISD B register.
   Lazily builds (and caches in TDEP) a union of 8-bit integer views
   ("u" unsigned, "s" signed).  */
1787 static struct type *
1788 aarch64_vnb_type (struct gdbarch *gdbarch)
1790 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1792 if (tdep->vnb_type == NULL)
1797 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1800 elem = builtin_type (gdbarch)->builtin_uint8;
1801 append_composite_type_field (t, "u", elem);
1803 elem = builtin_type (gdbarch)->builtin_int8;
1804 append_composite_type_field (t, "s", elem);
1809 return tdep->vnb_type;
1812 /* Implement the "dwarf2_reg_to_regnum" gdbarch method.
   Maps a DWARF register number REG to GDB's internal register number:
   X0..X30, SP, and V0..V31 are handled; the fall-through return for
   unknown numbers is on a line elided from this listing.  */
1815 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1817 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1818 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1820 if (reg == AARCH64_DWARF_SP)
1821 return AARCH64_SP_REGNUM;
1823 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1824 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1830 /* Implement the "print_insn" gdbarch method.
   Clears INFO->symbols before delegating to the opcodes
   disassembler for the instruction at MEMADDR.  */
1833 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1835 info->symbols = NULL;
1836 return print_insn_aarch64 (memaddr, info);
1839 /* AArch64 BRK software debug mode instruction.
1840 Note that AArch64 code is always little-endian.
1841 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.
   Stored as little-endian bytes so it can be planted verbatim.  */
1842 static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1844 /* Implement the "breakpoint_from_pc" gdbarch method.
   Returns the BRK instruction bytes and stores their length in
   *LENPTR; PCPTR is not adjusted (all AArch64 insns are 4 bytes).  */
1846 static const gdb_byte *
1847 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1850 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1852 *lenptr = sizeof (aarch64_default_breakpoint);
1853 return aarch64_default_breakpoint;
1856 /* Extract from an array REGS containing the (raw) register state a
1857 function return value of type TYPE, and copy that, in virtual
1858 format, into VALBUF.
   Mirrors the PCS return rules: floats in V0, integers/pointers in
   X0..,complex in V0/V1, HFAs element-per-V-register, and small
   aggregates in consecutive X registers.  */
1861 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1864 struct gdbarch *gdbarch = get_regcache_arch (regs);
1865 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1867 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1869 bfd_byte buf[V_REGISTER_SIZE];
1870 int len = TYPE_LENGTH (type);
1872 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1873 memcpy (valbuf, buf, len);
1875 else if (TYPE_CODE (type) == TYPE_CODE_INT
1876 || TYPE_CODE (type) == TYPE_CODE_CHAR
1877 || TYPE_CODE (type) == TYPE_CODE_BOOL
1878 || TYPE_CODE (type) == TYPE_CODE_PTR
1879 || TYPE_CODE (type) == TYPE_CODE_REF
1880 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1882 /* If the type is a plain integer, then the access is
1883 straight-forward. Otherwise we have to play around a bit
1885 int len = TYPE_LENGTH (type);
1886 int regno = AARCH64_X0_REGNUM;
1891 /* By using store_unsigned_integer we avoid having to do
1892 anything special for small big-endian values. */
1893 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1894 store_unsigned_integer (valbuf,
1895 (len > X_REGISTER_SIZE
1896 ? X_REGISTER_SIZE : len), byte_order, tmp);
1897 len -= X_REGISTER_SIZE;
1898 valbuf += X_REGISTER_SIZE;
1901 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1903 int regno = AARCH64_V0_REGNUM;
1904 bfd_byte buf[V_REGISTER_SIZE];
1905 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1906 int len = TYPE_LENGTH (target_type);
1908 regcache_cooked_read (regs, regno, buf);
1909 memcpy (valbuf, buf, len);
/* NOTE(review): the advance of VALBUF between the real and imaginary
   copies is not visible in this listing — confirm upstream.  */
1911 regcache_cooked_read (regs, regno + 1, buf);
1912 memcpy (valbuf, buf, len);
1915 else if (is_hfa (type))
1917 int elements = TYPE_NFIELDS (type);
1918 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1919 int len = TYPE_LENGTH (member_type);
1922 for (i = 0; i < elements; i++)
1924 int regno = AARCH64_V0_REGNUM + i;
1925 bfd_byte buf[X_REGISTER_SIZE];
1929 debug_printf ("read HFA return value element %d from %s\n",
1931 gdbarch_register_name (gdbarch, regno));
1933 regcache_cooked_read (regs, regno, buf);
1935 memcpy (valbuf, buf, len);
1941 /* For a structure or union the behaviour is as if the value had
1942 been stored to word-aligned memory and then loaded into
1943 registers with 64-bit load instruction(s). */
1944 int len = TYPE_LENGTH (type);
1945 int regno = AARCH64_X0_REGNUM;
1946 bfd_byte buf[X_REGISTER_SIZE];
1950 regcache_cooked_read (regs, regno++, buf);
1951 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1952 len -= X_REGISTER_SIZE;
1953 valbuf += X_REGISTER_SIZE;
1959 /* Will a function return an aggregate type in memory or in a
1960 register? Return 0 if an aggregate type can be returned in a
1961 register, 1 if it must be returned in memory.
   HFAs and aggregates of 16 bytes or less go in registers; larger
   aggregates are returned via invisible reference (PCS B.6).  */
1964 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1967 enum type_code code;
1969 type = check_typedef (type);
1971 /* In the AArch64 ABI, "integer" like aggregate types are returned
1972 in registers. For an aggregate type to be integer like, its size
1973 must be less than or equal to 4 * X_REGISTER_SIZE. */
1977 /* PCS B.5 If the argument is a Named HFA, then the argument is
1982 if (TYPE_LENGTH (type) > 16)
1984 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1985 invisible reference. */
1993 /* Write into appropriate registers a function return value of type
1994 TYPE, given in virtual format.
   Inverse of aarch64_extract_return_value: floats to V0, small
   integers sign/zero-extended into X0, HFAs element-per-V-register,
   small aggregates into consecutive X registers.  */
1997 aarch64_store_return_value (struct type *type, struct regcache *regs,
1998 const gdb_byte *valbuf)
2000 struct gdbarch *gdbarch = get_regcache_arch (regs);
2001 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2003 if (TYPE_CODE (type) == TYPE_CODE_FLT)
2005 bfd_byte buf[V_REGISTER_SIZE];
2006 int len = TYPE_LENGTH (type);
2008 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2009 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2011 else if (TYPE_CODE (type) == TYPE_CODE_INT
2012 || TYPE_CODE (type) == TYPE_CODE_CHAR
2013 || TYPE_CODE (type) == TYPE_CODE_BOOL
2014 || TYPE_CODE (type) == TYPE_CODE_PTR
2015 || TYPE_CODE (type) == TYPE_CODE_REF
2016 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2018 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2020 /* Values of one word or less are zero/sign-extended and
2022 bfd_byte tmpbuf[X_REGISTER_SIZE];
2023 LONGEST val = unpack_long (type, valbuf);
2025 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2026 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
2030 /* Integral values greater than one word are stored in
2031 consecutive registers starting with r0. This will always
2032 be a multiple of the register size. */
2033 int len = TYPE_LENGTH (type);
2034 int regno = AARCH64_X0_REGNUM;
2038 regcache_cooked_write (regs, regno++, valbuf);
2039 len -= X_REGISTER_SIZE;
2040 valbuf += X_REGISTER_SIZE;
2044 else if (is_hfa (type))
2046 int elements = TYPE_NFIELDS (type);
2047 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2048 int len = TYPE_LENGTH (member_type);
2051 for (i = 0; i < elements; i++)
2053 int regno = AARCH64_V0_REGNUM + i;
2054 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
2058 debug_printf ("write HFA return value element %d to %s\n",
2060 gdbarch_register_name (gdbarch, regno));
2063 memcpy (tmpbuf, valbuf, len);
2064 regcache_cooked_write (regs, regno, tmpbuf);
2070 /* For a structure or union the behaviour is as if the value had
2071 been stored to word-aligned memory and then loaded into
2072 registers with 64-bit load instruction(s). */
2073 int len = TYPE_LENGTH (type);
2074 int regno = AARCH64_X0_REGNUM;
2075 bfd_byte tmpbuf[X_REGISTER_SIZE];
2079 memcpy (tmpbuf, valbuf,
2080 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2081 regcache_cooked_write (regs, regno++, tmpbuf);
2082 len -= X_REGISTER_SIZE;
2083 valbuf += X_REGISTER_SIZE;
2088 /* Implement the "return_value" gdbarch method.
   Decides between memory and register return conventions for VALTYPE
   and, when READBUF/WRITEBUF are non-NULL, extracts or stores the
   value via the helpers above.  */
2090 static enum return_value_convention
2091 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2092 struct type *valtype, struct regcache *regcache,
2093 gdb_byte *readbuf, const gdb_byte *writebuf)
2095 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2097 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2098 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2099 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2101 if (aarch64_return_in_memory (gdbarch, valtype))
2104 debug_printf ("return value in memory\n");
2105 return RETURN_VALUE_STRUCT_CONVENTION;
2110 aarch64_store_return_value (valtype, regcache, writebuf);
2113 aarch64_extract_return_value (valtype, regcache, readbuf);
2116 debug_printf ("return value in registers\n");
2118 return RETURN_VALUE_REGISTER_CONVENTION;
2121 /* Implement the "get_longjmp_target" gdbarch method.
   Reads the saved PC out of the jmp_buf pointed to by X0, using the
   per-target jb_pc slot index and element size from TDEP.  Stores the
   result in *PC; the failure return path is elided from this
   listing.  */
2124 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2127 gdb_byte buf[X_REGISTER_SIZE];
2128 struct gdbarch *gdbarch = get_frame_arch (frame);
2129 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2130 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2132 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2134 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2138 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2142 /* Implement the "gen_return_address" gdbarch method.
   For agent expressions: the return address lives in the LR
   register, expressed as an lvalue of LR's register type.  */
2145 aarch64_gen_return_address (struct gdbarch *gdbarch,
2146 struct agent_expr *ax, struct axs_value *value,
2149 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2150 value->kind = axs_lvalue_register;
2151 value->u.reg = AARCH64_LR_REGNUM;
2155 /* Return the pseudo register name corresponding to register regnum.
   REGNUM is an absolute GDB register number; it is rebased past the
   raw registers and then mapped into the Q/D/S/H/B pseudo banks of
   32 names each.  */
2158 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2160 static const char *const q_name[] =
2162 "q0", "q1", "q2", "q3",
2163 "q4", "q5", "q6", "q7",
2164 "q8", "q9", "q10", "q11",
2165 "q12", "q13", "q14", "q15",
2166 "q16", "q17", "q18", "q19",
2167 "q20", "q21", "q22", "q23",
2168 "q24", "q25", "q26", "q27",
2169 "q28", "q29", "q30", "q31",
2172 static const char *const d_name[] =
2174 "d0", "d1", "d2", "d3",
2175 "d4", "d5", "d6", "d7",
2176 "d8", "d9", "d10", "d11",
2177 "d12", "d13", "d14", "d15",
2178 "d16", "d17", "d18", "d19",
2179 "d20", "d21", "d22", "d23",
2180 "d24", "d25", "d26", "d27",
2181 "d28", "d29", "d30", "d31",
2184 static const char *const s_name[] =
2186 "s0", "s1", "s2", "s3",
2187 "s4", "s5", "s6", "s7",
2188 "s8", "s9", "s10", "s11",
2189 "s12", "s13", "s14", "s15",
2190 "s16", "s17", "s18", "s19",
2191 "s20", "s21", "s22", "s23",
2192 "s24", "s25", "s26", "s27",
2193 "s28", "s29", "s30", "s31",
2196 static const char *const h_name[] =
2198 "h0", "h1", "h2", "h3",
2199 "h4", "h5", "h6", "h7",
2200 "h8", "h9", "h10", "h11",
2201 "h12", "h13", "h14", "h15",
2202 "h16", "h17", "h18", "h19",
2203 "h20", "h21", "h22", "h23",
2204 "h24", "h25", "h26", "h27",
2205 "h28", "h29", "h30", "h31",
2208 static const char *const b_name[] =
2210 "b0", "b1", "b2", "b3",
2211 "b4", "b5", "b6", "b7",
2212 "b8", "b9", "b10", "b11",
2213 "b12", "b13", "b14", "b15",
2214 "b16", "b17", "b18", "b19",
2215 "b20", "b21", "b22", "b23",
2216 "b24", "b25", "b26", "b27",
2217 "b28", "b29", "b30", "b31",
/* Convert the absolute register number into a pseudo-bank offset.  */
2220 regnum -= gdbarch_num_regs (gdbarch);
2222 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2223 return q_name[regnum - AARCH64_Q0_REGNUM];
2225 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2226 return d_name[regnum - AARCH64_D0_REGNUM];
2228 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2229 return s_name[regnum - AARCH64_S0_REGNUM];
2231 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2232 return h_name[regnum - AARCH64_H0_REGNUM];
2234 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2235 return b_name[regnum - AARCH64_B0_REGNUM];
2237 internal_error (__FILE__, __LINE__,
2238 _("aarch64_pseudo_register_name: bad register number %d"),
2242 /* Implement the "pseudo_register_type" tdesc_arch_data method.
   Maps a pseudo register number to the union type describing the
   corresponding Q/D/S/H/B view of a V register.  */
2244 static struct type *
2245 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2247 regnum -= gdbarch_num_regs (gdbarch);
2249 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2250 return aarch64_vnq_type (gdbarch);
2252 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2253 return aarch64_vnd_type (gdbarch);
2255 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2256 return aarch64_vns_type (gdbarch);
2258 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2259 return aarch64_vnh_type (gdbarch);
2261 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2262 return aarch64_vnb_type (gdbarch);
2264 internal_error (__FILE__, __LINE__,
2265 _("aarch64_pseudo_register_type: bad register number %d"),
2269 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.
   All pseudo registers are vector registers; D and S views also
   belong to the float group.  Everything falls back to "all".  */
2272 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2273 struct reggroup *group)
2275 regnum -= gdbarch_num_regs (gdbarch);
2277 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2278 return group == all_reggroup || group == vector_reggroup;
2279 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2280 return (group == all_reggroup || group == vector_reggroup
2281 || group == float_reggroup);
2282 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2283 return (group == all_reggroup || group == vector_reggroup
2284 || group == float_reggroup);
2285 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2286 return group == all_reggroup || group == vector_reggroup;
2287 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2288 return group == all_reggroup || group == vector_reggroup;
2290 return group == all_reggroup;
2293 /* Implement the "pseudo_register_read_value" gdbarch method.
   Each Q/D/S/H/B pseudo register is a prefix of the underlying V
   register: read the raw V register and copy the view-sized prefix
   into a freshly allocated value.  On read failure the value's bytes
   are marked unavailable rather than erroring out.  */
2295 static struct value *
2296 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2297 struct regcache *regcache,
2300 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2301 struct value *result_value;
2304 result_value = allocate_value (register_type (gdbarch, regnum));
2305 VALUE_LVAL (result_value) = lval_register;
2306 VALUE_REGNUM (result_value) = regnum;
2307 buf = value_contents_raw (result_value);
2309 regnum -= gdbarch_num_regs (gdbarch);
2311 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2313 enum register_status status;
2316 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2317 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2318 if (status != REG_VALID)
2319 mark_value_bytes_unavailable (result_value, 0,
2320 TYPE_LENGTH (value_type (result_value)));
2322 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2323 return result_value;
2326 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2328 enum register_status status;
2331 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2332 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2333 if (status != REG_VALID)
2334 mark_value_bytes_unavailable (result_value, 0,
2335 TYPE_LENGTH (value_type (result_value)));
2337 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2338 return result_value;
2341 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2343 enum register_status status;
2346 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2347 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2348 if (status != REG_VALID)
2349 mark_value_bytes_unavailable (result_value, 0,
2350 TYPE_LENGTH (value_type (result_value)));
2352 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2353 return result_value;
2356 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2358 enum register_status status;
2361 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2362 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2363 if (status != REG_VALID)
2364 mark_value_bytes_unavailable (result_value, 0,
2365 TYPE_LENGTH (value_type (result_value)));
2367 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2368 return result_value;
2371 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2373 enum register_status status;
2376 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2377 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2378 if (status != REG_VALID)
2379 mark_value_bytes_unavailable (result_value, 0,
2380 TYPE_LENGTH (value_type (result_value)));
2382 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2383 return result_value;
2386 gdb_assert_not_reached ("regnum out of bound");
2389 /* Implement the "pseudo_register_write" gdbarch method.
   Writes BUF (view-sized) into the low bytes of the underlying V
   register, zeroing the remainder, so a scalar pseudo write behaves
   like the architectural write.  */
2392 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2393 int regnum, const gdb_byte *buf)
2395 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2397 /* Ensure the register buffer is zero, we want gdb writes of the
2398 various 'scalar' pseudo registers to behave like architectural
2399 writes, register width bytes are written the remainder are set to
2401 memset (reg_buf, 0, sizeof (reg_buf));
2403 regnum -= gdbarch_num_regs (gdbarch);
2405 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2407 /* pseudo Q registers */
2410 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2411 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2412 regcache_raw_write (regcache, v_regnum, reg_buf);
2416 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2418 /* pseudo D registers */
2421 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2422 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2423 regcache_raw_write (regcache, v_regnum, reg_buf);
2427 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
/* pseudo S registers */
2431 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2432 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2433 regcache_raw_write (regcache, v_regnum, reg_buf);
2437 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2439 /* pseudo H registers */
2442 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2443 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2444 regcache_raw_write (regcache, v_regnum, reg_buf);
2448 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2450 /* pseudo B registers */
2453 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2454 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2455 regcache_raw_write (regcache, v_regnum, reg_buf);
2459 gdb_assert_not_reached ("regnum out of bound");
2462 /* Callback function for user_reg_add.
   BATON is a pointer to the fixed register number registered for the
   alias; simply read that register in FRAME.  */
2464 static struct value *
2465 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2467 const int *reg_p = baton;
2469 return value_of_register (*reg_p, frame);
2473 /* Implement the "software_single_step" gdbarch method, needed to
2474 single step through atomic sequences on AArch64.
   Scans forward from a Load-Exclusive for the matching
   Store-Exclusive (within atomic_sequence_length insns), then places
   a breakpoint after the sequence and optionally one at the target
   of a single conditional branch inside it.  NOTE(review): several
   interior lines (loc init, early returns, break statements) are
   elided from this listing.  */
2477 aarch64_software_single_step (struct frame_info *frame)
2479 struct gdbarch *gdbarch = get_frame_arch (frame);
2480 struct address_space *aspace = get_frame_address_space (frame);
2481 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2482 const int insn_size = 4;
2483 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2484 CORE_ADDR pc = get_frame_pc (frame);
2485 CORE_ADDR breaks[2] = { -1, -1 };
2487 CORE_ADDR closing_insn = 0;
2488 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2489 byte_order_for_code);
2492 int bc_insn_count = 0; /* Conditional branch instruction count. */
2493 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2495 /* Look for a Load Exclusive instruction which begins the sequence. */
2496 if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
2499 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2505 insn = read_memory_unsigned_integer (loc, insn_size,
2506 byte_order_for_code);
2508 /* Check if the instruction is a conditional branch. */
2509 if (aarch64_decode_bcond (loc, insn, &cond, &offset))
/* Only one conditional branch is tolerated inside the sequence.  */
2511 if (bc_insn_count >= 1)
2514 /* It is, so we'll try to set a breakpoint at the destination. */
2515 breaks[1] = loc + offset;
2521 /* Look for the Store Exclusive which closes the atomic sequence. */
2522 if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
2529 /* We didn't find a closing Store Exclusive instruction, fall back. */
2533 /* Insert breakpoint after the end of the atomic sequence. */
2534 breaks[0] = loc + insn_size;
2536 /* Check for duplicated breakpoints, and also check that the second
2537 breakpoint is not within the atomic sequence. */
2539 && (breaks[1] == breaks[0]
2540 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2541 last_breakpoint = 0;
2543 /* Insert the breakpoint at the end of the sequence, and one at the
2544 destination of the conditional branch, if it exists. */
2545 for (index = 0; index <= last_breakpoint; index++)
2546 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2551 /* Initialize the current architecture based on INFO. If possible,
2552 re-use an architecture from ARCHES, which is a list of
2553 architectures already created during this debugging session.
2555 Called e.g. at program startup, when reading a core file, and when
2556 reading a binary file. */
2558 static struct gdbarch *
2559 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2561 struct gdbarch_tdep *tdep;
2562 struct gdbarch *gdbarch;
2563 struct gdbarch_list *best_arch;
2564 struct tdesc_arch_data *tdesc_data = NULL;
2565 const struct target_desc *tdesc = info.target_desc;
2567 int have_fpa_registers = 1;
2569 const struct tdesc_feature *feature;
2571 int num_pseudo_regs = 0;
2573 /* Ensure we always have a target descriptor.  */
2574 if (!tdesc_has_registers (tdesc))
2575 tdesc = tdesc_aarch64;
/* The core feature is mandatory; NOTE(review): the failure path when it
   is absent (presumably an early NULL return) is not visible in this
   extract -- confirm against the full source.  */
2579 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2581 if (feature == NULL)
2584 tdesc_data = tdesc_data_alloc ();
2586 /* Validate the descriptor provides the mandatory core R registers
2587 and allocate their numbers.  */
2588 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2590 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2591 aarch64_r_register_names[i]);
2593 num_regs = AARCH64_X0_REGNUM + i;
2595 /* Look for the V registers.  */
2596 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2599 /* Validate the descriptor provides the mandatory V registers
2600 and allocate their numbers.  */
2601 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2603 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2604 aarch64_v_register_names[i]);
2606 num_regs = AARCH64_V0_REGNUM + i;
/* Each scalar view (Q, D, S, H, B) of the 32 V registers contributes
   32 pseudo registers.  */
2608 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2609 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2610 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2611 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2612 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
/* NOTE(review): this cleanup is presumably on the "descriptor invalid"
   path; the guarding condition is not visible in this extract.  */
2617 tdesc_data_cleanup (tdesc_data);
2621 /* AArch64 code is always little-endian.  */
2622 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2624 /* If there is already a candidate, use it.  */
2625 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2627 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2629 /* Found a match.  */
/* Reusing an existing gdbarch: release the tdesc data we allocated.  */
2633 if (best_arch != NULL)
2635 if (tdesc_data != NULL)
2636 tdesc_data_cleanup (tdesc_data);
2637 return best_arch->gdbarch;
2640 tdep = XCNEW (struct gdbarch_tdep);
2641 gdbarch = gdbarch_alloc (&info, tdep);
2643 /* This should be low enough for everything.  */
2644 tdep->lowest_pc = 0x20;
2645 tdep->jb_pc = -1; /* Longjump support not enabled by default.  */
2646 tdep->jb_elt_size = 8;
2648 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2649 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2651 /* Frame handling.  */
2652 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2653 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2654 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2656 /* Advance PC across function entry code.  */
2657 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2659 /* The stack grows downward.  */
2660 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2662 /* Breakpoint manipulation.  */
2663 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2664 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2665 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2667 /* Information about registers, etc.  */
2668 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2669 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2670 set_gdbarch_num_regs (gdbarch, num_regs);
2672 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2673 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2674 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2675 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2676 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2677 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2678 aarch64_pseudo_register_reggroup_p);
/* Fundamental C type sizes for the LP64 AArch64 ABI.  */
2681 set_gdbarch_short_bit (gdbarch, 16);
2682 set_gdbarch_int_bit (gdbarch, 32);
2683 set_gdbarch_float_bit (gdbarch, 32);
2684 set_gdbarch_double_bit (gdbarch, 64);
2685 set_gdbarch_long_double_bit (gdbarch, 128);
2686 set_gdbarch_long_bit (gdbarch, 64);
2687 set_gdbarch_long_long_bit (gdbarch, 64);
2688 set_gdbarch_ptr_bit (gdbarch, 64);
2689 set_gdbarch_char_signed (gdbarch, 0);
2690 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2691 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2692 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2694 /* Internal <-> external register number maps.  */
2695 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2697 /* Returning results.  */
2698 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2701 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2703 /* Virtual tables.  */
2704 set_gdbarch_vbit_in_delta (gdbarch, 1);
2706 /* Hook in the ABI-specific overrides, if they have been registered.  */
2707 info.target_desc = tdesc;
2708 info.tdep_info = (void *) tdesc_data;
2709 gdbarch_init_osabi (info, gdbarch);
2711 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2713 /* Add some default predicates.  */
2714 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2715 dwarf2_append_unwinders (gdbarch);
2716 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2718 frame_base_set_default (gdbarch, &aarch64_normal_base);
2720 /* Now we have tuned the configuration, set a few final things,
2721 based on what the OS ABI has told us.  */
2723 if (tdep->jb_pc >= 0)
2724 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2726 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2728 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2730 /* Add standard register aliases.  */
2731 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2732 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2733 value_of_aarch64_user_reg,
2734 &aarch64_register_aliases[i].regnum);
/* "dump_tdep" gdbarch hook: print the AArch64-specific tdep state
   (currently just lowest_pc) to FILE for "maint print architecture".  */
2740 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2742 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
/* NOTE(review): the customary "if (tdep == NULL) return;" guard is not
   visible in this extract -- confirm against the full source.  */
2747 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2748 paddress (gdbarch, tdep->lowest_pc));
2751 /* Suppress warning from -Wmissing-prototypes. */
2752 extern initialize_file_ftype _initialize_aarch64_tdep;
/* Module initializer: register the AArch64 gdbarch with BFD, register
   the built-in target description, and add the "set/show debug aarch64"
   maintenance command backed by the aarch64_debug flag.  */
2755 _initialize_aarch64_tdep (void)
2757 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2760 initialize_tdesc_aarch64 ();
2762 /* Debug this file's internals.  */
2763 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2764 Set AArch64 debugging."), _("\
2765 Show AArch64 debugging."), _("\
2766 When on, AArch64 specific debugging is enabled."),
2769 &setdebuglist, &showdebuglist);
/* AArch64 process record-replay related structures, defines etc.  */

/* submask(x): mask covering bits 0..x inclusive.  Use an unsigned
   64-bit constant: with the original "1L", x >= 31 on an ILP32 host
   shifted past the width of long, which is undefined behavior
   (C11 6.5.7).  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
/* bit(obj, st): extract single bit ST from OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* bits(obj, st, fn): extract bit-field [ST..FN] (inclusive) from OBJ.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Allocate a uint32_t array of LENGTH register numbers into REGS and
   copy them from RECORD_BUF.  No-op when LENGTH is zero.  (The mojibake
   "®S[0]" in the corrupted source is restored to "&REGS[0]".)  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
              } \
          } \
        while (0)

/* Allocate an array of LENGTH aarch64_mem_r records into MEMS and copy
   them from RECORD_BUF.  No-op when LENGTH is zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
                memcpy (&MEMS->len, &RECORD_BUF[0], \
                        sizeof (struct aarch64_mem_r) * LENGTH); \
              } \
          } \
        while (0)
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory access: a length in bytes and the address it
   covers.  Field order matters -- MEM_ALLOC copies raw buffers starting
   at the `len' member.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Result codes returned by the per-class record handlers.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS = 0,   /* Instruction recorded.  */
  AARCH64_RECORD_FAILURE,       /* Recording failed.  */
  AARCH64_RECORD_UNSUPPORTED,   /* Instruction cannot be recorded.  */
  AARCH64_RECORD_UNKNOWN        /* Instruction not recognized.  */
};
2819 typedef struct insn_decode_record_t
2821 struct gdbarch *gdbarch;
2822 struct regcache *regcache;
2823 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2824 uint32_t aarch64_insn; /* Insn to be recorded. */
2825 uint32_t mem_rec_count; /* Count of memory records. */
2826 uint32_t reg_rec_count; /* Count of register records. */
2827 uint32_t *aarch64_regs; /* Registers to be recorded. */
2828 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2829 } insn_decode_record;
2831 /* Record handler for data processing - register instructions. */
2834 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2836 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2837 uint32_t record_buf[4];
2839 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2840 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2841 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2843 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2847 /* Logical (shifted register). */
2848 if (insn_bits24_27 == 0x0a)
2849 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2851 else if (insn_bits24_27 == 0x0b)
2852 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2854 return AARCH64_RECORD_UNKNOWN;
2856 record_buf[0] = reg_rd;
2857 aarch64_insn_r->reg_rec_count = 1;
2859 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2863 if (insn_bits24_27 == 0x0b)
2865 /* Data-processing (3 source). */
2866 record_buf[0] = reg_rd;
2867 aarch64_insn_r->reg_rec_count = 1;
2869 else if (insn_bits24_27 == 0x0a)
2871 if (insn_bits21_23 == 0x00)
2873 /* Add/subtract (with carry). */
2874 record_buf[0] = reg_rd;
2875 aarch64_insn_r->reg_rec_count = 1;
2876 if (bit (aarch64_insn_r->aarch64_insn, 29))
2878 record_buf[1] = AARCH64_CPSR_REGNUM;
2879 aarch64_insn_r->reg_rec_count = 2;
2882 else if (insn_bits21_23 == 0x02)
2884 /* Conditional compare (register) and conditional compare
2885 (immediate) instructions. */
2886 record_buf[0] = AARCH64_CPSR_REGNUM;
2887 aarch64_insn_r->reg_rec_count = 1;
2889 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2891 /* CConditional select. */
2892 /* Data-processing (2 source). */
2893 /* Data-processing (1 source). */
2894 record_buf[0] = reg_rd;
2895 aarch64_insn_r->reg_rec_count = 1;
2898 return AARCH64_RECORD_UNKNOWN;
2902 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2904 return AARCH64_RECORD_SUCCESS;
2907 /* Record handler for data processing - immediate instructions. */
2910 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2912 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
2913 uint32_t record_buf[4];
2915 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2916 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
2917 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2918 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2920 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2921 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2922 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2924 record_buf[0] = reg_rd;
2925 aarch64_insn_r->reg_rec_count = 1;
2927 else if (insn_bits24_27 == 0x01)
2929 /* Add/Subtract (immediate). */
2930 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2931 record_buf[0] = reg_rd;
2932 aarch64_insn_r->reg_rec_count = 1;
2934 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2936 else if (insn_bits24_27 == 0x02 && !insn_bit23)
2938 /* Logical (immediate). */
2939 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
2940 record_buf[0] = reg_rd;
2941 aarch64_insn_r->reg_rec_count = 1;
2943 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2946 return AARCH64_RECORD_UNKNOWN;
2948 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2950 return AARCH64_RECORD_SUCCESS;
2953 /* Record handler for branch, exception generation and system instructions. */
2956 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
2958 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
2959 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
2960 uint32_t record_buf[4];
2962 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2963 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
2964 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
2966 if (insn_bits28_31 == 0x0d)
2968 /* Exception generation instructions. */
2969 if (insn_bits24_27 == 0x04)
2971 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
2972 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
2973 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
2975 ULONGEST svc_number;
2977 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
2979 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
2983 return AARCH64_RECORD_UNSUPPORTED;
2985 /* System instructions. */
2986 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
2988 uint32_t reg_rt, reg_crn;
2990 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2991 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
2993 /* Record rt in case of sysl and mrs instructions. */
2994 if (bit (aarch64_insn_r->aarch64_insn, 21))
2996 record_buf[0] = reg_rt;
2997 aarch64_insn_r->reg_rec_count = 1;
2999 /* Record cpsr for hint and msr(immediate) instructions. */
3000 else if (reg_crn == 0x02 || reg_crn == 0x04)
3002 record_buf[0] = AARCH64_CPSR_REGNUM;
3003 aarch64_insn_r->reg_rec_count = 1;
3006 /* Unconditional branch (register). */
3007 else if((insn_bits24_27 & 0x0e) == 0x06)
3009 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3010 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3011 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3014 return AARCH64_RECORD_UNKNOWN;
3016 /* Unconditional branch (immediate). */
3017 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3019 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3020 if (bit (aarch64_insn_r->aarch64_insn, 31))
3021 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3024 /* Compare & branch (immediate), Test & branch (immediate) and
3025 Conditional branch (immediate). */
3026 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3028 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3030 return AARCH64_RECORD_SUCCESS;
3033 /* Record handler for advanced SIMD load and store instructions.
   For loads, records the written V registers; for stores, records the
   affected memory (element size / address pairs).  Bit 24 selects
   single-structure vs multiple-structure forms.  */
3036 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3039 uint64_t addr_offset = 0;
3040 uint32_t record_buf[24];
3041 uint64_t record_buf_mem[24];
3042 uint32_t reg_rn, reg_rt;
3043 uint32_t reg_index = 0, mem_index = 0;
3044 uint8_t opcode_bits, size_bits;
3046 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3047 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3048 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3049 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
/* Base address comes from the Rn register.  */
3050 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3053 debug_printf ("Process record: Advanced SIMD load/store\n");
3055 /* Load/store single structure.  */
3056 if (bit (aarch64_insn_r->aarch64_insn, 24))
3058 uint8_t sindex, scale, selem, esize, replicate = 0;
3059 scale = opcode_bits >> 2;
/* selem = number of structure elements, from opcode bit 1 and insn
   bit 21.  */
3060 selem = ((opcode_bits & 0x02) |
3061 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
/* NOTE(review): the scale-dependent validity checks below have lost
   their surrounding switch/case structure in this extract; each
   rejected encoding returns UNKNOWN.  */
3065 if (size_bits & 0x01)
3066 return AARCH64_RECORD_UNKNOWN;
3069 if ((size_bits >> 1) & 0x01)
3070 return AARCH64_RECORD_UNKNOWN;
3071 if (size_bits & 0x01)
3073 if (!((opcode_bits >> 1) & 0x01))
3076 return AARCH64_RECORD_UNKNOWN;
3080 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3087 return AARCH64_RECORD_UNKNOWN;
/* Replicating (LDnR-style) load: every selected V register is
   written; Rt wraps modulo 32.  */
3093 for (sindex = 0; sindex < selem; sindex++)
3095 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3096 reg_rt = (reg_rt + 1) % 32;
/* Non-replicating: loads (bit 22 set) record V registers, stores
   record memory ranges of esize/8 bytes each.  */
3100 for (sindex = 0; sindex < selem; sindex++)
3101 if (bit (aarch64_insn_r->aarch64_insn, 22))
3102 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3105 record_buf_mem[mem_index++] = esize / 8;
3106 record_buf_mem[mem_index++] = address + addr_offset;
3108 addr_offset = addr_offset + (esize / 8);
3109 reg_rt = (reg_rt + 1) % 32;
3112 /* Load/store multiple structure.  */
3115 uint8_t selem, esize, rpt, elements;
3116 uint8_t eindex, rindex;
/* Element size in bits; bit 30 (Q) selects 128-bit vs 64-bit
   vector length.  */
3118 esize = 8 << size_bits;
3119 if (bit (aarch64_insn_r->aarch64_insn, 30))
3120 elements = 128 / esize;
3122 elements = 64 / esize;
/* NOTE(review): the case labels and rpt/selem assignments for this
   switch are missing from this extract -- only the comments and the
   default return survive.  */
3124 switch (opcode_bits)
3126 /*LD/ST4 (4 Registers).  */
3131 /*LD/ST1 (4 Registers).  */
3136 /*LD/ST3 (3 Registers).  */
3141 /*LD/ST1 (3 Registers).  */
3146 /*LD/ST1 (1 Register).  */
3151 /*LD/ST2 (2 Registers).  */
3156 /*LD/ST1 (2 Registers).  */
3162 return AARCH64_RECORD_UNSUPPORTED;
3165 for (rindex = 0; rindex < rpt; rindex++)
3166 for (eindex = 0; eindex < elements; eindex++)
3168 uint8_t reg_tt, sindex;
3169 reg_tt = (reg_rt + rindex) % 32;
3170 for (sindex = 0; sindex < selem; sindex++)
3172 if (bit (aarch64_insn_r->aarch64_insn, 22))
3173 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3176 record_buf_mem[mem_index++] = esize / 8;
3177 record_buf_mem[mem_index++] = address + addr_offset;
3179 addr_offset = addr_offset + (esize / 8);
3180 reg_tt = (reg_tt + 1) % 32;
/* Writeback forms (bit 23) also modify the base register Rn.  */
3185 if (bit (aarch64_insn_r->aarch64_insn, 23))
3186 record_buf[reg_index++] = reg_rn;
3188 aarch64_insn_r->reg_rec_count = reg_index;
/* record_buf_mem holds (len, addr) pairs, hence the division by 2.  */
3189 aarch64_insn_r->mem_rec_count = mem_index / 2;
3190 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3192 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3194 return AARCH64_RECORD_SUCCESS;
3197 /* Record handler for load and store instructions.
   Dispatches on the load/store encoding sub-class; loads record the
   target register (X or V depending on bit 26), stores record the
   affected memory as (size, address) pairs.  */
3200 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3202 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3203 uint8_t insn_bit23, insn_bit21;
3204 uint8_t opc, size_bits, ld_flag, vector_flag;
3205 uint32_t reg_rn, reg_rt, reg_rt2;
3206 uint64_t datasize, offset;
3207 uint32_t record_buf[8];
3208 uint64_t record_buf_mem[8];
3211 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3212 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3213 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3214 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3215 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
/* ld_flag: bit 22 -- set for loads; vector_flag: bit 26 -- set when
   Rt is a SIMD/FP (V) register.  */
3216 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3217 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3218 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3219 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3220 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3221 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3223 /* Load/store exclusive.  */
3224 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3227 debug_printf ("Process record: load/store exclusive\n");
/* NOTE(review): the load/pair conditions guarding these branches are
   not visible in this extract.  */
3231 record_buf[0] = reg_rt;
3232 aarch64_insn_r->reg_rec_count = 1;
3235 record_buf[1] = reg_rt2;
3236 aarch64_insn_r->reg_rec_count = 2;
/* Store path: pair forms cover twice the datasize.  */
3242 datasize = (8 << size_bits) * 2;
3244 datasize = (8 << size_bits);
3245 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3247 record_buf_mem[0] = datasize / 8;
3248 record_buf_mem[1] = address;
3249 aarch64_insn_r->mem_rec_count = 1;
3252 /* Save register rs.  */
3253 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3254 aarch64_insn_r->reg_rec_count = 1;
3258 /* Load register (literal) instructions decoding.  */
3259 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3262 debug_printf ("Process record: load register (literal)\n");
3264 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3266 record_buf[0] = reg_rt;
3267 aarch64_insn_r->reg_rec_count = 1;
3269 /* All types of load/store pair instructions decoding.  */
3270 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3273 debug_printf ("Process record: load/store pair\n");
3279 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3280 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3284 record_buf[0] = reg_rt;
3285 record_buf[1] = reg_rt2;
3287 aarch64_insn_r->reg_rec_count = 2;
/* Store pair: compute the (possibly negative) scaled imm7 offset.  */
3292 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3294 size_bits = size_bits >> 1;
3295 datasize = 8 << (2 + size_bits);
/* Two's-complement decode of the 7-bit signed immediate.  */
3296 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3297 offset = offset << (2 + size_bits);
3298 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
/* Post-indexed forms use the unmodified base address.  */
3300 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3302 if (imm7_off & 0x40)
3303 address = address - offset;
3305 address = address + offset;
/* A pair store touches two consecutive datasize/8 regions.  */
3308 record_buf_mem[0] = datasize / 8;
3309 record_buf_mem[1] = address;
3310 record_buf_mem[2] = datasize / 8;
3311 record_buf_mem[3] = address + (datasize / 8);
3312 aarch64_insn_r->mem_rec_count = 2;
/* Writeback forms also modify the base register Rn.  */
3314 if (bit (aarch64_insn_r->aarch64_insn, 23))
3315 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3317 /* Load/store register (unsigned immediate) instructions.  */
3318 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3320 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3327 if (size_bits != 0x03)
3330 return AARCH64_RECORD_UNKNOWN;
3334 debug_printf ("Process record: load/store (unsigned immediate):"
3335 " size %x V %d opc %x\n", size_bits, vector_flag,
/* Store path: scaled 12-bit unsigned offset from Rn.  */
3341 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3342 datasize = 8 << size_bits;
3343 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3345 offset = offset << size_bits;
3346 address = address + offset;
3348 record_buf_mem[0] = datasize >> 3;
3349 record_buf_mem[1] = address;
3350 aarch64_insn_r->mem_rec_count = 1;
3355 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3357 record_buf[0] = reg_rt;
3358 aarch64_insn_r->reg_rec_count = 1;
3361 /* Load/store register (register offset) instructions.  */
3362 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3363 && insn_bits10_11 == 0x02 && insn_bit21)
3366 debug_printf ("Process record: load/store (register offset)\n");
3367 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3374 if (size_bits != 0x03)
3377 return AARCH64_RECORD_UNKNOWN;
3381 uint64_t reg_rm_val;
3382 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
/* NOTE(review): "®_rm_val" below is encoding corruption of
   "&reg_rm_val" -- restore when the file is repaired.  */
3383 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
/* Bit 12 (S) selects the scaled-offset form.  */
3384 if (bit (aarch64_insn_r->aarch64_insn, 12))
3385 offset = reg_rm_val << size_bits;
3387 offset = reg_rm_val;
3388 datasize = 8 << size_bits;
3389 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3391 address = address + offset;
3392 record_buf_mem[0] = datasize >> 3;
3393 record_buf_mem[1] = address;
3394 aarch64_insn_r->mem_rec_count = 1;
3399 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3401 record_buf[0] = reg_rt;
3402 aarch64_insn_r->reg_rec_count = 1;
3405 /* Load/store register (immediate and unprivileged) instructions.  */
3406 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3411 debug_printf ("Process record: load/store "
3412 "(immediate and unprivileged)\n");
3414 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3421 if (size_bits != 0x03)
3424 return AARCH64_RECORD_UNKNOWN;
/* Two's-complement decode of the 9-bit signed immediate.  */
3429 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3430 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3431 datasize = 8 << size_bits;
3432 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
/* Post-indexed (10_11 == 0x01) uses the unmodified base address.  */
3434 if (insn_bits10_11 != 0x01)
3436 if (imm9_off & 0x0100)
3437 address = address - offset;
3439 address = address + offset;
3441 record_buf_mem[0] = datasize >> 3;
3442 record_buf_mem[1] = address;
3443 aarch64_insn_r->mem_rec_count = 1;
3448 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3450 record_buf[0] = reg_rt;
3451 aarch64_insn_r->reg_rec_count = 1;
/* Pre/post-indexed writeback forms also modify Rn.  */
3453 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3454 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3456 /* Advanced SIMD load/store instructions.  */
3458 return aarch64_record_asimd_load_store (aarch64_insn_r);
3460 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3462 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3464 return AARCH64_RECORD_SUCCESS;
3467 /* Record handler for data processing SIMD and floating point instructions.
   Exactly one destination register (X, V, or CPSR) is recorded per
   instruction -- enforced by the gdb_assert at the end.  */
3470 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3472 uint8_t insn_bit21, opcode, rmode, reg_rd;
3473 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3474 uint8_t insn_bits11_14;
3475 uint32_t record_buf[2];
3477 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3478 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3479 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3480 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3481 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3482 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3483 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3484 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3485 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3488 debug_printf ("Process record: data processing SIMD/FP: ");
/* Scalar floating-point group.  */
3490 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3492 /* Floating point - fixed point conversion instructions.  */
3496 debug_printf ("FP - fixed point conversion");
/* FP -> integer direction writes an X register; otherwise a
   V register.  */
3498 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3499 record_buf[0] = reg_rd;
3501 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3503 /* Floating point - conditional compare instructions.  */
3504 else if (insn_bits10_11 == 0x01)
3507 debug_printf ("FP - conditional compare");
3509 record_buf[0] = AARCH64_CPSR_REGNUM;
3511 /* Floating point - data processing (2-source) and
3512 conditional select instructions.  */
3513 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3516 debug_printf ("FP - DP (2-source)");
3518 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3520 else if (insn_bits10_11 == 0x00)
3522 /* Floating point - immediate instructions.  */
3523 if ((insn_bits12_15 & 0x01) == 0x01
3524 || (insn_bits12_15 & 0x07) == 0x04)
3527 debug_printf ("FP - immediate");
3528 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3530 /* Floating point - compare instructions.  */
3531 else if ((insn_bits12_15 & 0x03) == 0x02)
/* NOTE(review): this debug message duplicates the "FP - immediate"
   text above although this is the compare path -- looks like a
   copy-paste slip in the original; confirm before changing.  */
3534 debug_printf ("FP - immediate");
3535 record_buf[0] = AARCH64_CPSR_REGNUM;
3537 /* Floating point - integer conversions instructions.  */
3538 else if (insn_bits12_15 == 0x00)
3540 /* Convert float to integer instruction.  */
3541 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3544 debug_printf ("float to int conversion");
3546 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3548 /* Convert integer to float instruction.  */
3549 else if ((opcode >> 1) == 0x01 && !rmode)
3552 debug_printf ("int to float conversion");
3554 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3556 /* Move float to integer instruction.  */
3557 else if ((opcode >> 1) == 0x03)
3560 debug_printf ("move float to int");
/* FMOV direction: opcode bit 0 clear means the destination is a
   general (X) register.  */
3562 if (!(opcode & 0x01))
3563 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3565 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3568 return AARCH64_RECORD_UNKNOWN;
3571 return AARCH64_RECORD_UNKNOWN;
3574 return AARCH64_RECORD_UNKNOWN;
/* Advanced SIMD group.  */
3576 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3579 debug_printf ("SIMD copy");
3581 /* Advanced SIMD copy instructions.  */
3582 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3583 && !bit (aarch64_insn_r->aarch64_insn, 15)
3584 && bit (aarch64_insn_r->aarch64_insn, 10))
/* SMOV/UMOV variants write an X register; other copies write V.  */
3586 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3587 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3589 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3592 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3594 /* All remaining floating point or advanced SIMD instructions.  */
3598 debug_printf ("all remain");
3600 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3604 debug_printf ("\n");
/* Exactly one register was selected above.  */
3606 aarch64_insn_r->reg_rec_count++;
3607 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3608 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3610 return AARCH64_RECORD_SUCCESS;
3613 /* Decodes insns type and invokes its record handler. */
3616 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3618 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3620 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3621 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3622 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3623 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3625 /* Data processing - immediate instructions. */
3626 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3627 return aarch64_record_data_proc_imm (aarch64_insn_r);
3629 /* Branch, exception generation and system instructions. */
3630 if (ins_bit26 && !ins_bit27 && ins_bit28)
3631 return aarch64_record_branch_except_sys (aarch64_insn_r);
3633 /* Load and store instructions. */
3634 if (!ins_bit25 && ins_bit27)
3635 return aarch64_record_load_store (aarch64_insn_r);
3637 /* Data processing - register instructions. */
3638 if (ins_bit25 && !ins_bit26 && ins_bit27)
3639 return aarch64_record_data_proc_reg (aarch64_insn_r);
3641 /* Data processing - SIMD and floating point instructions. */
3642 if (ins_bit25 && ins_bit26 && ins_bit27)
3643 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3645 return AARCH64_RECORD_UNSUPPORTED;
3648 /* Cleans up local record registers and memory allocations. */
3651 deallocate_reg_mem (insn_decode_record *record)
3653 xfree (record->aarch64_regs);
3654 xfree (record->aarch64_mems);
3657 /* Parse the current instruction and record the values of the registers and
3658 memory that will be changed in current instruction to record_arch_list
3659 return -1 if something is wrong. */
3662 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3663 CORE_ADDR insn_addr)
3665 uint32_t rec_no = 0;
3666 uint8_t insn_size = 4;
3668 ULONGEST t_bit = 0, insn_id = 0;
3669 gdb_byte buf[insn_size];
3670 insn_decode_record aarch64_record;
3672 memset (&buf[0], 0, insn_size);
3673 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3674 target_read_memory (insn_addr, &buf[0], insn_size);
3675 aarch64_record.aarch64_insn
3676 = (uint32_t) extract_unsigned_integer (&buf[0],
3678 gdbarch_byte_order (gdbarch));
3679 aarch64_record.regcache = regcache;
3680 aarch64_record.this_addr = insn_addr;
3681 aarch64_record.gdbarch = gdbarch;
3683 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3684 if (ret == AARCH64_RECORD_UNSUPPORTED)
3686 printf_unfiltered (_("Process record does not support instruction "
3687 "0x%0x at address %s.\n"),
3688 aarch64_record.aarch64_insn,
3689 paddress (gdbarch, insn_addr));
3695 /* Record registers. */
3696 record_full_arch_list_add_reg (aarch64_record.regcache,
3698 /* Always record register CPSR. */
3699 record_full_arch_list_add_reg (aarch64_record.regcache,
3700 AARCH64_CPSR_REGNUM);
3701 if (aarch64_record.aarch64_regs)
3702 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3703 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3704 aarch64_record.aarch64_regs[rec_no]))
3707 /* Record memories. */
3708 if (aarch64_record.aarch64_mems)
3709 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3710 if (record_full_arch_list_add_mem
3711 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3712 aarch64_record.aarch64_mems[rec_no].len))
3715 if (record_full_arch_list_add_end ())
3719 deallocate_reg_mem (&aarch64_record);