1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
46 #include "aarch64-tdep.h"
49 #include "elf/aarch64.h"
54 #include "record-full.h"
56 #include "features/aarch64.c"
58 /* Pseudo register base numbers. */
/* Each SIMD view (Q/D/S/H/B) exposes 32 pseudo registers, so each
   base number is 32 past the previous one.  */
59 #define AARCH64_Q0_REGNUM 0
60 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
61 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
62 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
63 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
65 /* The standard register names, and all the valid aliases for them. */
/* NOTE(review): this listing elides lines (the embedded numbering is
   discontinuous); the struct header, its integer regnum field, and the
   array's braces are not shown here.  */
68 const char *const name;
70 } aarch64_register_aliases[] =
72 /* 64-bit register names. */
73 {"fp", AARCH64_FP_REGNUM},
74 {"lr", AARCH64_LR_REGNUM},
75 {"sp", AARCH64_SP_REGNUM},
77 /* 32-bit register names. */
/* Each wN alias maps to the raw xN register number; GDB reads the
   64-bit register and the caller narrows as needed.  */
78 {"w0", AARCH64_X0_REGNUM + 0},
79 {"w1", AARCH64_X0_REGNUM + 1},
80 {"w2", AARCH64_X0_REGNUM + 2},
81 {"w3", AARCH64_X0_REGNUM + 3},
82 {"w4", AARCH64_X0_REGNUM + 4},
83 {"w5", AARCH64_X0_REGNUM + 5},
84 {"w6", AARCH64_X0_REGNUM + 6},
85 {"w7", AARCH64_X0_REGNUM + 7},
86 {"w8", AARCH64_X0_REGNUM + 8},
87 {"w9", AARCH64_X0_REGNUM + 9},
88 {"w10", AARCH64_X0_REGNUM + 10},
89 {"w11", AARCH64_X0_REGNUM + 11},
90 {"w12", AARCH64_X0_REGNUM + 12},
91 {"w13", AARCH64_X0_REGNUM + 13},
92 {"w14", AARCH64_X0_REGNUM + 14},
93 {"w15", AARCH64_X0_REGNUM + 15},
94 {"w16", AARCH64_X0_REGNUM + 16},
95 {"w17", AARCH64_X0_REGNUM + 17},
96 {"w18", AARCH64_X0_REGNUM + 18},
97 {"w19", AARCH64_X0_REGNUM + 19},
98 {"w20", AARCH64_X0_REGNUM + 20},
99 {"w21", AARCH64_X0_REGNUM + 21},
100 {"w22", AARCH64_X0_REGNUM + 22},
101 {"w23", AARCH64_X0_REGNUM + 23},
102 {"w24", AARCH64_X0_REGNUM + 24},
103 {"w25", AARCH64_X0_REGNUM + 25},
104 {"w26", AARCH64_X0_REGNUM + 26},
105 {"w27", AARCH64_X0_REGNUM + 27},
106 {"w28", AARCH64_X0_REGNUM + 28},
107 {"w29", AARCH64_X0_REGNUM + 29},
108 {"w30", AARCH64_X0_REGNUM + 30},
/* Procedure-call-standard names for the intra-procedure scratch
   registers x16/x17.  */
111 {"ip0", AARCH64_X0_REGNUM + 16},
112 {"ip1", AARCH64_X0_REGNUM + 17}
115 /* The required core 'R' registers. */
/* Names for the 31 general registers plus SP, indexed by raw regnum
   relative to AARCH64_X0_REGNUM (array braces elided in this listing).  */
116 static const char *const aarch64_r_register_names[] =
118 /* These registers must appear in consecutive RAW register number
119 order and they must begin with AARCH64_X0_REGNUM! */
120 "x0", "x1", "x2", "x3",
121 "x4", "x5", "x6", "x7",
122 "x8", "x9", "x10", "x11",
123 "x12", "x13", "x14", "x15",
124 "x16", "x17", "x18", "x19",
125 "x20", "x21", "x22", "x23",
126 "x24", "x25", "x26", "x27",
127 "x28", "x29", "x30", "sp",
131 /* The FP/SIMD 'V' registers. */
/* Names for the 32 vector registers, indexed by raw regnum relative to
   AARCH64_V0_REGNUM (array braces elided in this listing).  */
132 static const char *const aarch64_v_register_names[] =
134 /* These registers must appear in consecutive RAW register number
135 order and they must begin with AARCH64_V0_REGNUM! */
136 "v0", "v1", "v2", "v3",
137 "v4", "v5", "v6", "v7",
138 "v8", "v9", "v10", "v11",
139 "v12", "v13", "v14", "v15",
140 "v16", "v17", "v18", "v19",
141 "v20", "v21", "v22", "v23",
142 "v24", "v25", "v26", "v27",
143 "v28", "v29", "v30", "v31",
148 /* AArch64 prologue cache structure. */
/* NOTE(review): the field declaration lines (func, prev_pc, prev_sp,
   available_p, framesize, framereg) are elided from this listing —
   only their doc comments remain.  Verify names against the full
   source before relying on them.  */
149 struct aarch64_prologue_cache
151 /* The program counter at the start of the function. It is used to
152 identify this frame as a prologue frame. */
155 /* The program counter at the time this frame was created; i.e. where
156 this function was called from. It is used to identify this frame as a
160 /* The stack pointer at the time this frame was created; i.e. the
161 caller's stack pointer when this function was called. It is used
162 to identify this frame. */
165 /* Is the target available to read from? */
168 /* The frame base for this frame is just prev_sp - frame size.
169 FRAMESIZE is the distance from the frame pointer to the
170 initial stack pointer. */
173 /* The register used to hold the frame pointer for this frame. */
176 /* Saved register offsets. */
177 struct trad_frame_saved_reg *saved_regs;
180 /* Toggle this file's internal debugging dump. */
181 static int aarch64_debug;
/* "show debug aarch64" callback: report the current setting (the
   return-type line is elided in this listing).  */
184 show_aarch64_debug (struct ui_file *file, int from_tty,
185 struct cmd_list_element *c, const char *value)
187 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  /* Number of unused high-order bits once the field is right-aligned.  */
  unsigned shift = 32 - width;
  /* Right-align the field and mask off everything above it.  Doing all
     shifting on the unsigned type avoids the undefined behaviour of
     the previous "((int32_t) insn << shift_l) >> shift_r" form, which
     could left-shift a signed value into (or past) the sign bit.  */
  uint32_t field = (insn >> offset) & (0xffffffffu >> shift);

  /* Manually sign-extend from bit WIDTH-1.  For WIDTH == 32 the field
     already fills the word, so no extension is needed.  */
  if (width < 32 && (field & (1u << (width - 1))) != 0)
    field |= ~(0xffffffffu >> shift);

  return (int32_t) field;
}
/* Determine if specified bits within an instruction opcode matches a
   specific pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against PATTERN for a match.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  return (insn & mask) == pattern;
}
223 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
225 ADDR specifies the address of the opcode.
226 INSN specifies the opcode to test.
227 RD receives the 'rd' field from the decoded instruction.
228 RN receives the 'rn' field from the decoded instruction.
230 Return 1 if the opcodes matches and is decoded, otherwise 0. */
232 decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
/* The mask leaves bit 30 (the sub selector) and bit 22 (shift) free,
   so both ADD and SUB immediate forms match; op_is_sub and shift below
   capture which variant was seen.  */
235 if ((insn & 0x9f000000) == 0x91000000)
240 *rd = (insn >> 0) & 0x1f;
241 *rn = (insn >> 5) & 0x1f;
242 *imm = (insn >> 10) & 0xfff;
243 shift = (insn >> 22) & 0x3;
244 op_is_sub = (insn >> 30) & 0x1;
/* Debug trace of the decoded instruction (guard and tail elided in
   this listing).  */
262 fprintf_unfiltered (gdb_stdlog,
263 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
264 core_addr_to_string_nz (addr), insn, *rd, *rn,
271 /* Decode an opcode if it represents an ADRP instruction.
273 ADDR specifies the address of the opcode.
274 INSN specifies the opcode to test.
275 RD receives the 'rd' field from the decoded instruction.
277 Return 1 if the opcodes matches and is decoded, otherwise 0. */
280 decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
/* Bit 31 is in the mask, so only ADRP (not ADR) matches; rd is the
   destination register in bits 4:0.  */
282 if (decode_masked_match (insn, 0x9f000000, 0x90000000))
284 *rd = (insn >> 0) & 0x1f;
287 fprintf_unfiltered (gdb_stdlog,
288 "decode: 0x%s 0x%x adrp x%u, #?\n",
289 core_addr_to_string_nz (addr), insn, *rd);
295 /* Decode an opcode if it represents an branch immediate or branch
296 and link immediate instruction.
298 ADDR specifies the address of the opcode.
299 INSN specifies the opcode to test.
300 LINK receives the 'link' bit from the decoded instruction.
301 OFFSET receives the immediate offset from the decoded instruction.
303 Return 1 if the opcodes matches and is decoded, otherwise 0. */
306 decode_b (CORE_ADDR addr, uint32_t insn, unsigned *link, int32_t *offset)
308 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
309 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
/* Bit 31 (b vs. bl) is excluded from the mask so both forms match;
   the imm26 field is word-scaled, hence the << 2.  */
310 if (decode_masked_match (insn, 0x7c000000, 0x14000000))
313 *offset = extract_signed_bitfield (insn, 26, 0) << 2;
316 fprintf_unfiltered (gdb_stdlog,
317 "decode: 0x%s 0x%x %s 0x%s\n",
318 core_addr_to_string_nz (addr), insn,
320 core_addr_to_string_nz (addr + *offset));
327 /* Decode an opcode if it represents a conditional branch instruction.
329 ADDR specifies the address of the opcode.
330 INSN specifies the opcode to test.
331 COND receives the branch condition field from the decoded
333 OFFSET receives the immediate offset from the decoded instruction.
335 Return 1 if the opcodes matches and is decoded, otherwise 0. */
338 decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
/* b.cond: condition in bits 3:0, word-scaled imm19 at bit 5.  */
340 if (decode_masked_match (insn, 0xfe000000, 0x54000000))
342 *cond = (insn >> 0) & 0xf;
343 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
346 fprintf_unfiltered (gdb_stdlog,
347 "decode: 0x%s 0x%x b<%u> 0x%s\n",
348 core_addr_to_string_nz (addr), insn, *cond,
349 core_addr_to_string_nz (addr + *offset));
355 /* Decode an opcode if it represents a branch via register instruction.
357 ADDR specifies the address of the opcode.
358 INSN specifies the opcode to test.
359 LINK receives the 'link' bit from the decoded instruction.
360 RN receives the 'rn' field from the decoded instruction.
362 Return 1 if the opcodes matches and is decoded, otherwise 0. */
365 decode_br (CORE_ADDR addr, uint32_t insn, unsigned *link, unsigned *rn)
367 /* 8 4 0 6 2 8 4 0 */
368 /* blr 110101100011111100000000000rrrrr */
369 /* br 110101100001111100000000000rrrrr */
/* Bit 21 distinguishes blr from br (see encodings above) and is the
   only difference the mask leaves free besides rn.  */
370 if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
372 *link = (insn >> 21) & 1;
373 *rn = (insn >> 5) & 0x1f;
376 fprintf_unfiltered (gdb_stdlog,
377 "decode: 0x%s 0x%x %s 0x%x\n",
378 core_addr_to_string_nz (addr), insn,
379 *link ? "blr" : "br", *rn);
386 /* Decode an opcode if it represents a CBZ or CBNZ instruction.
388 ADDR specifies the address of the opcode.
389 INSN specifies the opcode to test.
390 IS64 receives the 'sf' field from the decoded instruction.
391 OP receives the 'op' field from the decoded instruction.
392 RN receives the 'rn' field from the decoded instruction.
393 OFFSET receives the 'imm19' field from the decoded instruction.
395 Return 1 if the opcodes matches and is decoded, otherwise 0. */
398 decode_cb (CORE_ADDR addr,
399 uint32_t insn, int *is64, unsigned *op, unsigned *rn,
/* Mask leaves sf (bit 31) and op (bit 24) free, so all four of
   cbz/cbnz in 32/64-bit forms match.  */
402 if (decode_masked_match (insn, 0x7e000000, 0x34000000))
404 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
405 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
407 *rn = (insn >> 0) & 0x1f;
408 *is64 = (insn >> 31) & 0x1;
409 *op = (insn >> 24) & 0x1;
410 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
413 fprintf_unfiltered (gdb_stdlog,
414 "decode: 0x%s 0x%x %s 0x%s\n",
415 core_addr_to_string_nz (addr), insn,
416 *op ? "cbnz" : "cbz",
417 core_addr_to_string_nz (addr + *offset));
423 /* Decode an opcode if it represents a ERET instruction.
425 ADDR specifies the address of the opcode.
426 INSN specifies the opcode to test.
428 Return 1 if the opcodes matches and is decoded, otherwise 0. */
431 decode_eret (CORE_ADDR addr, uint32_t insn)
433 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
/* ERET has no variable fields, so an exact compare suffices.  */
434 if (insn == 0xd69f03e0)
437 fprintf_unfiltered (gdb_stdlog, "decode: 0x%s 0x%x eret\n",
438 core_addr_to_string_nz (addr), insn);
444 /* Decode an opcode if it represents a MOVZ instruction.
446 ADDR specifies the address of the opcode.
447 INSN specifies the opcode to test.
448 RD receives the 'rd' field from the decoded instruction.
450 Return 1 if the opcodes matches and is decoded, otherwise 0. */
453 decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
/* NOTE(review): the mask pins bit 31 (sf) to 0, so only the 32-bit
   MOVZ form matches here even though the trace prints "x%u" — verify
   against the full source whether 64-bit MOVZ was meant to match.  */
455 if (decode_masked_match (insn, 0xff800000, 0x52800000))
457 *rd = (insn >> 0) & 0x1f;
460 fprintf_unfiltered (gdb_stdlog,
461 "decode: 0x%s 0x%x movz x%u, #?\n",
462 core_addr_to_string_nz (addr), insn, *rd);
468 /* Decode an opcode if it represents a ORR (shifted register)
471 ADDR specifies the address of the opcode.
472 INSN specifies the opcode to test.
473 RD receives the 'rd' field from the decoded instruction.
474 RN receives the 'rn' field from the decoded instruction.
475 RM receives the 'rm' field from the decoded instruction.
476 IMM receives the 'imm6' field from the decoded instruction.
478 Return 1 if the opcodes matches and is decoded, otherwise 0. */
481 decode_orr_shifted_register_x (CORE_ADDR addr,
482 uint32_t insn, unsigned *rd, unsigned *rn,
483 unsigned *rm, int32_t *imm)
/* 64-bit ORR (shifted register); imm6 is the shift amount.  The
   rn==31, imm==0 case is "mov xd, xm" which the prologue scanner
   cares about.  */
485 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
487 *rd = (insn >> 0) & 0x1f;
488 *rn = (insn >> 5) & 0x1f;
489 *rm = (insn >> 16) & 0x1f;
490 *imm = (insn >> 10) & 0x3f;
493 fprintf_unfiltered (gdb_stdlog,
494 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
495 core_addr_to_string_nz (addr), insn, *rd,
502 /* Decode an opcode if it represents a RET instruction.
504 ADDR specifies the address of the opcode.
505 INSN specifies the opcode to test.
506 RN receives the 'rn' field from the decoded instruction.
508 Return 1 if the opcodes matches and is decoded, otherwise 0. */
511 decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
/* Only rn (bits 9:5) varies; everything else is fixed by the mask.  */
513 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
515 *rn = (insn >> 5) & 0x1f;
517 fprintf_unfiltered (gdb_stdlog,
518 "decode: 0x%s 0x%x ret x%u\n",
519 core_addr_to_string_nz (addr), insn, *rn);
525 /* Decode an opcode if it represents the following instruction:
526 STP rt, rt2, [rn, #imm]
528 ADDR specifies the address of the opcode.
529 INSN specifies the opcode to test.
530 RT1 receives the 'rt' field from the decoded instruction.
531 RT2 receives the 'rt2' field from the decoded instruction.
532 RN receives the 'rn' field from the decoded instruction.
533 IMM receives the 'imm' field from the decoded instruction.
535 Return 1 if the opcodes matches and is decoded, otherwise 0. */
538 decode_stp_offset (CORE_ADDR addr,
540 unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
542 if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
544 *rt1 = (insn >> 0) & 0x1f;
545 *rn = (insn >> 5) & 0x1f;
546 *rt2 = (insn >> 10) & 0x1f;
547 *imm = extract_signed_bitfield (insn, 7, 15);
/* NOTE(review): the 64-bit STP imm7 field is scaled by 8 to get a byte
   offset; the scaling statement ("*imm <<= 3;") appears to be elided
   from this listing — confirm against the full source.  */
551 fprintf_unfiltered (gdb_stdlog,
552 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
553 core_addr_to_string_nz (addr), insn,
554 *rt1, *rt2, *rn, *imm);
560 /* Decode an opcode if it represents the following instruction:
561 STP rt, rt2, [rn, #imm]!
563 ADDR specifies the address of the opcode.
564 INSN specifies the opcode to test.
565 RT1 receives the 'rt' field from the decoded instruction.
566 RT2 receives the 'rt2' field from the decoded instruction.
567 RN receives the 'rn' field from the decoded instruction.
568 IMM receives the 'imm' field from the decoded instruction.
570 Return 1 if the opcodes matches and is decoded, otherwise 0. */
573 decode_stp_offset_wb (CORE_ADDR addr,
575 unsigned *rt1, unsigned *rt2, unsigned *rn,
/* Pre-index (writeback) variant: pattern bit 23 set distinguishes it
   from the plain offset form decoded above.  */
578 if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
580 *rt1 = (insn >> 0) & 0x1f;
581 *rn = (insn >> 5) & 0x1f;
582 *rt2 = (insn >> 10) & 0x1f;
583 *imm = extract_signed_bitfield (insn, 7, 15);
/* NOTE(review): as with decode_stp_offset, the imm7 <<= 3 scaling line
   appears elided here — confirm against the full source.  */
587 fprintf_unfiltered (gdb_stdlog,
588 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
589 core_addr_to_string_nz (addr), insn,
590 *rt1, *rt2, *rn, *imm);
596 /* Decode an opcode if it represents the following instruction:
599 ADDR specifies the address of the opcode.
600 INSN specifies the opcode to test.
601 IS64 receives size field from the decoded instruction.
602 RT receives the 'rt' field from the decoded instruction.
603 RN receives the 'rn' field from the decoded instruction.
604 IMM receives the 'imm' field from the decoded instruction.
606 Return 1 if the opcodes matches and is decoded, otherwise 0. */
609 decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
610 unsigned *rn, int32_t *imm)
/* STUR (store unscaled): imm9 is a byte offset and needs no scaling,
   unlike the STP imm7 fields.  */
612 if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
614 *is64 = (insn >> 30) & 1;
615 *rt = (insn >> 0) & 0x1f;
616 *rn = (insn >> 5) & 0x1f;
617 *imm = extract_signed_bitfield (insn, 9, 12);
620 fprintf_unfiltered (gdb_stdlog,
621 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
622 core_addr_to_string_nz (addr), insn,
623 *is64 ? 'x' : 'w', *rt, *rn, *imm);
629 /* Decode an opcode if it represents a TB or TBNZ instruction.
631 ADDR specifies the address of the opcode.
632 INSN specifies the opcode to test.
633 OP receives the 'op' field from the decoded instruction.
634 BIT receives the bit position field from the decoded instruction.
635 RT receives 'rt' field from the decoded instruction.
636 IMM receives 'imm' field from the decoded instruction.
638 Return 1 if the opcodes matches and is decoded, otherwise 0. */
641 decode_tb (CORE_ADDR addr,
642 uint32_t insn, unsigned *op, unsigned *bit, unsigned *rt,
645 if (decode_masked_match (insn, 0x7e000000, 0x36000000))
647 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
648 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
650 *rt = (insn >> 0) & 0x1f;
651 *op = insn & (1 << 24);
/* NOTE(review): "(insn >> (31 - 4)) & 0x20" shifts bit 31 down to bit
   position 4, so the 0x20 mask can never be satisfied and the b5 bit
   of the tested-bit number is always lost (tbz/tbnz on bits 32-63
   report the wrong bit).  It looks like "(31 - 5)" was intended —
   verify against the A64 TBZ/TBNZ encoding before changing.  */
652 *bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
653 *imm = extract_signed_bitfield (insn, 14, 5) << 2;
656 fprintf_unfiltered (gdb_stdlog,
657 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
658 core_addr_to_string_nz (addr), insn,
659 *op ? "tbnz" : "tbz", *rt, *bit,
660 core_addr_to_string_nz (addr + *imm));
666 /* Analyze a prologue, looking for a recognizable stack frame
667 and frame pointer. Scan until we encounter a store that could
668 clobber the stack frame unexpectedly, or an unknown instruction. */
/* NOTE(review): this listing elides many lines (the embedded
   numbering is discontinuous): the return type, most local variable
   declarations (insn, i, rd, rn, rt, rt1, rt2, rm, imm, offset, cond,
   op, bit, is64, is_link, ...), braces, and several early returns are
   not visible.  Comments below only describe what the visible lines
   establish.  */
671 aarch64_analyze_prologue (struct gdbarch *gdbarch,
672 CORE_ADDR start, CORE_ADDR limit,
673 struct aarch64_prologue_cache *cache)
675 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
677 pv_t regs[AARCH64_X_REGISTER_COUNT];
678 struct pv_area *stack;
679 struct cleanup *back_to;
/* Start with every X register holding its own entry value and an
   empty abstract stack, then symbolically execute one 4-byte
   instruction at a time.  */
681 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
682 regs[i] = pv_register (i, 0);
683 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
684 back_to = make_cleanup_free_pv_area (stack);
686 for (; start < limit; start += 4)
704 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
706 if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
707 regs[rd] = pv_add_constant (regs[rn], imm);
708 else if (decode_adrp (start, insn, &rd))
709 regs[rd] = pv_unknown ();
710 else if (decode_b (start, insn, &is_link, &offset))
712 /* Stop analysis on branch. */
715 else if (decode_bcond (start, insn, &cond, &offset))
717 /* Stop analysis on branch. */
720 else if (decode_br (start, insn, &is_link, &rn))
722 /* Stop analysis on branch. */
725 else if (decode_cb (start, insn, &is64, &op, &rn, &offset))
727 /* Stop analysis on branch. */
730 else if (decode_eret (start, insn))
732 /* Stop analysis on branch. */
735 else if (decode_movz (start, insn, &rd))
736 regs[rd] = pv_unknown ();
/* "orr xd, xzr, xm" (imm == 0, rn == 31) is a register move; any
   other ORR makes the scanner give up (debug text below).  */
738 if (decode_orr_shifted_register_x (start, insn, &rd, &rn, &rm, &imm))
740 if (imm == 0 && rn == 31)
747 "aarch64: prologue analysis gave up addr=0x%s "
748 "opcode=0x%x (orr x register)\n",
749 core_addr_to_string_nz (start),
754 else if (decode_ret (start, insn, &rn))
756 /* Stop analysis on branch. */
759 else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
761 pv_area_store (stack, pv_add_constant (regs[rn], offset),
762 is64 ? 8 : 4, regs[rt]);
764 else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
766 /* If recording this store would invalidate the store area
767 (perhaps because rn is not known) then we should abandon
768 further prologue analysis. */
769 if (pv_area_store_would_trash (stack,
770 pv_add_constant (regs[rn], imm)))
773 if (pv_area_store_would_trash (stack,
774 pv_add_constant (regs[rn], imm + 8)))
777 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
779 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
782 else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
784 /* If recording this store would invalidate the store area
785 (perhaps because rn is not known) then we should abandon
786 further prologue analysis. */
787 if (pv_area_store_would_trash (stack,
788 pv_add_constant (regs[rn], imm)))
791 if (pv_area_store_would_trash (stack,
792 pv_add_constant (regs[rn], imm + 8)))
795 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
797 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
/* Writeback form also updates the base register.  */
799 regs[rn] = pv_add_constant (regs[rn], imm);
801 else if (decode_tb (start, insn, &op, &bit, &rn, &offset))
803 /* Stop analysis on branch. */
809 fprintf_unfiltered (gdb_stdlog,
810 "aarch64: prologue analysis gave up addr=0x%s"
812 core_addr_to_string_nz (start), insn);
/* Presumably the early-exit path taken when CACHE is NULL (the guard
   is elided from this listing) — verify against the full source.  */
819 do_cleanups (back_to);
/* Decide which register anchors the frame and how big it is.  */
823 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
825 /* Frame pointer is fp. Frame size is constant. */
826 cache->framereg = AARCH64_FP_REGNUM;
827 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
829 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
831 /* Try the stack pointer. */
832 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
833 cache->framereg = AARCH64_SP_REGNUM;
837 /* We're just out of luck. We don't know where the frame is. */
838 cache->framereg = -1;
839 cache->framesize = 0;
/* Record where each X register was saved, as an offset later turned
   into an address by the caller.  */
842 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
846 if (pv_area_find_reg (stack, gdbarch, i, &offset))
847 cache->saved_regs[i].addr = offset;
850 do_cleanups (back_to);
854 /* Implement the "skip_prologue" gdbarch method. */
857 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
861 CORE_ADDR func_addr, limit_pc;
862 struct symtab_and_line sal;
864 /* See if we can determine the end of the prologue via the symbol
865 table. If so, then return either PC, or the PC after the
866 prologue, whichever is greater. */
867 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
869 CORE_ADDR post_prologue_pc
870 = skip_prologue_using_sal (gdbarch, func_addr);
872 if (post_prologue_pc != 0)
873 return max (pc, post_prologue_pc);
876 /* Can't determine prologue from the symbol table, need to examine
879 /* Find an upper limit on the function prologue using the debug
880 information. If the debug information could not be used to
881 provide that bound, then use an arbitrary large number as the
883 limit_pc = skip_prologue_using_sal (gdbarch, pc);
/* 128 bytes = 32 instructions: an arbitrary cap on how far the
   instruction-level scan below will look.  */
885 limit_pc = pc + 128; /* Magic. */
887 /* Try disassembling prologue. */
/* NOTE(review): the guard ("if (limit_pc == 0)") before line 885 is
   elided from this listing; as shown, 885 would unconditionally
   overwrite 883's result — verify against the full source.  */
888 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
891 /* Scan the function prologue for THIS_FRAME and populate the prologue
/* Fills CACHE by symbolic prologue analysis when symbols identify the
   function, otherwise (visible from line 934 on) falls back to
   assuming a standard frame record at the frame pointer.  */
895 aarch64_scan_prologue (struct frame_info *this_frame,
896 struct aarch64_prologue_cache *cache)
898 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
899 CORE_ADDR prologue_start;
900 CORE_ADDR prologue_end;
901 CORE_ADDR prev_pc = get_frame_pc (this_frame);
902 struct gdbarch *gdbarch = get_frame_arch (this_frame);
904 cache->prev_pc = prev_pc;
906 /* Assume we do not find a frame. */
907 cache->framereg = -1;
908 cache->framesize = 0;
910 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
913 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
917 /* No line info so use the current PC. */
918 prologue_end = prev_pc;
920 else if (sal.end < prologue_end)
922 /* The next line begins after the function end. */
923 prologue_end = sal.end;
/* Never scan past the point we have actually executed to.  */
926 prologue_end = min (prologue_end, prev_pc);
927 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
/* Fallback path (enclosing else elided): assume a frame record of
   {fp, lr} saved at the frame pointer.  */
934 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
936 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
940 cache->framereg = AARCH64_FP_REGNUM;
941 cache->framesize = 16;
942 cache->saved_regs[29].addr = 0;
943 cache->saved_regs[30].addr = 8;
947 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
948 function may throw an exception if the inferior's registers or memory is
952 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
953 struct aarch64_prologue_cache *cache)
955 CORE_ADDR unwound_fp;
958 aarch64_scan_prologue (this_frame, cache);
/* No recognizable frame: leave the cache in its "unavailable" state
   (the early return here is elided from this listing).  */
960 if (cache->framereg == -1)
963 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
/* The caller's SP is the frame base register plus the frame size.  */
967 cache->prev_sp = unwound_fp + cache->framesize;
969 /* Calculate actual addresses of saved registers using offsets
970 determined by aarch64_analyze_prologue. */
971 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
972 if (trad_frame_addr_p (cache->saved_regs, reg))
973 cache->saved_regs[reg].addr += cache->prev_sp;
975 cache->func = get_frame_func (this_frame);
977 cache->available_p = 1;
980 /* Allocate and fill in *THIS_CACHE with information about the prologue of
981 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
982 Return a pointer to the current aarch64_prologue_cache in
985 static struct aarch64_prologue_cache *
986 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
988 struct aarch64_prologue_cache *cache;
990 if (*this_cache != NULL)
993 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
994 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
/* TRY block (keyword elided): a NOT_AVAILABLE_ERROR while reading the
   inferior leaves the cache allocated but with available_p == 0;
   anything else is re-thrown.  */
999 aarch64_make_prologue_cache_1 (this_frame, cache);
1001 CATCH (ex, RETURN_MASK_ERROR)
1003 if (ex.error != NOT_AVAILABLE_ERROR)
1004 throw_exception (ex);
1011 /* Implement the "stop_reason" frame_unwind method. */
1013 static enum unwind_stop_reason
1014 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
1017 struct aarch64_prologue_cache *cache
1018 = aarch64_make_prologue_cache (this_frame, this_cache);
1020 if (!cache->available_p)
1021 return UNWIND_UNAVAILABLE;
1023 /* Halt the backtrace at "_start". */
1024 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1025 return UNWIND_OUTERMOST;
1027 /* We've hit a wall, stop. */
1028 if (cache->prev_sp == 0)
1029 return UNWIND_OUTERMOST;
1031 return UNWIND_NO_REASON;
1034 /* Our frame ID for a normal frame is the current function's starting
1035 PC and the caller's SP when we were called. */
1038 aarch64_prologue_this_id (struct frame_info *this_frame,
1039 void **this_cache, struct frame_id *this_id)
1041 struct aarch64_prologue_cache *cache
1042 = aarch64_make_prologue_cache (this_frame, this_cache);
/* With no readable target memory we can still identify the frame by
   its function address alone.  */
1044 if (!cache->available_p)
1045 *this_id = frame_id_build_unavailable_stack (cache->func)&#59;
1047 *this_id = frame_id_build (cache->prev_sp, cache->func);
1050 /* Implement the "prev_register" frame_unwind method. */
1052 static struct value *
1053 aarch64_prologue_prev_register (struct frame_info *this_frame,
1054 void **this_cache, int prev_regnum)
1056 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1057 struct aarch64_prologue_cache *cache
1058 = aarch64_make_prologue_cache (this_frame, this_cache);
1060 /* If we are asked to unwind the PC, then we need to return the LR
1061 instead. The prologue may save PC, but it will point into this
1062 frame's prologue, not the next frame's resume location. */
1063 if (prev_regnum == AARCH64_PC_REGNUM)
1067 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1068 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1071 /* SP is generally not saved to the stack, but this frame is
1072 identified by the next frame's stack pointer at the time of the
1073 call. The value was already reconstructed into PREV_SP. */
1079 | | | <- Previous SP
1082 +--| saved fp |<- FP
1086 if (prev_regnum == AARCH64_SP_REGNUM)
1087 return frame_unwind_got_constant (this_frame, prev_regnum,
/* All other registers: use the saved-register table built by the
   prologue scanner.  */
1090 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1094 /* AArch64 prologue unwinder. */
/* Registered as a NORMAL_FRAME unwinder (the frame-type field is
   elided in this listing).  */
1095 struct frame_unwind aarch64_prologue_unwind =
1098 aarch64_prologue_frame_unwind_stop_reason,
1099 aarch64_prologue_this_id,
1100 aarch64_prologue_prev_register,
1102 default_frame_sniffer
1105 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1106 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1107 Return a pointer to the current aarch64_prologue_cache in
1110 static struct aarch64_prologue_cache *
1111 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
1113 struct aarch64_prologue_cache *cache;
1115 if (*this_cache != NULL)
1118 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1119 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1120 *this_cache = cache;
/* Stub frames (e.g. PLT entries) have no prologue to scan: the frame
   is identified directly by the current SP and PC.  */
1124 cache->prev_sp = get_frame_register_unsigned (this_frame,
1126 cache->prev_pc = get_frame_pc (this_frame);
1127 cache->available_p = 1;
1129 CATCH (ex, RETURN_MASK_ERROR)
1131 if (ex.error != NOT_AVAILABLE_ERROR)
1132 throw_exception (ex);
1139 /* Implement the "stop_reason" frame_unwind method. */
1141 static enum unwind_stop_reason
1142 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1145 struct aarch64_prologue_cache *cache
1146 = aarch64_make_stub_cache (this_frame, this_cache);
1148 if (!cache->available_p)
1149 return UNWIND_UNAVAILABLE;
1151 return UNWIND_NO_REASON;
1154 /* Our frame ID for a stub frame is the current SP and LR. */
1157 aarch64_stub_this_id (struct frame_info *this_frame,
1158 void **this_cache, struct frame_id *this_id)
1160 struct aarch64_prologue_cache *cache
1161 = aarch64_make_stub_cache (this_frame, this_cache);
1163 if (cache->available_p)
1164 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1166 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1169 /* Implement the "sniffer" frame_unwind method. */
1172 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1173 struct frame_info *this_frame,
1174 void **this_prologue_cache)
1176 CORE_ADDR addr_in_block;
1179 addr_in_block = get_frame_address_in_block (this_frame);
/* Claim the frame for PLT stubs, or whenever the code at PC cannot be
   read (so the prologue unwinder never tries to).  */
1180 if (in_plt_section (addr_in_block)
1181 /* We also use the stub winder if the target memory is unreadable
1182 to avoid having the prologue unwinder trying to read it. */
1183 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1189 /* AArch64 stub unwinder. */
/* Shares prev_register with the prologue unwinder; only the sniffer,
   id and stop-reason callbacks differ.  */
1190 struct frame_unwind aarch64_stub_unwind =
1193 aarch64_stub_frame_unwind_stop_reason,
1194 aarch64_stub_this_id,
1195 aarch64_prologue_prev_register,
1197 aarch64_stub_unwind_sniffer
1200 /* Return the frame base address of *THIS_FRAME. */
1203 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1205 struct aarch64_prologue_cache *cache
1206 = aarch64_make_prologue_cache (this_frame, this_cache);
/* Frame base = caller's SP minus this frame's size (see the cache's
   framesize comment).  */
1208 return cache->prev_sp - cache->framesize;
1211 /* AArch64 default frame base information. */
/* The same base address serves for frame, locals and args.  */
1212 struct frame_base aarch64_normal_base =
1214 &aarch64_prologue_unwind,
1215 aarch64_normal_frame_base,
1216 aarch64_normal_frame_base,
1217 aarch64_normal_frame_base
1220 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1221 dummy frame. The frame ID's base needs to match the TOS value
1222 saved by save_dummy_frame_tos () and returned from
1223 aarch64_push_dummy_call, and the PC needs to match the dummy
1224 frame's breakpoint. */
1226 static struct frame_id
1227 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
/* ID = current SP (register argument elided in this listing,
   presumably AARCH64_SP_REGNUM) paired with the current PC.  */
1229 return frame_id_build (get_frame_register_unsigned (this_frame,
1231 get_frame_pc (this_frame));
1234 /* Implement the "unwind_pc" gdbarch method. */
1237 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1240 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1245 /* Implement the "unwind_sp" gdbarch method. */
1248 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1250 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1253 /* Return the value of the REGNUM register in the previous frame of
1256 static struct value *
1257 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1258 void **this_cache, int regnum)
1260 struct gdbarch *gdbarch = get_frame_arch (this_frame);
/* Only registered (via the init_reg hook below) for the PC, which
   unwinds through the saved LR.  */
1265 case AARCH64_PC_REGNUM:
1266 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1267 return frame_unwind_got_constant (this_frame, regnum, lr);
1270 internal_error (__FILE__, __LINE__,
1271 _("Unexpected register %d"), regnum);
1275 /* Implement the "init_reg" dwarf2_frame_ops method. */
1278 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1279 struct dwarf2_frame_state_reg *reg,
1280 struct frame_info *this_frame)
/* PC unwinds via the function above; SP is by definition the CFA.  */
1284 case AARCH64_PC_REGNUM:
1285 reg->how = DWARF2_FRAME_REG_FN;
1286 reg->loc.fn = aarch64_dwarf2_prev_register;
1288 case AARCH64_SP_REGNUM:
1289 reg->how = DWARF2_FRAME_REG_CFA;
1294 /* When arguments must be pushed onto the stack, they go on in reverse
1295 order. The code below implements a FILO (stack) to do this. */
1299 /* Value to pass on stack. */
1302 /* Size in bytes of value to pass on stack. */
/* Declare a vector of stack items so pending stack arguments can be
   queued and written out once the final SP is known.  */
1306 DEF_VEC_O (stack_item_t);
1308 /* Return the alignment (in bytes) of the given type.  */
1311 aarch64_type_align (struct type *t)
1317 t = check_typedef (t);
1318 switch (TYPE_CODE (t))
1321 /* Should never happen.  */
1322 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
/* Scalar types are aligned to their own size.  */
1326 case TYPE_CODE_ENUM:
1330 case TYPE_CODE_RANGE:
1331 case TYPE_CODE_BITSTRING:
1333 case TYPE_CODE_CHAR:
1334 case TYPE_CODE_BOOL:
1335 return TYPE_LENGTH (t);
/* Arrays and complex types take the alignment of their element
   type.  */
1337 case TYPE_CODE_ARRAY:
1338 case TYPE_CODE_COMPLEX:
1339 return aarch64_type_align (TYPE_TARGET_TYPE (t));
/* Aggregates align to the most-aligned of their fields.  */
1341 case TYPE_CODE_STRUCT:
1342 case TYPE_CODE_UNION:
1344 for (n = 0; n < TYPE_NFIELDS (t); n++)
1346 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1354 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1355 defined in the AAPCS64 ABI document; otherwise return 0. */
1358 is_hfa (struct type *ty)
1360 switch (TYPE_CODE (ty))
1362 case TYPE_CODE_ARRAY:
1364 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1365 if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
1370 case TYPE_CODE_UNION:
1371 case TYPE_CODE_STRUCT:
1373 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1375 struct type *member0_type;
1377 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1378 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
1382 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1384 struct type *member1_type;
1386 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1387 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1388 || (TYPE_LENGTH (member0_type)
1389 != TYPE_LENGTH (member1_type)))
1405 /* AArch64 function call information structure.  */
1406 struct aarch64_call_info
1408 /* the current argument number.  */
1411 /* The next general purpose register number, equivalent to NGRN as
1412 described in the AArch64 Procedure Call Standard.  */
1415 /* The next SIMD and floating point register number, equivalent to
1416 NSRN as described in the AArch64 Procedure Call Standard.  */
1419 /* The next stacked argument address, equivalent to NSAA as
1420 described in the AArch64 Procedure Call Standard.  */
1423 /* Stack item vector.  */
/* Arguments destined for the stack are queued here and written below
   the final SP once all arguments have been classified.  */
1424 VEC(stack_item_t) *si;
1427 /* Pass a value in a sequence of consecutive X registers.  The caller
1428 is responsible for ensuring sufficient registers are available.  */
1431 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1432 struct aarch64_call_info *info, struct type *type,
1433 const bfd_byte *buf)
1435 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1436 int len = TYPE_LENGTH (type);
1437 enum type_code typecode = TYPE_CODE (type);
1438 int regnum = AARCH64_X0_REGNUM + info->ngrn;
/* Copy the value into successive X registers, at most
   X_REGISTER_SIZE bytes per register.  */
1444 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1445 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1449 /* Adjust sub-word struct/union args when big-endian.  */
1450 if (byte_order == BFD_ENDIAN_BIG
1451 && partial_len < X_REGISTER_SIZE
1452 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1453 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1456 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
1458 gdbarch_register_name (gdbarch, regnum),
1459 phex (regval, X_REGISTER_SIZE))
1460 regcache_cooked_write_unsigned (regcache, regnum, regval);
1467 /* Attempt to marshall a value in a V register.  Return 1 if
1468 successful, or 0 if insufficient registers are available.  This
1469 function, unlike the equivalent pass_in_x() function does not
1470 handle arguments spread across multiple registers.  */
1473 pass_in_v (struct gdbarch *gdbarch,
1474 struct regcache *regcache,
1475 struct aarch64_call_info *info,
1476 const bfd_byte *buf)
1480 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1481 int regnum = AARCH64_V0_REGNUM + info->nsrn;
/* Write the value into the next free V register (NSRN).  */
1486 regcache_cooked_write (regcache, regnum, buf);
1488 fprintf_unfiltered (gdb_stdlog, "arg %d in %s\n",
1490 gdbarch_register_name (gdbarch, regnum));
1497 /* Marshall an argument onto the stack.  */
1500 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1501 const bfd_byte *buf)
1503 int len = TYPE_LENGTH (type);
1509 align = aarch64_type_align (type);
1511 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1512 Natural alignment of the argument's type.  */
1513 align = align_up (align, 8);
1515 /* The AArch64 PCS requires at most doubleword alignment.  */
1520 fprintf_unfiltered (gdb_stdlog, "arg %d len=%d @ sp + %d\n",
1521 info->argnum, len, info->nsaa);
/* Queue the value; the actual memory write happens later, once the
   final SP is known.  */
1525 VEC_safe_push (stack_item_t, info->si, &item);
/* If the next argument offset (NSAA) is now misaligned, queue
   padding as well so the following argument starts aligned.  */
1528 if (info->nsaa & (align - 1))
1530 /* Push stack alignment padding.  */
1531 int pad = align - (info->nsaa & (align - 1));
1536 VEC_safe_push (stack_item_t, info->si, &item);
1541 /* Marshall an argument into a sequence of one or more consecutive X
1542 registers or, if insufficient X registers are available then onto
1546 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1547 struct aarch64_call_info *info, struct type *type,
1548 const bfd_byte *buf)
1550 int len = TYPE_LENGTH (type);
/* Number of X registers needed, rounding the length up to whole
   registers.  */
1551 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1553 /* PCS C.13 - Pass in registers if we have enough spare */
1554 if (info->ngrn + nregs <= 8)
1556 pass_in_x (gdbarch, regcache, info, type, buf);
1557 info->ngrn += nregs;
/* Otherwise the value goes on the stack.  */
1562 pass_on_stack (info, type, buf);
1566 /* Pass a value in a V register, or on the stack if insufficient are
1570 pass_in_v_or_stack (struct gdbarch *gdbarch,
1571 struct regcache *regcache,
1572 struct aarch64_call_info *info,
1574 const bfd_byte *buf)
/* Fall back to the stack when no V register remains.  */
1576 if (!pass_in_v (gdbarch, regcache, info, buf))
1577 pass_on_stack (info, type, buf);
1580 /* Implement the "push_dummy_call" gdbarch method. */
1583 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1584 struct regcache *regcache, CORE_ADDR bp_addr,
1586 struct value **args, CORE_ADDR sp, int struct_return,
1587 CORE_ADDR struct_addr)
1593 struct aarch64_call_info info;
1594 struct type *func_type;
1595 struct type *return_type;
1596 int lang_struct_return;
1598 memset (&info, 0, sizeof (info));
1600 /* We need to know what the type of the called function is in order
1601 to determine the number of named/anonymous arguments for the
1602 actual argument placement, and the return type in order to handle
1603 return value correctly.
1605 The generic code above us views the decision of return in memory
1606 or return in registers as a two stage processes. The language
1607 handler is consulted first and may decide to return in memory (eg
1608 class with copy constructor returned by value), this will cause
1609 the generic code to allocate space AND insert an initial leading
1612 If the language code does not decide to pass in memory then the
1613 target code is consulted.
1615 If the language code decides to pass in memory we want to move
1616 the pointer inserted as the initial argument from the argument
1617 list and into X8, the conventional AArch64 struct return pointer
1620 This is slightly awkward, ideally the flag "lang_struct_return"
1621 would be passed to the targets implementation of push_dummy_call.
1622 Rather that change the target interface we call the language code
1623 directly ourselves. */
1625 func_type = check_typedef (value_type (function));
1627 /* Dereference function pointer types. */
1628 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1629 func_type = TYPE_TARGET_TYPE (func_type);
1631 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1632 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1634 /* If language_pass_by_reference () returned true we will have been
1635 given an additional initial argument, a hidden pointer to the
1636 return slot in memory. */
1637 return_type = TYPE_TARGET_TYPE (func_type);
1638 lang_struct_return = language_pass_by_reference (return_type);
1640 /* Set the return address. For the AArch64, the return breakpoint
1641 is always at BP_ADDR. */
1642 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1644 /* If we were given an initial argument for the return slot because
1645 lang_struct_return was true, lose it. */
1646 if (lang_struct_return)
1652 /* The struct_return pointer occupies X8. */
1653 if (struct_return || lang_struct_return)
1656 fprintf_unfiltered (gdb_stdlog, "struct return in %s = 0x%s\n",
1657 gdbarch_register_name
1659 AARCH64_STRUCT_RETURN_REGNUM),
1660 paddress (gdbarch, struct_addr));
1661 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1665 for (argnum = 0; argnum < nargs; argnum++)
1667 struct value *arg = args[argnum];
1668 struct type *arg_type;
1671 arg_type = check_typedef (value_type (arg));
1672 len = TYPE_LENGTH (arg_type);
1674 switch (TYPE_CODE (arg_type))
1677 case TYPE_CODE_BOOL:
1678 case TYPE_CODE_CHAR:
1679 case TYPE_CODE_RANGE:
1680 case TYPE_CODE_ENUM:
1683 /* Promote to 32 bit integer. */
1684 if (TYPE_UNSIGNED (arg_type))
1685 arg_type = builtin_type (gdbarch)->builtin_uint32;
1687 arg_type = builtin_type (gdbarch)->builtin_int32;
1688 arg = value_cast (arg_type, arg);
1690 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1691 value_contents (arg));
1694 case TYPE_CODE_COMPLEX:
1697 const bfd_byte *buf = value_contents (arg);
1698 struct type *target_type =
1699 check_typedef (TYPE_TARGET_TYPE (arg_type));
1701 pass_in_v (gdbarch, regcache, &info, buf);
1702 pass_in_v (gdbarch, regcache, &info,
1703 buf + TYPE_LENGTH (target_type));
1708 pass_on_stack (&info, arg_type, value_contents (arg));
1712 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1713 value_contents (arg));
1716 case TYPE_CODE_STRUCT:
1717 case TYPE_CODE_ARRAY:
1718 case TYPE_CODE_UNION:
1719 if (is_hfa (arg_type))
1721 int elements = TYPE_NFIELDS (arg_type);
1723 /* Homogeneous Aggregates */
1724 if (info.nsrn + elements < 8)
1728 for (i = 0; i < elements; i++)
1730 /* We know that we have sufficient registers
1731 available therefore this will never fallback
1733 struct value *field =
1734 value_primitive_field (arg, 0, i, arg_type);
1735 struct type *field_type =
1736 check_typedef (value_type (field));
1738 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1739 value_contents_writeable (field));
1745 pass_on_stack (&info, arg_type, value_contents (arg));
1750 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1751 invisible reference. */
1753 /* Allocate aligned storage. */
1754 sp = align_down (sp - len, 16);
1756 /* Write the real data into the stack. */
1757 write_memory (sp, value_contents (arg), len);
1759 /* Construct the indirection. */
1760 arg_type = lookup_pointer_type (arg_type);
1761 arg = value_from_pointer (arg_type, sp);
1762 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1763 value_contents (arg));
1766 /* PCS C.15 / C.18 multiple values pass. */
1767 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1768 value_contents (arg));
1772 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1773 value_contents (arg));
1778 /* Make sure stack retains 16 byte alignment. */
1780 sp -= 16 - (info.nsaa & 15);
1782 while (!VEC_empty (stack_item_t, info.si))
1784 stack_item_t *si = VEC_last (stack_item_t, info.si);
1787 write_memory (sp, si->data, si->len);
1788 VEC_pop (stack_item_t, info.si);
1791 VEC_free (stack_item_t, info.si);
1793 /* Finally, update the SP register. */
1794 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1799 /* Implement the "frame_align" gdbarch method.  */
1802 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1804 /* Align the stack to sixteen bytes.  */
/* Rounds DOWN, as the stack grows towards lower addresses.  */
1805 return sp & ~(CORE_ADDR) 15;
1808 /* Return the type for an AdvSISD Q register.  */
1810 static struct type *
1811 aarch64_vnq_type (struct gdbarch *gdbarch)
1813 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
/* Build the composite view type lazily and cache it in the tdep.  */
1815 if (tdep->vnq_type == NULL)
1820 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
/* Unsigned and signed 128-bit integer views of the register.  */
1823 elem = builtin_type (gdbarch)->builtin_uint128;
1824 append_composite_type_field (t, "u", elem);
1826 elem = builtin_type (gdbarch)->builtin_int128;
1827 append_composite_type_field (t, "s", elem);
1832 return tdep->vnq_type;
1835 /* Return the type for an AdvSISD D register.  */
1837 static struct type *
1838 aarch64_vnd_type (struct gdbarch *gdbarch)
1840 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
/* Build the composite view type lazily and cache it in the tdep.  */
1842 if (tdep->vnd_type == NULL)
1847 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
/* double, unsigned and signed 64-bit views of the register.  */
1850 elem = builtin_type (gdbarch)->builtin_double;
1851 append_composite_type_field (t, "f", elem);
1853 elem = builtin_type (gdbarch)->builtin_uint64;
1854 append_composite_type_field (t, "u", elem);
1856 elem = builtin_type (gdbarch)->builtin_int64;
1857 append_composite_type_field (t, "s", elem);
1862 return tdep->vnd_type;
1865 /* Return the type for an AdvSISD S register.  */
1867 static struct type *
1868 aarch64_vns_type (struct gdbarch *gdbarch)
1870 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
/* Build the composite view type lazily and cache it in the tdep.  */
1872 if (tdep->vns_type == NULL)
1877 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
/* float, unsigned and signed 32-bit views of the register.  */
1880 elem = builtin_type (gdbarch)->builtin_float;
1881 append_composite_type_field (t, "f", elem);
1883 elem = builtin_type (gdbarch)->builtin_uint32;
1884 append_composite_type_field (t, "u", elem);
1886 elem = builtin_type (gdbarch)->builtin_int32;
1887 append_composite_type_field (t, "s", elem);
1892 return tdep->vns_type;
1895 /* Return the type for an AdvSISD H register.  */
1897 static struct type *
1898 aarch64_vnh_type (struct gdbarch *gdbarch)
1900 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
/* Build the composite view type lazily and cache it in the tdep.  */
1902 if (tdep->vnh_type == NULL)
1907 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
/* Unsigned and signed 16-bit views of the register.  */
1910 elem = builtin_type (gdbarch)->builtin_uint16;
1911 append_composite_type_field (t, "u", elem);
1913 elem = builtin_type (gdbarch)->builtin_int16;
1914 append_composite_type_field (t, "s", elem);
1919 return tdep->vnh_type;
1922 /* Return the type for an AdvSISD B register.  */
1924 static struct type *
1925 aarch64_vnb_type (struct gdbarch *gdbarch)
1927 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
/* Build the composite view type lazily and cache it in the tdep.  */
1929 if (tdep->vnb_type == NULL)
1934 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
/* Unsigned and signed 8-bit views of the register.  */
1937 elem = builtin_type (gdbarch)->builtin_uint8;
1938 append_composite_type_field (t, "u", elem);
1940 elem = builtin_type (gdbarch)->builtin_int8;
1941 append_composite_type_field (t, "s", elem);
1946 return tdep->vnb_type;
1949 /* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */
1952 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
/* Map the DWARF register numbering (x0-x30, sp, v0-v31) onto GDB's
   internal register numbers.  */
1954 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1955 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1957 if (reg == AARCH64_DWARF_SP)
1958 return AARCH64_SP_REGNUM;
1960 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1961 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1967 /* Implement the "print_insn" gdbarch method.  */
1970 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
/* Clear any stale symbol pointer before delegating to the opcodes
   library disassembler.  */
1972 info->symbols = NULL;
1973 return print_insn_aarch64 (memaddr, info);
1976 /* AArch64 BRK software debug mode instruction.
1977 Note that AArch64 code is always little-endian.
1978 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
/* Byte sequence stored little-endian to match instruction fetch.  */
1979 static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1981 /* Implement the "breakpoint_from_pc" gdbarch method.  */
1983 static const gdb_byte *
1984 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
/* All AArch64 instructions are 4 bytes, so a single fixed BRK
   sequence is returned regardless of PC.  */
1987 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1989 *lenptr = sizeof (aarch64_default_breakpoint);
1990 return aarch64_default_breakpoint;
1993 /* Extract from an array REGS containing the (raw) register state a
1994 function return value of type TYPE, and copy that, in virtual
1995 format, into VALBUF. */
1998 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2001 struct gdbarch *gdbarch = get_regcache_arch (regs);
2002 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
/* Scalar floating-point values are returned in V0.  */
2004 if (TYPE_CODE (type) == TYPE_CODE_FLT)
2006 bfd_byte buf[V_REGISTER_SIZE];
2007 int len = TYPE_LENGTH (type);
2009 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
2010 memcpy (valbuf, buf, len);
/* Integer-like scalars are returned in X0 (and onwards when wider
   than one register).  */
2012 else if (TYPE_CODE (type) == TYPE_CODE_INT
2013 || TYPE_CODE (type) == TYPE_CODE_CHAR
2014 || TYPE_CODE (type) == TYPE_CODE_BOOL
2015 || TYPE_CODE (type) == TYPE_CODE_PTR
2016 || TYPE_CODE (type) == TYPE_CODE_REF
2017 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2019 /* If the type is a plain integer, then the access is
2020 straight-forward. Otherwise we have to play around a bit
2022 int len = TYPE_LENGTH (type);
2023 int regno = AARCH64_X0_REGNUM;
2028 /* By using store_unsigned_integer we avoid having to do
2029 anything special for small big-endian values. */
2030 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2031 store_unsigned_integer (valbuf,
2032 (len > X_REGISTER_SIZE
2033 ? X_REGISTER_SIZE : len), byte_order, tmp);
2034 len -= X_REGISTER_SIZE;
2035 valbuf += X_REGISTER_SIZE;
/* Complex values occupy a pair of V registers, one part each.  */
2038 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
2040 int regno = AARCH64_V0_REGNUM;
2041 bfd_byte buf[V_REGISTER_SIZE];
2042 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
2043 int len = TYPE_LENGTH (target_type);
2045 regcache_cooked_read (regs, regno, buf);
2046 memcpy (valbuf, buf, len);
2048 regcache_cooked_read (regs, regno + 1, buf);
2049 memcpy (valbuf, buf, len);
/* Homogeneous floating-point aggregates come back one member per V
   register.  */
2052 else if (is_hfa (type))
2054 int elements = TYPE_NFIELDS (type);
2055 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2056 int len = TYPE_LENGTH (member_type);
2059 for (i = 0; i < elements; i++)
2061 int regno = AARCH64_V0_REGNUM + i;
2062 bfd_byte buf[X_REGISTER_SIZE];
2065 fprintf_unfiltered (gdb_stdlog,
2066 "read HFA return value element %d from %s\n",
2068 gdbarch_register_name (gdbarch, regno));
2069 regcache_cooked_read (regs, regno, buf);
2071 memcpy (valbuf, buf, len);
2077 /* For a structure or union the behaviour is as if the value had
2078 been stored to word-aligned memory and then loaded into
2079 registers with 64-bit load instruction(s). */
2080 int len = TYPE_LENGTH (type);
2081 int regno = AARCH64_X0_REGNUM;
2082 bfd_byte buf[X_REGISTER_SIZE];
2086 regcache_cooked_read (regs, regno++, buf);
2087 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2088 len -= X_REGISTER_SIZE;
2089 valbuf += X_REGISTER_SIZE;
2095 /* Will a function return an aggregate type in memory or in a
2096 register? Return 0 if an aggregate type can be returned in a
2097 register, 1 if it must be returned in memory. */
2100 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2103 enum type_code code;
2105 CHECK_TYPEDEF (type);
2107 /* In the AArch64 ABI, "integer" like aggregate types are returned
2108 in registers. For an aggregate type to be integer like, its size
2109 must be less than or equal to 4 * X_REGISTER_SIZE. */
2113 /* PCS B.5 If the argument is a Named HFA, then the argument is
/* Aggregates over 16 bytes that are not HFAs are returned via a
   memory slot addressed by X8.  */
2118 if (TYPE_LENGTH (type) > 16)
2120 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2121 invisible reference. */
2129 /* Write into appropriate registers a function return value of type
2130 TYPE, given in virtual format. */
2133 aarch64_store_return_value (struct type *type, struct regcache *regs,
2134 const gdb_byte *valbuf)
2136 struct gdbarch *gdbarch = get_regcache_arch (regs);
2137 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
/* Scalar floating-point values go in V0.  */
2139 if (TYPE_CODE (type) == TYPE_CODE_FLT)
2141 bfd_byte buf[V_REGISTER_SIZE];
2142 int len = TYPE_LENGTH (type);
2144 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2145 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
/* Integer-like scalars go in X0 (and onwards when wider than one
   register).  */
2147 else if (TYPE_CODE (type) == TYPE_CODE_INT
2148 || TYPE_CODE (type) == TYPE_CODE_CHAR
2149 || TYPE_CODE (type) == TYPE_CODE_BOOL
2150 || TYPE_CODE (type) == TYPE_CODE_PTR
2151 || TYPE_CODE (type) == TYPE_CODE_REF
2152 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2154 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2156 /* Values of one word or less are zero/sign-extended and
2158 bfd_byte tmpbuf[X_REGISTER_SIZE];
2159 LONGEST val = unpack_long (type, valbuf);
2161 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2162 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
2166 /* Integral values greater than one word are stored in
2167 consecutive registers starting with r0. This will always
2168 be a multiple of the register size. */
2169 int len = TYPE_LENGTH (type);
2170 int regno = AARCH64_X0_REGNUM;
2174 regcache_cooked_write (regs, regno++, valbuf);
2175 len -= X_REGISTER_SIZE;
2176 valbuf += X_REGISTER_SIZE;
/* Homogeneous floating-point aggregates: one member per V
   register.  */
2180 else if (is_hfa (type))
2182 int elements = TYPE_NFIELDS (type);
2183 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2184 int len = TYPE_LENGTH (member_type);
2187 for (i = 0; i < elements; i++)
2189 int regno = AARCH64_V0_REGNUM + i;
2190 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
2193 fprintf_unfiltered (gdb_stdlog,
2194 "write HFA return value element %d to %s\n",
2196 gdbarch_register_name (gdbarch, regno));
2198 memcpy (tmpbuf, valbuf, len);
2199 regcache_cooked_write (regs, regno, tmpbuf);
2205 /* For a structure or union the behaviour is as if the value had
2206 been stored to word-aligned memory and then loaded into
2207 registers with 64-bit load instruction(s). */
2208 int len = TYPE_LENGTH (type);
2209 int regno = AARCH64_X0_REGNUM;
2210 bfd_byte tmpbuf[X_REGISTER_SIZE];
2214 memcpy (tmpbuf, valbuf,
2215 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2216 regcache_cooked_write (regs, regno++, tmpbuf);
2217 len -= X_REGISTER_SIZE;
2218 valbuf += X_REGISTER_SIZE;
2223 /* Implement the "return_value" gdbarch method.  */
2225 static enum return_value_convention
2226 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2227 struct type *valtype, struct regcache *regcache,
2228 gdb_byte *readbuf, const gdb_byte *writebuf)
2230 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
/* Aggregates may need the struct-return (memory) convention; scalars
   and register-sized aggregates are transferred via registers using
   the store/extract helpers above.  */
2232 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2233 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2234 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2236 if (aarch64_return_in_memory (gdbarch, valtype))
2239 fprintf_unfiltered (gdb_stdlog, "return value in memory\n");
2240 return RETURN_VALUE_STRUCT_CONVENTION;
2245 aarch64_store_return_value (valtype, regcache, writebuf);
2248 aarch64_extract_return_value (valtype, regcache, readbuf);
2251 fprintf_unfiltered (gdb_stdlog, "return value in registers\n");
2253 return RETURN_VALUE_REGISTER_CONVENTION;
2256 /* Implement the "get_longjmp_target" gdbarch method.  */
2259 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2262 gdb_byte buf[X_REGISTER_SIZE];
2263 struct gdbarch *gdbarch = get_frame_arch (frame);
2264 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2265 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
/* X0 holds the jmp_buf pointer at the longjmp call; the saved PC is
   read from the tdep-described slot (jb_pc * jb_elt_size) within
   it.  */
2267 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2269 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2273 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2278 /* Return the pseudo register name corresponding to register regnum. */
2281 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2283 static const char *const q_name[] =
2285 "q0", "q1", "q2", "q3",
2286 "q4", "q5", "q6", "q7",
2287 "q8", "q9", "q10", "q11",
2288 "q12", "q13", "q14", "q15",
2289 "q16", "q17", "q18", "q19",
2290 "q20", "q21", "q22", "q23",
2291 "q24", "q25", "q26", "q27",
2292 "q28", "q29", "q30", "q31",
2295 static const char *const d_name[] =
2297 "d0", "d1", "d2", "d3",
2298 "d4", "d5", "d6", "d7",
2299 "d8", "d9", "d10", "d11",
2300 "d12", "d13", "d14", "d15",
2301 "d16", "d17", "d18", "d19",
2302 "d20", "d21", "d22", "d23",
2303 "d24", "d25", "d26", "d27",
2304 "d28", "d29", "d30", "d31",
2307 static const char *const s_name[] =
2309 "s0", "s1", "s2", "s3",
2310 "s4", "s5", "s6", "s7",
2311 "s8", "s9", "s10", "s11",
2312 "s12", "s13", "s14", "s15",
2313 "s16", "s17", "s18", "s19",
2314 "s20", "s21", "s22", "s23",
2315 "s24", "s25", "s26", "s27",
2316 "s28", "s29", "s30", "s31",
2319 static const char *const h_name[] =
2321 "h0", "h1", "h2", "h3",
2322 "h4", "h5", "h6", "h7",
2323 "h8", "h9", "h10", "h11",
2324 "h12", "h13", "h14", "h15",
2325 "h16", "h17", "h18", "h19",
2326 "h20", "h21", "h22", "h23",
2327 "h24", "h25", "h26", "h27",
2328 "h28", "h29", "h30", "h31",
2331 static const char *const b_name[] =
2333 "b0", "b1", "b2", "b3",
2334 "b4", "b5", "b6", "b7",
2335 "b8", "b9", "b10", "b11",
2336 "b12", "b13", "b14", "b15",
2337 "b16", "b17", "b18", "b19",
2338 "b20", "b21", "b22", "b23",
2339 "b24", "b25", "b26", "b27",
2340 "b28", "b29", "b30", "b31",
/* Pseudo register numbers start after the raw registers; rebase into
   the pseudo numbering before indexing the name tables.  */
2343 regnum -= gdbarch_num_regs (gdbarch);
2345 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2346 return q_name[regnum - AARCH64_Q0_REGNUM];
2348 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2349 return d_name[regnum - AARCH64_D0_REGNUM];
2351 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2352 return s_name[regnum - AARCH64_S0_REGNUM];
2354 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2355 return h_name[regnum - AARCH64_H0_REGNUM];
2357 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2358 return b_name[regnum - AARCH64_B0_REGNUM];
2360 internal_error (__FILE__, __LINE__,
2361 _("aarch64_pseudo_register_name: bad register number %d"),
2365 /* Implement the "pseudo_register_type" tdesc_arch_data method.  */
2367 static struct type *
2368 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
/* Rebase into the pseudo numbering, then hand back the lazily-built
   composite view type for the register class.  */
2370 regnum -= gdbarch_num_regs (gdbarch);
2372 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2373 return aarch64_vnq_type (gdbarch);
2375 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2376 return aarch64_vnd_type (gdbarch);
2378 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2379 return aarch64_vns_type (gdbarch);
2381 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2382 return aarch64_vnh_type (gdbarch);
2384 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2385 return aarch64_vnb_type (gdbarch);
2387 internal_error (__FILE__, __LINE__,
2388 _("aarch64_pseudo_register_type: bad register number %d"),
2392 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.  */
2395 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2396 struct reggroup *group)
2398 regnum -= gdbarch_num_regs (gdbarch);
/* All pseudo views belong to the vector group; the D and S views
   additionally join the float group (double/float sized).  */
2400 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2401 return group == all_reggroup || group == vector_reggroup;
2402 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2403 return (group == all_reggroup || group == vector_reggroup
2404 || group == float_reggroup);
2405 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2406 return (group == all_reggroup || group == vector_reggroup
2407 || group == float_reggroup);
2408 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2409 return group == all_reggroup || group == vector_reggroup;
2410 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2411 return group == all_reggroup || group == vector_reggroup;
2413 return group == all_reggroup;
2416 /* Implement the "pseudo_register_read_value" gdbarch method. */
2418 static struct value *
2419 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2420 struct regcache *regcache,
2423 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2424 struct value *result_value;
2427 result_value = allocate_value (register_type (gdbarch, regnum));
2428 VALUE_LVAL (result_value) = lval_register;
2429 VALUE_REGNUM (result_value) = regnum;
2430 buf = value_contents_raw (result_value);
2432 regnum -= gdbarch_num_regs (gdbarch);
2434 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2436 enum register_status status;
2439 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2440 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2441 if (status != REG_VALID)
2442 mark_value_bytes_unavailable (result_value, 0,
2443 TYPE_LENGTH (value_type (result_value)));
2445 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2446 return result_value;
2449 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2451 enum register_status status;
2454 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2455 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2456 if (status != REG_VALID)
2457 mark_value_bytes_unavailable (result_value, 0,
2458 TYPE_LENGTH (value_type (result_value)));
2460 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2461 return result_value;
2464 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2466 enum register_status status;
2469 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2470 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2471 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2472 return result_value;
2475 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2477 enum register_status status;
2480 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2481 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2482 if (status != REG_VALID)
2483 mark_value_bytes_unavailable (result_value, 0,
2484 TYPE_LENGTH (value_type (result_value)));
2486 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2487 return result_value;
2490 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2492 enum register_status status;
2495 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2496 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2497 if (status != REG_VALID)
2498 mark_value_bytes_unavailable (result_value, 0,
2499 TYPE_LENGTH (value_type (result_value)));
2501 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2502 return result_value;
2505 gdb_assert_not_reached ("regnum out of bound");
2508 /* Implement the "pseudo_register_write" gdbarch method.  */
2511 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2512 int regnum, const gdb_byte *buf)
2514 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2516 /* Ensure the register buffer is zero, we want gdb writes of the
2517 various 'scalar' pseudo registers to behavior like architectural
2518 writes, register width bytes are written the remainder are set to
2520 memset (reg_buf, 0, sizeof (reg_buf));
/* Rebase into the pseudo numbering, then write the view's bytes,
   zero-extended, into the underlying raw V register.  */
2522 regnum -= gdbarch_num_regs (gdbarch);
2524 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2526 /* pseudo Q registers */
2529 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2530 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2531 regcache_raw_write (regcache, v_regnum, reg_buf);
2535 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2537 /* pseudo D registers */
2540 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2541 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2542 regcache_raw_write (regcache, v_regnum, reg_buf);
2546 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
/* pseudo S registers */
2550 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2551 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2552 regcache_raw_write (regcache, v_regnum, reg_buf);
2556 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2558 /* pseudo H registers */
2561 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2562 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2563 regcache_raw_write (regcache, v_regnum, reg_buf);
2567 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2569 /* pseudo B registers */
2572 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2573 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2574 regcache_raw_write (regcache, v_regnum, reg_buf);
2578 gdb_assert_not_reached ("regnum out of bound");
2581 /* Callback function for user_reg_add. */
/* BATON points at the `regnum' member of an aarch64_register_aliases
   entry; the alias simply evaluates that raw register in FRAME.
   NOTE(review): the excerpt elides the function's braces.  */
2583 static struct value *
2584 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2586 const int *reg_p = baton;
2588 return value_of_register (*reg_p, frame);
2592 /* Implement the "software_single_step" gdbarch method, needed to
2593 single step through atomic sequences on AArch64. */
/* A load-exclusive/store-exclusive sequence must not be interrupted by
   a breakpoint, or the store-exclusive will always fail.  Scan forward
   from PC for such a sequence and place breakpoints past its end (and
   at the target of any conditional branch inside it) instead of
   single-stepping through it.  NOTE(review): this excerpt elides the
   return type, braces, some declarations (`loc', `insn_count', `cond',
   `offset', `index') and early-return lines; code text is unchanged.  */
2596 aarch64_software_single_step (struct frame_info *frame)
2598 struct gdbarch *gdbarch = get_frame_arch (frame);
2599 struct address_space *aspace = get_frame_address_space (frame);
2600 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2601 const int insn_size = 4;
2602 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2603 CORE_ADDR pc = get_frame_pc (frame);
/* breaks[0] is the mandatory breakpoint after the sequence; breaks[1]
   is the optional conditional-branch destination.  */
2604 CORE_ADDR breaks[2] = { -1, -1 };
2606 CORE_ADDR closing_insn = 0;
2607 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2608 byte_order_for_code);
2611 int bc_insn_count = 0; /* Conditional branch instruction count. */
2612 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2614 /* Look for a Load Exclusive instruction which begins the sequence. */
2615 if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
/* Scan up to atomic_sequence_length instructions for the closing
   store-exclusive.  */
2618 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2624 insn = read_memory_unsigned_integer (loc, insn_size,
2625 byte_order_for_code)
2627 /* Check if the instruction is a conditional branch. */
2628 if (decode_bcond (loc, insn, &cond, &offset))
/* More than one conditional branch inside the sequence is not
   supported; presumably the elided code bails out here.  */
2630 if (bc_insn_count >= 1)
2633 /* It is, so we'll try to set a breakpoint at the destination. */
2634 breaks[1] = loc + offset;
2640 /* Look for the Store Exclusive which closes the atomic sequence. */
2641 if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
2648 /* We didn't find a closing Store Exclusive instruction, fall back. */
2652 /* Insert breakpoint after the end of the atomic sequence. */
2653 breaks[0] = loc + insn_size;
2655 /* Check for duplicated breakpoints, and also check that the second
2656 breakpoint is not within the atomic sequence. */
2658 && (breaks[1] == breaks[0]
2659 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2660 last_breakpoint = 0;
2662 /* Insert the breakpoint at the end of the sequence, and one at the
2663 destination of the conditional branch, if it exists. */
2664 for (index = 0; index <= last_breakpoint; index++)
2665 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2670 /* Initialize the current architecture based on INFO. If possible,
2671 re-use an architecture from ARCHES, which is a list of
2672 architectures already created during this debugging session.
2674 Called e.g. at program startup, when reading a core file, and when
2675 reading a binary file. */
/* NOTE(review): this excerpt elides a number of original lines (braces,
   the `valid_p'/`i'/`num_regs' declarations, validity checks, and some
   error paths); the code text below is unchanged.  */
2677 static struct gdbarch *
2678 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2680 struct gdbarch_tdep *tdep;
2681 struct gdbarch *gdbarch;
2682 struct gdbarch_list *best_arch;
2683 struct tdesc_arch_data *tdesc_data = NULL;
2684 const struct target_desc *tdesc = info.target_desc;
2686 int have_fpa_registers = 1;
2688 const struct tdesc_feature *feature;
2690 int num_pseudo_regs = 0;
2692 /* Ensure we always have a target descriptor. */
2693 if (!tdesc_has_registers (tdesc))
/* Fall back to the built-in descriptor from features/aarch64.c.  */
2694 tdesc = tdesc_aarch64;
2698 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
/* The core feature is mandatory; presumably the elided code returns
   NULL here when it is absent.  */
2700 if (feature == NULL)
2703 tdesc_data = tdesc_data_alloc ();
2705 /* Validate the descriptor provides the mandatory core R registers
2706 and allocate their numbers. */
2707 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2709 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2710 aarch64_r_register_names[i]);
2712 num_regs = AARCH64_X0_REGNUM + i;
2714 /* Look for the V registers. */
2715 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2718 /* Validate the descriptor provides the mandatory V registers
2719 and allocate their numbers. */
2720 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2722 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2723 aarch64_v_register_names[i]);
2725 num_regs = AARCH64_V0_REGNUM + i;
/* Each Vn register gets five scalar pseudo views (Q/D/S/H/B).  */
2727 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2728 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2729 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2730 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2731 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
/* Error path for an invalid descriptor: free the tdesc data.  */
2736 tdesc_data_cleanup (tdesc_data);
2740 /* AArch64 code is always little-endian. */
2741 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2743 /* If there is already a candidate, use it. */
2744 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2746 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2748 /* Found a match. */
2752 if (best_arch != NULL)
2754 if (tdesc_data != NULL)
2755 tdesc_data_cleanup (tdesc_data);
2756 return best_arch->gdbarch;
/* No reusable gdbarch: allocate a new one together with its tdep.  */
2759 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
2760 gdbarch = gdbarch_alloc (&info, tdep);
2762 /* This should be low enough for everything. */
2763 tdep->lowest_pc = 0x20;
2764 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2765 tdep->jb_elt_size = 8;
2767 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2768 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2770 /* Frame handling. */
2771 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2772 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2773 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2775 /* Advance PC across function entry code. */
2776 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2778 /* The stack grows downward. */
2779 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2781 /* Breakpoint manipulation. */
2782 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2783 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2784 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2786 /* Information about registers, etc. */
2787 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2788 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2789 set_gdbarch_num_regs (gdbarch, num_regs);
/* Hook the pseudo Q/D/S/H/B register machinery defined above.  */
2791 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2792 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2793 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2794 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2795 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2796 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2797 aarch64_pseudo_register_reggroup_p);
/* ABI type sizes and formats (LP64, IEEE floats, quad long double).  */
2800 set_gdbarch_short_bit (gdbarch, 16);
2801 set_gdbarch_int_bit (gdbarch, 32);
2802 set_gdbarch_float_bit (gdbarch, 32);
2803 set_gdbarch_double_bit (gdbarch, 64);
2804 set_gdbarch_long_double_bit (gdbarch, 128);
2805 set_gdbarch_long_bit (gdbarch, 64);
2806 set_gdbarch_long_long_bit (gdbarch, 64);
2807 set_gdbarch_ptr_bit (gdbarch, 64);
2808 set_gdbarch_char_signed (gdbarch, 0);
2809 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2810 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2811 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2813 /* Internal <-> external register number maps. */
2814 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2816 /* Returning results. */
2817 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2820 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2822 /* Virtual tables. */
2823 set_gdbarch_vbit_in_delta (gdbarch, 1);
2825 /* Hook in the ABI-specific overrides, if they have been registered. */
2826 info.target_desc = tdesc;
2827 info.tdep_info = (void *) tdesc_data;
2828 gdbarch_init_osabi (info, gdbarch);
2830 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2832 /* Add some default predicates. */
2833 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2834 dwarf2_append_unwinders (gdbarch);
2835 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2837 frame_base_set_default (gdbarch, &aarch64_normal_base);
2839 /* Now we have tuned the configuration, set a few final things,
2840 based on what the OS ABI has told us. */
2842 if (tdep->jb_pc >= 0)
2843 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2845 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2847 /* Add standard register aliases. */
2848 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2849 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2850 value_of_aarch64_user_reg,
2851 &aarch64_register_aliases[i].regnum);
/* Dump the tdep-private state (currently just lowest_pc) to FILE, for
   "maint print arch".  NOTE(review): the excerpt elides the return type
   line and braces.  */
2857 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2859 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2864 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2865 paddress (gdbarch, tdep->lowest_pc));
2868 /* Suppress warning from -Wmissing-prototypes. */
2869 extern initialize_file_ftype _initialize_aarch64_tdep;
/* Module initializer: register the gdbarch factory, the built-in
   target description, and the "set/show debug aarch64" maintenance
   command.  NOTE(review): the excerpt elides the `void' return-type
   line, braces and some arguments of these calls.  */
2872 _initialize_aarch64_tdep (void)
2874 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2877 initialize_tdesc_aarch64 ();
2879 /* Debug this file's internals. */
2880 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2881 Set AArch64 debugging."), _("\
2882 Show AArch64 debugging."), _("\
2883 When on, AArch64 specific debugging is enabled."),
2886 &setdebuglist, &showdebuglist);
2889 /* AArch64 process record-replay related structures, defines etc. */
/* Bit-field helpers: submask(x) is a mask of bits [0..x]; bit(obj,st)
   extracts one bit; bits(obj,st,fn) extracts bits [st..fn].  */
2891 #define submask(x) ((1L << ((x) + 1)) - 1)
2892 #define bit(obj,st) (((obj) >> (st)) & 1)
2893 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Allocate REGS for LENGTH register numbers and copy them in from
   RECORD_BUF.  Caller owns the allocation (freed in
   deallocate_reg_mem).  NOTE(review): the excerpt elides the
   do/while(0) wrapper lines and the `if (reg_len)' guard.
   FIX: the "&REGS" sequence had been mangled by an HTML-entity
   round-trip into "(R)S" (U+00AE); restored to `&REGS[0]' to match
   upstream GDB.  */
2895 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2898 unsigned int reg_len = LENGTH; \
2901 REGS = XNEWVEC (uint32_t, reg_len); \
2902 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
/* Allocate MEMS for LENGTH aarch64_mem_r entries and copy the
   len/addr pairs in from RECORD_BUF.  */
2907 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2910 unsigned int mem_len = LENGTH; \
2913 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2914 memcpy(&MEMS->len, &RECORD_BUF[0], \
2915 sizeof(struct aarch64_mem_r) * LENGTH); \
2920 /* AArch64 record/replay structures and enumerations. */
/* One recorded memory write: how many bytes at which address.  */
2922 struct aarch64_mem_r
2924 uint64_t len; /* Record length. */
2925 uint64_t addr; /* Memory address. */
/* Result codes returned by the per-class record handlers below.  */
2928 enum aarch64_record_result
2930 AARCH64_RECORD_SUCCESS,
2931 AARCH64_RECORD_FAILURE,
2932 AARCH64_RECORD_UNSUPPORTED,
2933 AARCH64_RECORD_UNKNOWN
/* Decoding context threaded through all record handlers: the insn
   being recorded plus the register/memory side-effect lists the
   handlers fill in (allocated via REG_ALLOC/MEM_ALLOC, freed by
   deallocate_reg_mem).  */
2936 typedef struct insn_decode_record_t
2938 struct gdbarch *gdbarch;
2939 struct regcache *regcache;
2940 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2941 uint32_t aarch64_insn; /* Insn to be recorded. */
2942 uint32_t mem_rec_count; /* Count of memory records. */
2943 uint32_t reg_rec_count; /* Count of register records. */
2944 uint32_t *aarch64_regs; /* Registers to be recorded. */
2945 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2946 } insn_decode_record;
2948 /* Record handler for data processing - register instructions. */
/* Records the destination register (and CPSR when the S bit sets the
   flags) for the data-processing (register) instruction class.
   NOTE(review): this excerpt elides braces, the return type, the
   `setflags' declaration and some `if (setflags)'/`else' lines; code
   text is unchanged.  */
2951 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2953 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2954 uint32_t record_buf[4];
2956 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2957 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2958 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
/* Bit 28 clear: logical / add-sub (shifted or extended register).  */
2960 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2964 /* Logical (shifted register). */
2965 if (insn_bits24_27 == 0x0a)
2966 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2968 else if (insn_bits24_27 == 0x0b)
2969 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2971 return AARCH64_RECORD_UNKNOWN;
2973 record_buf[0] = reg_rd;
2974 aarch64_insn_r->reg_rec_count = 1;
/* Presumably guarded by `if (setflags)' in the elided line: flag-
   setting forms also clobber CPSR.  */
2976 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2980 if (insn_bits24_27 == 0x0b)
2982 /* Data-processing (3 source). */
2983 record_buf[0] = reg_rd;
2984 aarch64_insn_r->reg_rec_count = 1;
2986 else if (insn_bits24_27 == 0x0a)
2988 if (insn_bits21_23 == 0x00)
2990 /* Add/subtract (with carry). */
2991 record_buf[0] = reg_rd;
2992 aarch64_insn_r->reg_rec_count = 1;
2993 if (bit (aarch64_insn_r->aarch64_insn, 29))
2995 record_buf[1] = AARCH64_CPSR_REGNUM;
2996 aarch64_insn_r->reg_rec_count = 2;
2999 else if (insn_bits21_23 == 0x02)
3001 /* Conditional compare (register) and conditional compare
3002 (immediate) instructions. */
3003 record_buf[0] = AARCH64_CPSR_REGNUM;
3004 aarch64_insn_r->reg_rec_count = 1;
3006 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3008 /* CConditional select. */
3009 /* Data-processing (2 source). */
3010 /* Data-processing (1 source). */
3011 record_buf[0] = reg_rd;
3012 aarch64_insn_r->reg_rec_count = 1;
3015 return AARCH64_RECORD_UNKNOWN;
/* Publish the collected register list to the decode record.  */
3019 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3021 return AARCH64_RECORD_SUCCESS;
3024 /* Record handler for data processing - immediate instructions. */
/* Records the destination register (and CPSR when the S bit sets the
   flags) for the data-processing (immediate) instruction class.
   NOTE(review): this excerpt elides braces, the return type and the
   `if (setflags)' lines preceding the CPSR records; code text is
   unchanged.  */
3027 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3029 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
3030 uint32_t record_buf[4];
3032 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3033 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3034 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3035 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3037 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3038 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3039 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3041 record_buf[0] = reg_rd;
3042 aarch64_insn_r->reg_rec_count = 1;
3044 else if (insn_bits24_27 == 0x01)
3046 /* Add/Subtract (immediate). */
3047 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3048 record_buf[0] = reg_rd;
3049 aarch64_insn_r->reg_rec_count = 1;
/* Presumably guarded by `if (setflags)' in the elided line.  */
3051 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3053 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3055 /* Logical (immediate). */
3056 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3057 record_buf[0] = reg_rd;
3058 aarch64_insn_r->reg_rec_count = 1;
3060 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3063 return AARCH64_RECORD_UNKNOWN;
3065 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3067 return AARCH64_RECORD_SUCCESS;
3070 /* Record handler for branch, exception generation and system instructions. */
/* Records PC (and LR for branch-and-link), CPSR for hint/msr, Rt for
   sysl/mrs, and dispatches SVC instructions to the OS-ABI supplied
   syscall recorder.  NOTE(review): this excerpt elides braces, the
   return type and some arguments (e.g. to regcache_raw_read_unsigned
   and the syscall hook); code text is unchanged.  */
3073 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3075 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3076 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3077 uint32_t record_buf[4];
3079 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3080 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3081 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3083 if (insn_bits28_31 == 0x0d)
3085 /* Exception generation instructions. */
3086 if (insn_bits24_27 == 0x04)
/* The opc/op2/LL pattern below selects SVC specifically.  */
3088 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3089 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3090 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3092 ULONGEST svc_number;
/* x8 holds the Linux syscall number; delegate recording of the
   syscall's side effects to the OS-ABI hook.  */
3094 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3096 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3100 return AARCH64_RECORD_UNSUPPORTED;
3102 /* System instructions. */
3103 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3105 uint32_t reg_rt, reg_crn;
3107 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3108 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3110 /* Record rt in case of sysl and mrs instructions. */
3111 if (bit (aarch64_insn_r->aarch64_insn, 21))
3113 record_buf[0] = reg_rt;
3114 aarch64_insn_r->reg_rec_count = 1;
3116 /* Record cpsr for hint and msr(immediate) instructions. */
3117 else if (reg_crn == 0x02 || reg_crn == 0x04)
3119 record_buf[0] = AARCH64_CPSR_REGNUM;
3120 aarch64_insn_r->reg_rec_count = 1;
3123 /* Unconditional branch (register). */
3124 else if((insn_bits24_27 & 0x0e) == 0x06)
3126 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
/* BLR also writes the link register.  */
3127 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3128 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3131 return AARCH64_RECORD_UNKNOWN;
3133 /* Unconditional branch (immediate). */
3134 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3136 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
/* Bit 31 distinguishes BL from B.  */
3137 if (bit (aarch64_insn_r->aarch64_insn, 31))
3138 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3141 /* Compare & branch (immediate), Test & branch (immediate) and
3142 Conditional branch (immediate). */
3143 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3145 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3147 return AARCH64_RECORD_SUCCESS;
3150 /* Record handler for advanced SIMD load and store instructions. */
/* Records the V registers written by SIMD loads, or the memory ranges
   written by SIMD stores, for both the single-structure and multiple-
   structure forms, plus the writeback base register.  NOTE(review):
   this excerpt elides many lines (braces, the `address' declaration,
   esize/scale adjustments, switch-case labels and rpt/selem
   assignments); code text is unchanged.  */
3153 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3156 uint64_t addr_offset = 0;
3157 uint32_t record_buf[24];
/* Memory records are (length, address) pairs, hence two slots each.  */
3158 uint64_t record_buf_mem[24];
3159 uint32_t reg_rn, reg_rt;
3160 uint32_t reg_index = 0, mem_index = 0;
3161 uint8_t opcode_bits, size_bits;
3163 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3164 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3165 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3166 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
/* Base address for the transfer comes from Rn.  */
3167 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3171 fprintf_unfiltered (gdb_stdlog,
3172 "Process record: Advanced SIMD load/store\n");
3175 /* Load/store single structure. */
3176 if (bit (aarch64_insn_r->aarch64_insn, 24))
3178 uint8_t sindex, scale, selem, esize, replicate = 0;
3179 scale = opcode_bits >> 2;
3180 selem = ((opcode_bits & 0x02) |
3181 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
/* Elided scale cases validate the element-size encoding.  */
3185 if (size_bits & 0x01)
3186 return AARCH64_RECORD_UNKNOWN;
3189 if ((size_bits >> 1) & 0x01)
3190 return AARCH64_RECORD_UNKNOWN;
3191 if (size_bits & 0x01)
3193 if (!((opcode_bits >> 1) & 0x01))
3196 return AARCH64_RECORD_UNKNOWN;
/* LD*R (load and replicate) forms.  */
3200 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3207 return AARCH64_RECORD_UNKNOWN;
3213 for (sindex = 0; sindex < selem; sindex++)
3215 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3216 reg_rt = (reg_rt + 1) % 32;
3220 for (sindex = 0; sindex < selem; sindex++)
/* Bit 22 set = load (record the register); clear = store (record
   the memory range in the elided else branch).  */
3221 if (bit (aarch64_insn_r->aarch64_insn, 22))
3222 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3225 record_buf_mem[mem_index++] = esize / 8;
3226 record_buf_mem[mem_index++] = address + addr_offset;
3228 addr_offset = addr_offset + (esize / 8);
3229 reg_rt = (reg_rt + 1) % 32;
3232 /* Load/store multiple structure. */
3235 uint8_t selem, esize, rpt, elements;
3236 uint8_t eindex, rindex;
3238 esize = 8 << size_bits;
/* Q bit selects 128- vs 64-bit vector length.  */
3239 if (bit (aarch64_insn_r->aarch64_insn, 30))
3240 elements = 128 / esize;
3242 elements = 64 / esize;
/* The elided case labels map opcode to rpt/selem for LD/ST1..4.  */
3244 switch (opcode_bits)
3246 /*LD/ST4 (4 Registers). */
3251 /*LD/ST1 (4 Registers). */
3256 /*LD/ST3 (3 Registers). */
3261 /*LD/ST1 (3 Registers). */
3266 /*LD/ST1 (1 Register). */
3271 /*LD/ST2 (2 Registers). */
3276 /*LD/ST1 (2 Registers). */
3282 return AARCH64_RECORD_UNSUPPORTED;
3285 for (rindex = 0; rindex < rpt; rindex++)
3286 for (eindex = 0; eindex < elements; eindex++)
3288 uint8_t reg_tt, sindex;
3289 reg_tt = (reg_rt + rindex) % 32;
3290 for (sindex = 0; sindex < selem; sindex++)
3292 if (bit (aarch64_insn_r->aarch64_insn, 22))
3293 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3296 record_buf_mem[mem_index++] = esize / 8;
3297 record_buf_mem[mem_index++] = address + addr_offset;
3299 addr_offset = addr_offset + (esize / 8);
3300 reg_tt = (reg_tt + 1) % 32;
/* Bit 23 = post-index writeback: Rn itself is modified.  */
3305 if (bit (aarch64_insn_r->aarch64_insn, 23))
3306 record_buf[reg_index++] = reg_rn;
3308 aarch64_insn_r->reg_rec_count = reg_index;
3309 aarch64_insn_r->mem_rec_count = mem_index / 2;
3310 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3312 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3314 return AARCH64_RECORD_SUCCESS;
3317 /* Record handler for load and store instructions. */
3320 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3322 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3323 uint8_t insn_bit23, insn_bit21;
3324 uint8_t opc, size_bits, ld_flag, vector_flag;
3325 uint32_t reg_rn, reg_rt, reg_rt2;
3326 uint64_t datasize, offset;
3327 uint32_t record_buf[8];
3328 uint64_t record_buf_mem[8];
3331 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3332 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3333 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3334 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3335 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3336 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3337 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3338 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3339 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3340 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3341 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3343 /* Load/store exclusive. */
3344 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3348 fprintf_unfiltered (gdb_stdlog,
3349 "Process record: load/store exclusive\n");
3354 record_buf[0] = reg_rt;
3355 aarch64_insn_r->reg_rec_count = 1;
3358 record_buf[1] = reg_rt2;
3359 aarch64_insn_r->reg_rec_count = 2;
3365 datasize = (8 << size_bits) * 2;
3367 datasize = (8 << size_bits);
3368 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3370 record_buf_mem[0] = datasize / 8;
3371 record_buf_mem[1] = address;
3372 aarch64_insn_r->mem_rec_count = 1;
3375 /* Save register rs. */
3376 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3377 aarch64_insn_r->reg_rec_count = 1;
3381 /* Load register (literal) instructions decoding. */
3382 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3386 fprintf_unfiltered (gdb_stdlog,
3387 "Process record: load register (literal)\n");
3390 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3392 record_buf[0] = reg_rt;
3393 aarch64_insn_r->reg_rec_count = 1;
3395 /* All types of load/store pair instructions decoding. */
3396 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3400 fprintf_unfiltered (gdb_stdlog,
3401 "Process record: load/store pair\n");
3408 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3409 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3413 record_buf[0] = reg_rt;
3414 record_buf[1] = reg_rt2;
3416 aarch64_insn_r->reg_rec_count = 2;
3421 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3423 size_bits = size_bits >> 1;
3424 datasize = 8 << (2 + size_bits);
3425 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3426 offset = offset << (2 + size_bits);
3427 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3429 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3431 if (imm7_off & 0x40)
3432 address = address - offset;
3434 address = address + offset;
3437 record_buf_mem[0] = datasize / 8;
3438 record_buf_mem[1] = address;
3439 record_buf_mem[2] = datasize / 8;
3440 record_buf_mem[3] = address + (datasize / 8);
3441 aarch64_insn_r->mem_rec_count = 2;
3443 if (bit (aarch64_insn_r->aarch64_insn, 23))
3444 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3446 /* Load/store register (unsigned immediate) instructions. */
3447 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3449 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3456 if (size_bits != 0x03)
3459 return AARCH64_RECORD_UNKNOWN;
3463 fprintf_unfiltered (gdb_stdlog,
3464 "Process record: load/store (unsigned immediate):"
3465 " size %x V %d opc %x\n", size_bits, vector_flag,
3471 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3472 datasize = 8 << size_bits;
3473 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3475 offset = offset << size_bits;
3476 address = address + offset;
3478 record_buf_mem[0] = datasize >> 3;
3479 record_buf_mem[1] = address;
3480 aarch64_insn_r->mem_rec_count = 1;
3485 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3487 record_buf[0] = reg_rt;
3488 aarch64_insn_r->reg_rec_count = 1;
3491 /* Load/store register (register offset) instructions. */
3492 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3493 && insn_bits10_11 == 0x02 && insn_bit21)
3497 fprintf_unfiltered (gdb_stdlog,
3498 "Process record: load/store (register offset)\n");
3500 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3507 if (size_bits != 0x03)
3510 return AARCH64_RECORD_UNKNOWN;
3514 uint64_t reg_rm_val;
3515 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3516 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
3517 if (bit (aarch64_insn_r->aarch64_insn, 12))
3518 offset = reg_rm_val << size_bits;
3520 offset = reg_rm_val;
3521 datasize = 8 << size_bits;
3522 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3524 address = address + offset;
3525 record_buf_mem[0] = datasize >> 3;
3526 record_buf_mem[1] = address;
3527 aarch64_insn_r->mem_rec_count = 1;
3532 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3534 record_buf[0] = reg_rt;
3535 aarch64_insn_r->reg_rec_count = 1;
3538 /* Load/store register (immediate and unprivileged) instructions. */
3539 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3544 fprintf_unfiltered (gdb_stdlog,
3545 "Process record: load/store (immediate and unprivileged)\n");
3547 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3554 if (size_bits != 0x03)
3557 return AARCH64_RECORD_UNKNOWN;
3562 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3563 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3564 datasize = 8 << size_bits;
3565 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3567 if (insn_bits10_11 != 0x01)
3569 if (imm9_off & 0x0100)
3570 address = address - offset;
3572 address = address + offset;
3574 record_buf_mem[0] = datasize >> 3;
3575 record_buf_mem[1] = address;
3576 aarch64_insn_r->mem_rec_count = 1;
3581 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3583 record_buf[0] = reg_rt;
3584 aarch64_insn_r->reg_rec_count = 1;
3586 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3587 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3589 /* Advanced SIMD load/store instructions. */
3591 return aarch64_record_asimd_load_store (aarch64_insn_r);
3593 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3595 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3597 return AARCH64_RECORD_SUCCESS;
3600 /* Record handler for data processing SIMD and floating point instructions. */
/* Records the single destination register (general X register or
   vector V register, depending on the sub-class) for FP/SIMD data
   processing instructions.  NOTE(review): this excerpt elides braces,
   the return type and some `if (record_debug)'/`else' lines; code text
   is unchanged.  */
3603 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3605 uint8_t insn_bit21, opcode, rmode, reg_rd;
3606 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3607 uint8_t insn_bits11_14;
3608 uint32_t record_buf[2];
3610 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3611 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3612 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3613 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3614 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3615 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3616 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3617 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3618 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3622 fprintf_unfiltered (gdb_stdlog,
3623 "Process record: data processing SIMD/FP: ");
/* Scalar floating-point sub-classes.  */
3626 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3628 /* Floating point - fixed point conversion instructions. */
3632 fprintf_unfiltered (gdb_stdlog, "FP - fixed point conversion");
/* FCVTZ* to a general register vs SCVTF/UCVTF to a vector one.  */
3634 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3635 record_buf[0] = reg_rd;
3637 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3639 /* Floating point - conditional compare instructions. */
3640 else if (insn_bits10_11 == 0x01)
3643 fprintf_unfiltered (gdb_stdlog, "FP - conditional compare");
3645 record_buf[0] = AARCH64_CPSR_REGNUM;
3647 /* Floating point - data processing (2-source) and
3648 conditional select instructions. */
3649 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3652 fprintf_unfiltered (gdb_stdlog, "FP - DP (2-source)");
3654 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3656 else if (insn_bits10_11 == 0x00)
3658 /* Floating point - immediate instructions. */
3659 if ((insn_bits12_15 & 0x01) == 0x01
3660 || (insn_bits12_15 & 0x07) == 0x04)
3663 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3664 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3666 /* Floating point - compare instructions. */
3667 else if ((insn_bits12_15 & 0x03) == 0x02)
3670 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3671 record_buf[0] = AARCH64_CPSR_REGNUM;
3673 /* Floating point - integer conversions instructions. */
3674 else if (insn_bits12_15 == 0x00)
3676 /* Convert float to integer instruction. */
3677 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3680 fprintf_unfiltered (gdb_stdlog, "float to int conversion");
3682 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3684 /* Convert integer to float instruction. */
3685 else if ((opcode >> 1) == 0x01 && !rmode)
3688 fprintf_unfiltered (gdb_stdlog, "int to float conversion");
3690 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3692 /* Move float to integer instruction. */
3693 else if ((opcode >> 1) == 0x03)
3696 fprintf_unfiltered (gdb_stdlog, "move float to int");
/* FMOV direction (to general vs to vector) is in opcode bit 0.  */
3698 if (!(opcode & 0x01))
3699 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3701 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3704 return AARCH64_RECORD_UNKNOWN;
3707 return AARCH64_RECORD_UNKNOWN;
3710 return AARCH64_RECORD_UNKNOWN;
/* Advanced SIMD sub-classes.  */
3712 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3715 fprintf_unfiltered (gdb_stdlog, "SIMD copy");
3717 /* Advanced SIMD copy instructions. */
3718 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3719 && !bit (aarch64_insn_r->aarch64_insn, 15)
3720 && bit (aarch64_insn_r->aarch64_insn, 10))
/* SMOV/UMOV write a general register; all other copies a vector.  */
3722 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3723 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3725 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3728 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3730 /* All remaining floating point or advanced SIMD instructions. */
3734 fprintf_unfiltered (gdb_stdlog, "all remain");
3736 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3740 fprintf_unfiltered (gdb_stdlog, "\n");
/* Exactly one destination register is recorded by this handler.  */
3742 aarch64_insn_r->reg_rec_count++;
3743 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3744 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3746 return AARCH64_RECORD_SUCCESS;
3749 /* Decodes insns type and invokes its record handler. */
/* Top-level dispatcher: classify the instruction by bits 25-28 of the
   AArch64 encoding and delegate to the matching per-class record
   handler above.  NOTE(review): the excerpt elides the return type and
   braces.  */
3752 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3754 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3756 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3757 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3758 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3759 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3761 /* Data processing - immediate instructions. */
3762 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3763 return aarch64_record_data_proc_imm (aarch64_insn_r);
3765 /* Branch, exception generation and system instructions. */
3766 if (ins_bit26 && !ins_bit27 && ins_bit28)
3767 return aarch64_record_branch_except_sys (aarch64_insn_r);
3769 /* Load and store instructions. */
3770 if (!ins_bit25 && ins_bit27)
3771 return aarch64_record_load_store (aarch64_insn_r);
3773 /* Data processing - register instructions. */
3774 if (ins_bit25 && !ins_bit26 && ins_bit27)
3775 return aarch64_record_data_proc_reg (aarch64_insn_r);
3777 /* Data processing - SIMD and floating point instructions. */
3778 if (ins_bit25 && ins_bit26 && ins_bit27)
3779 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
/* No classifier matched: not recordable.  */
3781 return AARCH64_RECORD_UNSUPPORTED;
3784 /* Cleans up local record registers and memory allocations. */
3787 deallocate_reg_mem (insn_decode_record *record)
3789 xfree (record->aarch64_regs);
3790 xfree (record->aarch64_mems);
3793 /* Parse the current instruction and record the values of the registers and
3794 memory that will be changed in current instruction to record_arch_list
3795 return -1 if something is wrong. */
3798 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3799 CORE_ADDR insn_addr)
3801 uint32_t rec_no = 0;
3802 uint8_t insn_size = 4;
3804 ULONGEST t_bit = 0, insn_id = 0;
3805 gdb_byte buf[insn_size];
3806 insn_decode_record aarch64_record;
3808 memset (&buf[0], 0, insn_size);
3809 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3810 target_read_memory (insn_addr, &buf[0], insn_size);
3811 aarch64_record.aarch64_insn
3812 = (uint32_t) extract_unsigned_integer (&buf[0],
3814 gdbarch_byte_order (gdbarch));
3815 aarch64_record.regcache = regcache;
3816 aarch64_record.this_addr = insn_addr;
3817 aarch64_record.gdbarch = gdbarch;
3819 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3820 if (ret == AARCH64_RECORD_UNSUPPORTED)
3822 printf_unfiltered (_("Process record does not support instruction "
3823 "0x%0x at address %s.\n"),
3824 aarch64_record.aarch64_insn,
3825 paddress (gdbarch, insn_addr));
3831 /* Record registers. */
3832 record_full_arch_list_add_reg (aarch64_record.regcache,
3834 /* Always record register CPSR. */
3835 record_full_arch_list_add_reg (aarch64_record.regcache,
3836 AARCH64_CPSR_REGNUM);
3837 if (aarch64_record.aarch64_regs)
3838 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3839 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3840 aarch64_record.aarch64_regs[rec_no]))
3843 /* Record memories. */
3844 if (aarch64_record.aarch64_mems)
3845 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3846 if (record_full_arch_list_add_mem
3847 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3848 aarch64_record.aarch64_mems[rec_no].len))
3851 if (record_full_arch_list_add_end ())
3855 deallocate_reg_mem (&aarch64_record);