1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
46 #include "aarch64-tdep.h"
49 #include "elf/aarch64.h"
54 #include "record-full.h"
56 #include "features/aarch64.c"
58 /* Pseudo register base numbers. */
59 #define AARCH64_Q0_REGNUM 0
60 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
61 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
62 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
63 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
65 /* The standard register names, and all the valid aliases for them. */
68 const char *const name;
70 } aarch64_register_aliases[] =
72 /* 64-bit register names. */
73 {"fp", AARCH64_FP_REGNUM},
74 {"lr", AARCH64_LR_REGNUM},
75 {"sp", AARCH64_SP_REGNUM},
77 /* 32-bit register names. */
78 {"w0", AARCH64_X0_REGNUM + 0},
79 {"w1", AARCH64_X0_REGNUM + 1},
80 {"w2", AARCH64_X0_REGNUM + 2},
81 {"w3", AARCH64_X0_REGNUM + 3},
82 {"w4", AARCH64_X0_REGNUM + 4},
83 {"w5", AARCH64_X0_REGNUM + 5},
84 {"w6", AARCH64_X0_REGNUM + 6},
85 {"w7", AARCH64_X0_REGNUM + 7},
86 {"w8", AARCH64_X0_REGNUM + 8},
87 {"w9", AARCH64_X0_REGNUM + 9},
88 {"w10", AARCH64_X0_REGNUM + 10},
89 {"w11", AARCH64_X0_REGNUM + 11},
90 {"w12", AARCH64_X0_REGNUM + 12},
91 {"w13", AARCH64_X0_REGNUM + 13},
92 {"w14", AARCH64_X0_REGNUM + 14},
93 {"w15", AARCH64_X0_REGNUM + 15},
94 {"w16", AARCH64_X0_REGNUM + 16},
95 {"w17", AARCH64_X0_REGNUM + 17},
96 {"w18", AARCH64_X0_REGNUM + 18},
97 {"w19", AARCH64_X0_REGNUM + 19},
98 {"w20", AARCH64_X0_REGNUM + 20},
99 {"w21", AARCH64_X0_REGNUM + 21},
100 {"w22", AARCH64_X0_REGNUM + 22},
101 {"w23", AARCH64_X0_REGNUM + 23},
102 {"w24", AARCH64_X0_REGNUM + 24},
103 {"w25", AARCH64_X0_REGNUM + 25},
104 {"w26", AARCH64_X0_REGNUM + 26},
105 {"w27", AARCH64_X0_REGNUM + 27},
106 {"w28", AARCH64_X0_REGNUM + 28},
107 {"w29", AARCH64_X0_REGNUM + 29},
108 {"w30", AARCH64_X0_REGNUM + 30},
111 {"ip0", AARCH64_X0_REGNUM + 16},
112 {"ip1", AARCH64_X0_REGNUM + 17}
115 /* The required core 'R' registers. */
116 static const char *const aarch64_r_register_names[] =
118 /* These registers must appear in consecutive RAW register number
119 order and they must begin with AARCH64_X0_REGNUM! */
120 "x0", "x1", "x2", "x3",
121 "x4", "x5", "x6", "x7",
122 "x8", "x9", "x10", "x11",
123 "x12", "x13", "x14", "x15",
124 "x16", "x17", "x18", "x19",
125 "x20", "x21", "x22", "x23",
126 "x24", "x25", "x26", "x27",
127 "x28", "x29", "x30", "sp",
131 /* The FP/SIMD 'V' registers. */
132 static const char *const aarch64_v_register_names[] =
134 /* These registers must appear in consecutive RAW register number
135 order and they must begin with AARCH64_V0_REGNUM! */
136 "v0", "v1", "v2", "v3",
137 "v4", "v5", "v6", "v7",
138 "v8", "v9", "v10", "v11",
139 "v12", "v13", "v14", "v15",
140 "v16", "v17", "v18", "v19",
141 "v20", "v21", "v22", "v23",
142 "v24", "v25", "v26", "v27",
143 "v28", "v29", "v30", "v31",
148 /* AArch64 prologue cache structure. */
149 struct aarch64_prologue_cache
151 /* The stack pointer at the time this frame was created; i.e. the
152 caller's stack pointer when this function was called. It is used
153 to identify this frame. */
156 /* The frame base for this frame is just prev_sp - frame size.
157 FRAMESIZE is the distance from the frame pointer to the
158 initial stack pointer. */
161 /* The register used to hold the frame pointer for this frame. */
164 /* Saved register offsets. */
165 struct trad_frame_saved_reg *saved_regs;
168 /* Toggle this file's internal debugging dump. */
169 static int aarch64_debug;
172 show_aarch64_debug (struct ui_file *file, int from_tty,
173 struct cmd_list_element *c, const char *value)
175 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
178 /* Extract a signed value from a bit field within an instruction
181 INSN is the instruction opcode.
183 WIDTH specifies the width of the bit field to extract (in bits).
185 OFFSET specifies the least significant bit of the field where bits
186 are numbered zero counting from least to most significant. */
189 extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
191 unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
192 unsigned shift_r = sizeof (int32_t) * 8 - width;
194 return ((int32_t) insn << shift_l) >> shift_r;
197 /* Determine if specified bits within an instruction opcode matches a
200 INSN is the instruction opcode.
202 MASK specifies the bits within the opcode that are to be tested
203 agsinst for a match with PATTERN. */
206 decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
208 return (insn & mask) == pattern;
211 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
213 ADDR specifies the address of the opcode.
214 INSN specifies the opcode to test.
215 RD receives the 'rd' field from the decoded instruction.
216 RN receives the 'rn' field from the decoded instruction.
218 Return 1 if the opcodes matches and is decoded, otherwise 0. */
220 decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
223 if ((insn & 0x9f000000) == 0x91000000)
228 *rd = (insn >> 0) & 0x1f;
229 *rn = (insn >> 5) & 0x1f;
230 *imm = (insn >> 10) & 0xfff;
231 shift = (insn >> 22) & 0x3;
232 op_is_sub = (insn >> 30) & 0x1;
250 fprintf_unfiltered (gdb_stdlog,
251 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
252 core_addr_to_string_nz (addr), insn, *rd, *rn,
259 /* Decode an opcode if it represents an ADRP instruction.
261 ADDR specifies the address of the opcode.
262 INSN specifies the opcode to test.
263 RD receives the 'rd' field from the decoded instruction.
265 Return 1 if the opcodes matches and is decoded, otherwise 0. */
268 decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
270 if (decode_masked_match (insn, 0x9f000000, 0x90000000))
272 *rd = (insn >> 0) & 0x1f;
275 fprintf_unfiltered (gdb_stdlog,
276 "decode: 0x%s 0x%x adrp x%u, #?\n",
277 core_addr_to_string_nz (addr), insn, *rd);
283 /* Decode an opcode if it represents an branch immediate or branch
284 and link immediate instruction.
286 ADDR specifies the address of the opcode.
287 INSN specifies the opcode to test.
288 LINK receives the 'link' bit from the decoded instruction.
289 OFFSET receives the immediate offset from the decoded instruction.
291 Return 1 if the opcodes matches and is decoded, otherwise 0. */
294 decode_b (CORE_ADDR addr, uint32_t insn, unsigned *link, int32_t *offset)
296 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
297 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
298 if (decode_masked_match (insn, 0x7c000000, 0x14000000))
301 *offset = extract_signed_bitfield (insn, 26, 0) << 2;
304 fprintf_unfiltered (gdb_stdlog,
305 "decode: 0x%s 0x%x %s 0x%s\n",
306 core_addr_to_string_nz (addr), insn,
308 core_addr_to_string_nz (addr + *offset));
315 /* Decode an opcode if it represents a conditional branch instruction.
317 ADDR specifies the address of the opcode.
318 INSN specifies the opcode to test.
319 COND receives the branch condition field from the decoded
321 OFFSET receives the immediate offset from the decoded instruction.
323 Return 1 if the opcodes matches and is decoded, otherwise 0. */
326 decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
328 if (decode_masked_match (insn, 0xfe000000, 0x54000000))
330 *cond = (insn >> 0) & 0xf;
331 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
334 fprintf_unfiltered (gdb_stdlog,
335 "decode: 0x%s 0x%x b<%u> 0x%s\n",
336 core_addr_to_string_nz (addr), insn, *cond,
337 core_addr_to_string_nz (addr + *offset));
343 /* Decode an opcode if it represents a branch via register instruction.
345 ADDR specifies the address of the opcode.
346 INSN specifies the opcode to test.
347 LINK receives the 'link' bit from the decoded instruction.
348 RN receives the 'rn' field from the decoded instruction.
350 Return 1 if the opcodes matches and is decoded, otherwise 0. */
353 decode_br (CORE_ADDR addr, uint32_t insn, unsigned *link, unsigned *rn)
355 /* 8 4 0 6 2 8 4 0 */
356 /* blr 110101100011111100000000000rrrrr */
357 /* br 110101100001111100000000000rrrrr */
358 if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
360 *link = (insn >> 21) & 1;
361 *rn = (insn >> 5) & 0x1f;
364 fprintf_unfiltered (gdb_stdlog,
365 "decode: 0x%s 0x%x %s 0x%x\n",
366 core_addr_to_string_nz (addr), insn,
367 *link ? "blr" : "br", *rn);
374 /* Decode an opcode if it represents a CBZ or CBNZ instruction.
376 ADDR specifies the address of the opcode.
377 INSN specifies the opcode to test.
378 IS64 receives the 'sf' field from the decoded instruction.
379 OP receives the 'op' field from the decoded instruction.
380 RN receives the 'rn' field from the decoded instruction.
381 OFFSET receives the 'imm19' field from the decoded instruction.
383 Return 1 if the opcodes matches and is decoded, otherwise 0. */
386 decode_cb (CORE_ADDR addr,
387 uint32_t insn, int *is64, unsigned *op, unsigned *rn,
390 if (decode_masked_match (insn, 0x7e000000, 0x34000000))
392 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
393 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
395 *rn = (insn >> 0) & 0x1f;
396 *is64 = (insn >> 31) & 0x1;
397 *op = (insn >> 24) & 0x1;
398 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
401 fprintf_unfiltered (gdb_stdlog,
402 "decode: 0x%s 0x%x %s 0x%s\n",
403 core_addr_to_string_nz (addr), insn,
404 *op ? "cbnz" : "cbz",
405 core_addr_to_string_nz (addr + *offset));
411 /* Decode an opcode if it represents a ERET instruction.
413 ADDR specifies the address of the opcode.
414 INSN specifies the opcode to test.
416 Return 1 if the opcodes matches and is decoded, otherwise 0. */
419 decode_eret (CORE_ADDR addr, uint32_t insn)
421 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
422 if (insn == 0xd69f03e0)
425 fprintf_unfiltered (gdb_stdlog, "decode: 0x%s 0x%x eret\n",
426 core_addr_to_string_nz (addr), insn);
432 /* Decode an opcode if it represents a MOVZ instruction.
434 ADDR specifies the address of the opcode.
435 INSN specifies the opcode to test.
436 RD receives the 'rd' field from the decoded instruction.
438 Return 1 if the opcodes matches and is decoded, otherwise 0. */
441 decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
443 if (decode_masked_match (insn, 0xff800000, 0x52800000))
445 *rd = (insn >> 0) & 0x1f;
448 fprintf_unfiltered (gdb_stdlog,
449 "decode: 0x%s 0x%x movz x%u, #?\n",
450 core_addr_to_string_nz (addr), insn, *rd);
456 /* Decode an opcode if it represents a ORR (shifted register)
459 ADDR specifies the address of the opcode.
460 INSN specifies the opcode to test.
461 RD receives the 'rd' field from the decoded instruction.
462 RN receives the 'rn' field from the decoded instruction.
463 RM receives the 'rm' field from the decoded instruction.
464 IMM receives the 'imm6' field from the decoded instruction.
466 Return 1 if the opcodes matches and is decoded, otherwise 0. */
469 decode_orr_shifted_register_x (CORE_ADDR addr,
470 uint32_t insn, unsigned *rd, unsigned *rn,
471 unsigned *rm, int32_t *imm)
473 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
475 *rd = (insn >> 0) & 0x1f;
476 *rn = (insn >> 5) & 0x1f;
477 *rm = (insn >> 16) & 0x1f;
478 *imm = (insn >> 10) & 0x3f;
481 fprintf_unfiltered (gdb_stdlog,
482 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
483 core_addr_to_string_nz (addr), insn, *rd,
490 /* Decode an opcode if it represents a RET instruction.
492 ADDR specifies the address of the opcode.
493 INSN specifies the opcode to test.
494 RN receives the 'rn' field from the decoded instruction.
496 Return 1 if the opcodes matches and is decoded, otherwise 0. */
499 decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
501 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
503 *rn = (insn >> 5) & 0x1f;
505 fprintf_unfiltered (gdb_stdlog,
506 "decode: 0x%s 0x%x ret x%u\n",
507 core_addr_to_string_nz (addr), insn, *rn);
513 /* Decode an opcode if it represents the following instruction:
514 STP rt, rt2, [rn, #imm]
516 ADDR specifies the address of the opcode.
517 INSN specifies the opcode to test.
518 RT1 receives the 'rt' field from the decoded instruction.
519 RT2 receives the 'rt2' field from the decoded instruction.
520 RN receives the 'rn' field from the decoded instruction.
521 IMM receives the 'imm' field from the decoded instruction.
523 Return 1 if the opcodes matches and is decoded, otherwise 0. */
526 decode_stp_offset (CORE_ADDR addr,
528 unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
530 if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
532 *rt1 = (insn >> 0) & 0x1f;
533 *rn = (insn >> 5) & 0x1f;
534 *rt2 = (insn >> 10) & 0x1f;
535 *imm = extract_signed_bitfield (insn, 7, 15);
539 fprintf_unfiltered (gdb_stdlog,
540 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
541 core_addr_to_string_nz (addr), insn,
542 *rt1, *rt2, *rn, *imm);
548 /* Decode an opcode if it represents the following instruction:
549 STP rt, rt2, [rn, #imm]!
551 ADDR specifies the address of the opcode.
552 INSN specifies the opcode to test.
553 RT1 receives the 'rt' field from the decoded instruction.
554 RT2 receives the 'rt2' field from the decoded instruction.
555 RN receives the 'rn' field from the decoded instruction.
556 IMM receives the 'imm' field from the decoded instruction.
558 Return 1 if the opcodes matches and is decoded, otherwise 0. */
561 decode_stp_offset_wb (CORE_ADDR addr,
563 unsigned *rt1, unsigned *rt2, unsigned *rn,
566 if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
568 *rt1 = (insn >> 0) & 0x1f;
569 *rn = (insn >> 5) & 0x1f;
570 *rt2 = (insn >> 10) & 0x1f;
571 *imm = extract_signed_bitfield (insn, 7, 15);
575 fprintf_unfiltered (gdb_stdlog,
576 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
577 core_addr_to_string_nz (addr), insn,
578 *rt1, *rt2, *rn, *imm);
584 /* Decode an opcode if it represents the following instruction:
587 ADDR specifies the address of the opcode.
588 INSN specifies the opcode to test.
589 IS64 receives size field from the decoded instruction.
590 RT receives the 'rt' field from the decoded instruction.
591 RN receives the 'rn' field from the decoded instruction.
592 IMM receives the 'imm' field from the decoded instruction.
594 Return 1 if the opcodes matches and is decoded, otherwise 0. */
597 decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
598 unsigned *rn, int32_t *imm)
600 if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
602 *is64 = (insn >> 30) & 1;
603 *rt = (insn >> 0) & 0x1f;
604 *rn = (insn >> 5) & 0x1f;
605 *imm = extract_signed_bitfield (insn, 9, 12);
608 fprintf_unfiltered (gdb_stdlog,
609 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
610 core_addr_to_string_nz (addr), insn,
611 *is64 ? 'x' : 'w', *rt, *rn, *imm);
617 /* Decode an opcode if it represents a TB or TBNZ instruction.
619 ADDR specifies the address of the opcode.
620 INSN specifies the opcode to test.
621 OP receives the 'op' field from the decoded instruction.
622 BIT receives the bit position field from the decoded instruction.
623 RT receives 'rt' field from the decoded instruction.
624 IMM receives 'imm' field from the decoded instruction.
626 Return 1 if the opcodes matches and is decoded, otherwise 0. */
629 decode_tb (CORE_ADDR addr,
630 uint32_t insn, unsigned *op, unsigned *bit, unsigned *rt,
633 if (decode_masked_match (insn, 0x7e000000, 0x36000000))
635 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
636 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
638 *rt = (insn >> 0) & 0x1f;
639 *op = insn & (1 << 24);
640 *bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
641 *imm = extract_signed_bitfield (insn, 14, 5) << 2;
644 fprintf_unfiltered (gdb_stdlog,
645 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
646 core_addr_to_string_nz (addr), insn,
647 *op ? "tbnz" : "tbz", *rt, *bit,
648 core_addr_to_string_nz (addr + *imm));
654 /* Analyze a prologue, looking for a recognizable stack frame
655 and frame pointer. Scan until we encounter a store that could
656 clobber the stack frame unexpectedly, or an unknown instruction. */
659 aarch64_analyze_prologue (struct gdbarch *gdbarch,
660 CORE_ADDR start, CORE_ADDR limit,
661 struct aarch64_prologue_cache *cache)
663 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
665 pv_t regs[AARCH64_X_REGISTER_COUNT];
666 struct pv_area *stack;
667 struct cleanup *back_to;
669 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
670 regs[i] = pv_register (i, 0);
671 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
672 back_to = make_cleanup_free_pv_area (stack);
674 for (; start < limit; start += 4)
692 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
694 if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
695 regs[rd] = pv_add_constant (regs[rn], imm);
696 else if (decode_adrp (start, insn, &rd))
697 regs[rd] = pv_unknown ();
698 else if (decode_b (start, insn, &is_link, &offset))
700 /* Stop analysis on branch. */
703 else if (decode_bcond (start, insn, &cond, &offset))
705 /* Stop analysis on branch. */
708 else if (decode_br (start, insn, &is_link, &rn))
710 /* Stop analysis on branch. */
713 else if (decode_cb (start, insn, &is64, &op, &rn, &offset))
715 /* Stop analysis on branch. */
718 else if (decode_eret (start, insn))
720 /* Stop analysis on branch. */
723 else if (decode_movz (start, insn, &rd))
724 regs[rd] = pv_unknown ();
726 if (decode_orr_shifted_register_x (start, insn, &rd, &rn, &rm, &imm))
728 if (imm == 0 && rn == 31)
735 "aarch64: prologue analysis gave up addr=0x%s "
736 "opcode=0x%x (orr x register)\n",
737 core_addr_to_string_nz (start),
742 else if (decode_ret (start, insn, &rn))
744 /* Stop analysis on branch. */
747 else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
749 pv_area_store (stack, pv_add_constant (regs[rn], offset),
750 is64 ? 8 : 4, regs[rt]);
752 else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
754 /* If recording this store would invalidate the store area
755 (perhaps because rn is not known) then we should abandon
756 further prologue analysis. */
757 if (pv_area_store_would_trash (stack,
758 pv_add_constant (regs[rn], imm)))
761 if (pv_area_store_would_trash (stack,
762 pv_add_constant (regs[rn], imm + 8)))
765 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
767 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
770 else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
772 /* If recording this store would invalidate the store area
773 (perhaps because rn is not known) then we should abandon
774 further prologue analysis. */
775 if (pv_area_store_would_trash (stack,
776 pv_add_constant (regs[rn], imm)))
779 if (pv_area_store_would_trash (stack,
780 pv_add_constant (regs[rn], imm + 8)))
783 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
785 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
787 regs[rn] = pv_add_constant (regs[rn], imm);
789 else if (decode_tb (start, insn, &op, &bit, &rn, &offset))
791 /* Stop analysis on branch. */
797 fprintf_unfiltered (gdb_stdlog,
798 "aarch64: prologue analysis gave up addr=0x%s"
800 core_addr_to_string_nz (start), insn);
807 do_cleanups (back_to);
811 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
813 /* Frame pointer is fp. Frame size is constant. */
814 cache->framereg = AARCH64_FP_REGNUM;
815 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
817 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
819 /* Try the stack pointer. */
820 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
821 cache->framereg = AARCH64_SP_REGNUM;
825 /* We're just out of luck. We don't know where the frame is. */
826 cache->framereg = -1;
827 cache->framesize = 0;
830 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
834 if (pv_area_find_reg (stack, gdbarch, i, &offset))
835 cache->saved_regs[i].addr = offset;
838 do_cleanups (back_to);
842 /* Implement the "skip_prologue" gdbarch method. */
845 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
849 CORE_ADDR func_addr, limit_pc;
850 struct symtab_and_line sal;
852 /* See if we can determine the end of the prologue via the symbol
853 table. If so, then return either PC, or the PC after the
854 prologue, whichever is greater. */
855 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
857 CORE_ADDR post_prologue_pc
858 = skip_prologue_using_sal (gdbarch, func_addr);
860 if (post_prologue_pc != 0)
861 return max (pc, post_prologue_pc);
864 /* Can't determine prologue from the symbol table, need to examine
867 /* Find an upper limit on the function prologue using the debug
868 information. If the debug information could not be used to
869 provide that bound, then use an arbitrary large number as the
871 limit_pc = skip_prologue_using_sal (gdbarch, pc);
873 limit_pc = pc + 128; /* Magic. */
875 /* Try disassembling prologue. */
876 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
879 /* Scan the function prologue for THIS_FRAME and populate the prologue
883 aarch64_scan_prologue (struct frame_info *this_frame,
884 struct aarch64_prologue_cache *cache)
886 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
887 CORE_ADDR prologue_start;
888 CORE_ADDR prologue_end;
889 CORE_ADDR prev_pc = get_frame_pc (this_frame);
890 struct gdbarch *gdbarch = get_frame_arch (this_frame);
892 /* Assume we do not find a frame. */
893 cache->framereg = -1;
894 cache->framesize = 0;
896 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
899 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
903 /* No line info so use the current PC. */
904 prologue_end = prev_pc;
906 else if (sal.end < prologue_end)
908 /* The next line begins after the function end. */
909 prologue_end = sal.end;
912 prologue_end = min (prologue_end, prev_pc);
913 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
920 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
922 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
926 cache->framereg = AARCH64_FP_REGNUM;
927 cache->framesize = 16;
928 cache->saved_regs[29].addr = 0;
929 cache->saved_regs[30].addr = 8;
933 /* Allocate and fill in *THIS_CACHE with information about the prologue of
934 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
935 Return a pointer to the current aarch64_prologue_cache in
938 static struct aarch64_prologue_cache *
939 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
941 struct aarch64_prologue_cache *cache;
942 CORE_ADDR unwound_fp;
945 if (*this_cache != NULL)
948 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
949 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
952 aarch64_scan_prologue (this_frame, cache);
954 if (cache->framereg == -1)
957 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
961 cache->prev_sp = unwound_fp + cache->framesize;
963 /* Calculate actual addresses of saved registers using offsets
964 determined by aarch64_analyze_prologue. */
965 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
966 if (trad_frame_addr_p (cache->saved_regs, reg))
967 cache->saved_regs[reg].addr += cache->prev_sp;
972 /* Our frame ID for a normal frame is the current function's starting
973 PC and the caller's SP when we were called. */
976 aarch64_prologue_this_id (struct frame_info *this_frame,
977 void **this_cache, struct frame_id *this_id)
979 struct aarch64_prologue_cache *cache
980 = aarch64_make_prologue_cache (this_frame, this_cache);
984 /* This is meant to halt the backtrace at "_start". */
985 pc = get_frame_pc (this_frame);
986 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
989 /* If we've hit a wall, stop. */
990 if (cache->prev_sp == 0)
993 func = get_frame_func (this_frame);
994 id = frame_id_build (cache->prev_sp, func);
998 /* Implement the "prev_register" frame_unwind method. */
1000 static struct value *
1001 aarch64_prologue_prev_register (struct frame_info *this_frame,
1002 void **this_cache, int prev_regnum)
1004 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1005 struct aarch64_prologue_cache *cache
1006 = aarch64_make_prologue_cache (this_frame, this_cache);
1008 /* If we are asked to unwind the PC, then we need to return the LR
1009 instead. The prologue may save PC, but it will point into this
1010 frame's prologue, not the next frame's resume location. */
1011 if (prev_regnum == AARCH64_PC_REGNUM)
1015 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1016 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1019 /* SP is generally not saved to the stack, but this frame is
1020 identified by the next frame's stack pointer at the time of the
1021 call. The value was already reconstructed into PREV_SP. */
1027 | | | <- Previous SP
1030 +--| saved fp |<- FP
1034 if (prev_regnum == AARCH64_SP_REGNUM)
1035 return frame_unwind_got_constant (this_frame, prev_regnum,
1038 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1042 /* AArch64 prologue unwinder. */
1043 struct frame_unwind aarch64_prologue_unwind =
1046 default_frame_unwind_stop_reason,
1047 aarch64_prologue_this_id,
1048 aarch64_prologue_prev_register,
1050 default_frame_sniffer
1053 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1054 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1055 Return a pointer to the current aarch64_prologue_cache in
1058 static struct aarch64_prologue_cache *
1059 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
1061 struct aarch64_prologue_cache *cache;
1063 if (*this_cache != NULL)
1066 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1067 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1068 *this_cache = cache;
1071 = get_frame_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1076 /* Our frame ID for a stub frame is the current SP and LR. */
1079 aarch64_stub_this_id (struct frame_info *this_frame,
1080 void **this_cache, struct frame_id *this_id)
1082 struct aarch64_prologue_cache *cache
1083 = aarch64_make_stub_cache (this_frame, this_cache);
1085 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
1088 /* Implement the "sniffer" frame_unwind method. */
1091 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1092 struct frame_info *this_frame,
1093 void **this_prologue_cache)
1095 CORE_ADDR addr_in_block;
1098 addr_in_block = get_frame_address_in_block (this_frame);
1099 if (in_plt_section (addr_in_block)
1100 /* We also use the stub winder if the target memory is unreadable
1101 to avoid having the prologue unwinder trying to read it. */
1102 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1108 /* AArch64 stub unwinder. */
1109 struct frame_unwind aarch64_stub_unwind =
1112 default_frame_unwind_stop_reason,
1113 aarch64_stub_this_id,
1114 aarch64_prologue_prev_register,
1116 aarch64_stub_unwind_sniffer
1119 /* Return the frame base address of *THIS_FRAME. */
1122 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1124 struct aarch64_prologue_cache *cache
1125 = aarch64_make_prologue_cache (this_frame, this_cache);
1127 return cache->prev_sp - cache->framesize;
1130 /* AArch64 default frame base information. */
1131 struct frame_base aarch64_normal_base =
1133 &aarch64_prologue_unwind,
1134 aarch64_normal_frame_base,
1135 aarch64_normal_frame_base,
1136 aarch64_normal_frame_base
1139 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1140 dummy frame. The frame ID's base needs to match the TOS value
1141 saved by save_dummy_frame_tos () and returned from
1142 aarch64_push_dummy_call, and the PC needs to match the dummy
1143 frame's breakpoint. */
1145 static struct frame_id
1146 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1148 return frame_id_build (get_frame_register_unsigned (this_frame,
1150 get_frame_pc (this_frame));
1153 /* Implement the "unwind_pc" gdbarch method. */
1156 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1159 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1164 /* Implement the "unwind_sp" gdbarch method. */
1167 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1169 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1172 /* Return the value of the REGNUM register in the previous frame of
1175 static struct value *
1176 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1177 void **this_cache, int regnum)
1179 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1184 case AARCH64_PC_REGNUM:
1185 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1186 return frame_unwind_got_constant (this_frame, regnum, lr);
1189 internal_error (__FILE__, __LINE__,
1190 _("Unexpected register %d"), regnum);
1194 /* Implement the "init_reg" dwarf2_frame_ops method. */
1197 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1198 struct dwarf2_frame_state_reg *reg,
1199 struct frame_info *this_frame)
1203 case AARCH64_PC_REGNUM:
1204 reg->how = DWARF2_FRAME_REG_FN;
1205 reg->loc.fn = aarch64_dwarf2_prev_register;
1207 case AARCH64_SP_REGNUM:
1208 reg->how = DWARF2_FRAME_REG_CFA;
1213 /* When arguments must be pushed onto the stack, they go on in reverse
1214 order. The code below implements a FILO (stack) to do this. */
1218 /* Value to pass on stack. */
1221 /* Size in bytes of value to pass on stack. */
1225 DEF_VEC_O (stack_item_t);
1227 /* Return the alignment (in bytes) of the given type. */
1230 aarch64_type_align (struct type *t)
1236 t = check_typedef (t);
1237 switch (TYPE_CODE (t))
1240 /* Should never happen. */
1241 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1245 case TYPE_CODE_ENUM:
1249 case TYPE_CODE_RANGE:
1250 case TYPE_CODE_BITSTRING:
1252 case TYPE_CODE_CHAR:
1253 case TYPE_CODE_BOOL:
1254 return TYPE_LENGTH (t);
1256 case TYPE_CODE_ARRAY:
1257 case TYPE_CODE_COMPLEX:
1258 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1260 case TYPE_CODE_STRUCT:
1261 case TYPE_CODE_UNION:
1263 for (n = 0; n < TYPE_NFIELDS (t); n++)
1265 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1273 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1274 defined in the AAPCS64 ABI document; otherwise return 0. */
1277 is_hfa (struct type *ty)
1279 switch (TYPE_CODE (ty))
1281 case TYPE_CODE_ARRAY:
1283 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1284 if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
1289 case TYPE_CODE_UNION:
1290 case TYPE_CODE_STRUCT:
1292 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1294 struct type *member0_type;
1296 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1297 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
1301 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1303 struct type *member1_type;
1305 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1306 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1307 || (TYPE_LENGTH (member0_type)
1308 != TYPE_LENGTH (member1_type)))
1324 /* AArch64 function call information structure. */
1325 struct aarch64_call_info
1327 /* the current argument number. */
1330 /* The next general purpose register number, equivalent to NGRN as
1331 described in the AArch64 Procedure Call Standard. */
1334 /* The next SIMD and floating point register number, equivalent to
1335 NSRN as described in the AArch64 Procedure Call Standard. */
1338 /* The next stacked argument address, equivalent to NSAA as
1339 described in the AArch64 Procedure Call Standard. */
1342 /* Stack item vector. */
1343 VEC(stack_item_t) *si;
1346 /* Pass a value in a sequence of consecutive X registers. The caller
1347 is responsbile for ensuring sufficient registers are available. */
1350 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1351 struct aarch64_call_info *info, struct type *type,
1352 const bfd_byte *buf)
1354 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1355 int len = TYPE_LENGTH (type);
1356 enum type_code typecode = TYPE_CODE (type);
1357 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1363 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1364 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1368 /* Adjust sub-word struct/union args when big-endian. */
1369 if (byte_order == BFD_ENDIAN_BIG
1370 && partial_len < X_REGISTER_SIZE
1371 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1372 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1375 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
1377 gdbarch_register_name (gdbarch, regnum),
1378 phex (regval, X_REGISTER_SIZE));
1379 regcache_cooked_write_unsigned (regcache, regnum, regval);
1386 /* Attempt to marshall a value in a V register. Return 1 if
1387 successful, or 0 if insufficient registers are available. This
1388 function, unlike the equivalent pass_in_x() function does not
1389 handle arguments spread across multiple registers. */
1392 pass_in_v (struct gdbarch *gdbarch,
1393 struct regcache *regcache,
1394 struct aarch64_call_info *info,
1395 const bfd_byte *buf)
1399 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1400 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1405 regcache_cooked_write (regcache, regnum, buf);
1407 fprintf_unfiltered (gdb_stdlog, "arg %d in %s\n",
1409 gdbarch_register_name (gdbarch, regnum));
1416 /* Marshall an argument onto the stack. */
1419 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1420 const bfd_byte *buf)
1422 int len = TYPE_LENGTH (type);
1428 align = aarch64_type_align (type);
1430 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1431 Natural alignment of the argument's type. */
1432 align = align_up (align, 8);
1434 /* The AArch64 PCS requires at most doubleword alignment. */
1439 fprintf_unfiltered (gdb_stdlog, "arg %d len=%d @ sp + %d\n",
1440 info->argnum, len, info->nsaa);
1444 VEC_safe_push (stack_item_t, info->si, &item);
1447 if (info->nsaa & (align - 1))
1449 /* Push stack alignment padding. */
1450 int pad = align - (info->nsaa & (align - 1));
1455 VEC_safe_push (stack_item_t, info->si, &item);
1460 /* Marshall an argument into a sequence of one or more consecutive X
1461 registers or, if insufficient X registers are available then onto
1465 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1466 struct aarch64_call_info *info, struct type *type,
1467 const bfd_byte *buf)
1469 int len = TYPE_LENGTH (type);
1470 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1472 /* PCS C.13 - Pass in registers if we have enough spare */
1473 if (info->ngrn + nregs <= 8)
1475 pass_in_x (gdbarch, regcache, info, type, buf);
1476 info->ngrn += nregs;
1481 pass_on_stack (info, type, buf);
1485 /* Pass a value in a V register, or on the stack if insufficient are
1489 pass_in_v_or_stack (struct gdbarch *gdbarch,
1490 struct regcache *regcache,
1491 struct aarch64_call_info *info,
1493 const bfd_byte *buf)
1495 if (!pass_in_v (gdbarch, regcache, info, buf))
1496 pass_on_stack (info, type, buf);
1499 /* Implement the "push_dummy_call" gdbarch method. */
1502 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1503 struct regcache *regcache, CORE_ADDR bp_addr,
1505 struct value **args, CORE_ADDR sp, int struct_return,
1506 CORE_ADDR struct_addr)
1512 struct aarch64_call_info info;
1513 struct type *func_type;
1514 struct type *return_type;
1515 int lang_struct_return;
1517 memset (&info, 0, sizeof (info));
1519 /* We need to know what the type of the called function is in order
1520 to determine the number of named/anonymous arguments for the
1521 actual argument placement, and the return type in order to handle
1522 return value correctly.
1524 The generic code above us views the decision of return in memory
1525 or return in registers as a two stage processes. The language
1526 handler is consulted first and may decide to return in memory (eg
1527 class with copy constructor returned by value), this will cause
1528 the generic code to allocate space AND insert an initial leading
1531 If the language code does not decide to pass in memory then the
1532 target code is consulted.
1534 If the language code decides to pass in memory we want to move
1535 the pointer inserted as the initial argument from the argument
1536 list and into X8, the conventional AArch64 struct return pointer
1539 This is slightly awkward, ideally the flag "lang_struct_return"
1540 would be passed to the targets implementation of push_dummy_call.
1541 Rather that change the target interface we call the language code
1542 directly ourselves. */
1544 func_type = check_typedef (value_type (function));
1546 /* Dereference function pointer types. */
1547 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1548 func_type = TYPE_TARGET_TYPE (func_type);
1550 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1551 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1553 /* If language_pass_by_reference () returned true we will have been
1554 given an additional initial argument, a hidden pointer to the
1555 return slot in memory. */
1556 return_type = TYPE_TARGET_TYPE (func_type);
1557 lang_struct_return = language_pass_by_reference (return_type);
1559 /* Set the return address. For the AArch64, the return breakpoint
1560 is always at BP_ADDR. */
1561 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1563 /* If we were given an initial argument for the return slot because
1564 lang_struct_return was true, lose it. */
1565 if (lang_struct_return)
1571 /* The struct_return pointer occupies X8. */
1572 if (struct_return || lang_struct_return)
1575 fprintf_unfiltered (gdb_stdlog, "struct return in %s = 0x%s\n",
1576 gdbarch_register_name
1578 AARCH64_STRUCT_RETURN_REGNUM),
1579 paddress (gdbarch, struct_addr));
1580 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1584 for (argnum = 0; argnum < nargs; argnum++)
1586 struct value *arg = args[argnum];
1587 struct type *arg_type;
1590 arg_type = check_typedef (value_type (arg));
1591 len = TYPE_LENGTH (arg_type);
1593 switch (TYPE_CODE (arg_type))
1596 case TYPE_CODE_BOOL:
1597 case TYPE_CODE_CHAR:
1598 case TYPE_CODE_RANGE:
1599 case TYPE_CODE_ENUM:
1602 /* Promote to 32 bit integer. */
1603 if (TYPE_UNSIGNED (arg_type))
1604 arg_type = builtin_type (gdbarch)->builtin_uint32;
1606 arg_type = builtin_type (gdbarch)->builtin_int32;
1607 arg = value_cast (arg_type, arg);
1609 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1610 value_contents (arg));
1613 case TYPE_CODE_COMPLEX:
1616 const bfd_byte *buf = value_contents (arg);
1617 struct type *target_type =
1618 check_typedef (TYPE_TARGET_TYPE (arg_type));
1620 pass_in_v (gdbarch, regcache, &info, buf);
1621 pass_in_v (gdbarch, regcache, &info,
1622 buf + TYPE_LENGTH (target_type));
1627 pass_on_stack (&info, arg_type, value_contents (arg));
1631 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1632 value_contents (arg));
1635 case TYPE_CODE_STRUCT:
1636 case TYPE_CODE_ARRAY:
1637 case TYPE_CODE_UNION:
1638 if (is_hfa (arg_type))
1640 int elements = TYPE_NFIELDS (arg_type);
1642 /* Homogeneous Aggregates */
1643 if (info.nsrn + elements < 8)
1647 for (i = 0; i < elements; i++)
1649 /* We know that we have sufficient registers
1650 available therefore this will never fallback
1652 struct value *field =
1653 value_primitive_field (arg, 0, i, arg_type);
1654 struct type *field_type =
1655 check_typedef (value_type (field));
1657 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1658 value_contents_writeable (field));
1664 pass_on_stack (&info, arg_type, value_contents (arg));
1669 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1670 invisible reference. */
1672 /* Allocate aligned storage. */
1673 sp = align_down (sp - len, 16);
1675 /* Write the real data into the stack. */
1676 write_memory (sp, value_contents (arg), len);
1678 /* Construct the indirection. */
1679 arg_type = lookup_pointer_type (arg_type);
1680 arg = value_from_pointer (arg_type, sp);
1681 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1682 value_contents (arg));
1685 /* PCS C.15 / C.18 multiple values pass. */
1686 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1687 value_contents (arg));
1691 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1692 value_contents (arg));
1697 /* Make sure stack retains 16 byte alignment. */
1699 sp -= 16 - (info.nsaa & 15);
1701 while (!VEC_empty (stack_item_t, info.si))
1703 stack_item_t *si = VEC_last (stack_item_t, info.si);
1706 write_memory (sp, si->data, si->len);
1707 VEC_pop (stack_item_t, info.si);
1710 VEC_free (stack_item_t, info.si);
1712 /* Finally, update the SP register. */
1713 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1718 /* Implement the "frame_align" gdbarch method. */
1721 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1723 /* Align the stack to sixteen bytes. */
1724 return sp & ~(CORE_ADDR) 15;
1727 /* Return the type for an AdvSISD Q register. */
1729 static struct type *
1730 aarch64_vnq_type (struct gdbarch *gdbarch)
1732 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1734 if (tdep->vnq_type == NULL)
1739 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1742 elem = builtin_type (gdbarch)->builtin_uint128;
1743 append_composite_type_field (t, "u", elem);
1745 elem = builtin_type (gdbarch)->builtin_int128;
1746 append_composite_type_field (t, "s", elem);
1751 return tdep->vnq_type;
1754 /* Return the type for an AdvSISD D register. */
1756 static struct type *
1757 aarch64_vnd_type (struct gdbarch *gdbarch)
1759 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1761 if (tdep->vnd_type == NULL)
1766 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1769 elem = builtin_type (gdbarch)->builtin_double;
1770 append_composite_type_field (t, "f", elem);
1772 elem = builtin_type (gdbarch)->builtin_uint64;
1773 append_composite_type_field (t, "u", elem);
1775 elem = builtin_type (gdbarch)->builtin_int64;
1776 append_composite_type_field (t, "s", elem);
1781 return tdep->vnd_type;
1784 /* Return the type for an AdvSISD S register. */
1786 static struct type *
1787 aarch64_vns_type (struct gdbarch *gdbarch)
1789 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1791 if (tdep->vns_type == NULL)
1796 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1799 elem = builtin_type (gdbarch)->builtin_float;
1800 append_composite_type_field (t, "f", elem);
1802 elem = builtin_type (gdbarch)->builtin_uint32;
1803 append_composite_type_field (t, "u", elem);
1805 elem = builtin_type (gdbarch)->builtin_int32;
1806 append_composite_type_field (t, "s", elem);
1811 return tdep->vns_type;
1814 /* Return the type for an AdvSISD H register. */
1816 static struct type *
1817 aarch64_vnh_type (struct gdbarch *gdbarch)
1819 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1821 if (tdep->vnh_type == NULL)
1826 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1829 elem = builtin_type (gdbarch)->builtin_uint16;
1830 append_composite_type_field (t, "u", elem);
1832 elem = builtin_type (gdbarch)->builtin_int16;
1833 append_composite_type_field (t, "s", elem);
1838 return tdep->vnh_type;
1841 /* Return the type for an AdvSISD B register. */
1843 static struct type *
1844 aarch64_vnb_type (struct gdbarch *gdbarch)
1846 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1848 if (tdep->vnb_type == NULL)
1853 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1856 elem = builtin_type (gdbarch)->builtin_uint8;
1857 append_composite_type_field (t, "u", elem);
1859 elem = builtin_type (gdbarch)->builtin_int8;
1860 append_composite_type_field (t, "s", elem);
1865 return tdep->vnb_type;
1868 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1871 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1873 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1874 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1876 if (reg == AARCH64_DWARF_SP)
1877 return AARCH64_SP_REGNUM;
1879 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1880 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1886 /* Implement the "print_insn" gdbarch method. */
1889 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1891 info->symbols = NULL;
1892 return print_insn_aarch64 (memaddr, info);
1895 /* AArch64 BRK software debug mode instruction.
1896 Note that AArch64 code is always little-endian.
1897 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1898 static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1900 /* Implement the "breakpoint_from_pc" gdbarch method. */
1902 static const gdb_byte *
1903 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1906 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1908 *lenptr = sizeof (aarch64_default_breakpoint);
1909 return aarch64_default_breakpoint;
1912 /* Extract from an array REGS containing the (raw) register state a
1913 function return value of type TYPE, and copy that, in virtual
1914 format, into VALBUF. */
1917 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1920 struct gdbarch *gdbarch = get_regcache_arch (regs);
1921 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1923 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1925 bfd_byte buf[V_REGISTER_SIZE];
1926 int len = TYPE_LENGTH (type);
1928 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1929 memcpy (valbuf, buf, len);
1931 else if (TYPE_CODE (type) == TYPE_CODE_INT
1932 || TYPE_CODE (type) == TYPE_CODE_CHAR
1933 || TYPE_CODE (type) == TYPE_CODE_BOOL
1934 || TYPE_CODE (type) == TYPE_CODE_PTR
1935 || TYPE_CODE (type) == TYPE_CODE_REF
1936 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1938 /* If the the type is a plain integer, then the access is
1939 straight-forward. Otherwise we have to play around a bit
1941 int len = TYPE_LENGTH (type);
1942 int regno = AARCH64_X0_REGNUM;
1947 /* By using store_unsigned_integer we avoid having to do
1948 anything special for small big-endian values. */
1949 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1950 store_unsigned_integer (valbuf,
1951 (len > X_REGISTER_SIZE
1952 ? X_REGISTER_SIZE : len), byte_order, tmp);
1953 len -= X_REGISTER_SIZE;
1954 valbuf += X_REGISTER_SIZE;
1957 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1959 int regno = AARCH64_V0_REGNUM;
1960 bfd_byte buf[V_REGISTER_SIZE];
1961 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1962 int len = TYPE_LENGTH (target_type);
1964 regcache_cooked_read (regs, regno, buf);
1965 memcpy (valbuf, buf, len);
1967 regcache_cooked_read (regs, regno + 1, buf);
1968 memcpy (valbuf, buf, len);
1971 else if (is_hfa (type))
1973 int elements = TYPE_NFIELDS (type);
1974 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1975 int len = TYPE_LENGTH (member_type);
1978 for (i = 0; i < elements; i++)
1980 int regno = AARCH64_V0_REGNUM + i;
1981 bfd_byte buf[X_REGISTER_SIZE];
1984 fprintf_unfiltered (gdb_stdlog,
1985 "read HFA return value element %d from %s\n",
1987 gdbarch_register_name (gdbarch, regno));
1988 regcache_cooked_read (regs, regno, buf);
1990 memcpy (valbuf, buf, len);
1996 /* For a structure or union the behaviour is as if the value had
1997 been stored to word-aligned memory and then loaded into
1998 registers with 64-bit load instruction(s). */
1999 int len = TYPE_LENGTH (type);
2000 int regno = AARCH64_X0_REGNUM;
2001 bfd_byte buf[X_REGISTER_SIZE];
2005 regcache_cooked_read (regs, regno++, buf);
2006 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2007 len -= X_REGISTER_SIZE;
2008 valbuf += X_REGISTER_SIZE;
2014 /* Will a function return an aggregate type in memory or in a
2015 register? Return 0 if an aggregate type can be returned in a
2016 register, 1 if it must be returned in memory. */
2019 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2022 enum type_code code;
2024 CHECK_TYPEDEF (type);
2026 /* In the AArch64 ABI, "integer" like aggregate types are returned
2027 in registers. For an aggregate type to be integer like, its size
2028 must be less than or equal to 4 * X_REGISTER_SIZE. */
2032 /* PCS B.5 If the argument is a Named HFA, then the argument is
2037 if (TYPE_LENGTH (type) > 16)
2039 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2040 invisible reference. */
2048 /* Write into appropriate registers a function return value of type
2049 TYPE, given in virtual format. */
2052 aarch64_store_return_value (struct type *type, struct regcache *regs,
2053 const gdb_byte *valbuf)
2055 struct gdbarch *gdbarch = get_regcache_arch (regs);
2056 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2058 if (TYPE_CODE (type) == TYPE_CODE_FLT)
2060 bfd_byte buf[V_REGISTER_SIZE];
2061 int len = TYPE_LENGTH (type);
2063 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2064 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2066 else if (TYPE_CODE (type) == TYPE_CODE_INT
2067 || TYPE_CODE (type) == TYPE_CODE_CHAR
2068 || TYPE_CODE (type) == TYPE_CODE_BOOL
2069 || TYPE_CODE (type) == TYPE_CODE_PTR
2070 || TYPE_CODE (type) == TYPE_CODE_REF
2071 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2073 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2075 /* Values of one word or less are zero/sign-extended and
2077 bfd_byte tmpbuf[X_REGISTER_SIZE];
2078 LONGEST val = unpack_long (type, valbuf);
2080 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2081 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
2085 /* Integral values greater than one word are stored in
2086 consecutive registers starting with r0. This will always
2087 be a multiple of the regiser size. */
2088 int len = TYPE_LENGTH (type);
2089 int regno = AARCH64_X0_REGNUM;
2093 regcache_cooked_write (regs, regno++, valbuf);
2094 len -= X_REGISTER_SIZE;
2095 valbuf += X_REGISTER_SIZE;
2099 else if (is_hfa (type))
2101 int elements = TYPE_NFIELDS (type);
2102 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2103 int len = TYPE_LENGTH (member_type);
2106 for (i = 0; i < elements; i++)
2108 int regno = AARCH64_V0_REGNUM + i;
2109 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
2112 fprintf_unfiltered (gdb_stdlog,
2113 "write HFA return value element %d to %s\n",
2115 gdbarch_register_name (gdbarch, regno));
2117 memcpy (tmpbuf, valbuf, len);
2118 regcache_cooked_write (regs, regno, tmpbuf);
2124 /* For a structure or union the behaviour is as if the value had
2125 been stored to word-aligned memory and then loaded into
2126 registers with 64-bit load instruction(s). */
2127 int len = TYPE_LENGTH (type);
2128 int regno = AARCH64_X0_REGNUM;
2129 bfd_byte tmpbuf[X_REGISTER_SIZE];
2133 memcpy (tmpbuf, valbuf,
2134 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2135 regcache_cooked_write (regs, regno++, tmpbuf);
2136 len -= X_REGISTER_SIZE;
2137 valbuf += X_REGISTER_SIZE;
2142 /* Implement the "return_value" gdbarch method. */
2144 static enum return_value_convention
2145 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2146 struct type *valtype, struct regcache *regcache,
2147 gdb_byte *readbuf, const gdb_byte *writebuf)
2149 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2151 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2152 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2153 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2155 if (aarch64_return_in_memory (gdbarch, valtype))
2158 fprintf_unfiltered (gdb_stdlog, "return value in memory\n");
2159 return RETURN_VALUE_STRUCT_CONVENTION;
2164 aarch64_store_return_value (valtype, regcache, writebuf);
2167 aarch64_extract_return_value (valtype, regcache, readbuf);
2170 fprintf_unfiltered (gdb_stdlog, "return value in registers\n");
2172 return RETURN_VALUE_REGISTER_CONVENTION;
2175 /* Implement the "get_longjmp_target" gdbarch method. */
2178 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2181 gdb_byte buf[X_REGISTER_SIZE];
2182 struct gdbarch *gdbarch = get_frame_arch (frame);
2183 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2184 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2186 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2188 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2192 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2197 /* Return the pseudo register name corresponding to register regnum. */
2200 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2202 static const char *const q_name[] =
2204 "q0", "q1", "q2", "q3",
2205 "q4", "q5", "q6", "q7",
2206 "q8", "q9", "q10", "q11",
2207 "q12", "q13", "q14", "q15",
2208 "q16", "q17", "q18", "q19",
2209 "q20", "q21", "q22", "q23",
2210 "q24", "q25", "q26", "q27",
2211 "q28", "q29", "q30", "q31",
2214 static const char *const d_name[] =
2216 "d0", "d1", "d2", "d3",
2217 "d4", "d5", "d6", "d7",
2218 "d8", "d9", "d10", "d11",
2219 "d12", "d13", "d14", "d15",
2220 "d16", "d17", "d18", "d19",
2221 "d20", "d21", "d22", "d23",
2222 "d24", "d25", "d26", "d27",
2223 "d28", "d29", "d30", "d31",
2226 static const char *const s_name[] =
2228 "s0", "s1", "s2", "s3",
2229 "s4", "s5", "s6", "s7",
2230 "s8", "s9", "s10", "s11",
2231 "s12", "s13", "s14", "s15",
2232 "s16", "s17", "s18", "s19",
2233 "s20", "s21", "s22", "s23",
2234 "s24", "s25", "s26", "s27",
2235 "s28", "s29", "s30", "s31",
2238 static const char *const h_name[] =
2240 "h0", "h1", "h2", "h3",
2241 "h4", "h5", "h6", "h7",
2242 "h8", "h9", "h10", "h11",
2243 "h12", "h13", "h14", "h15",
2244 "h16", "h17", "h18", "h19",
2245 "h20", "h21", "h22", "h23",
2246 "h24", "h25", "h26", "h27",
2247 "h28", "h29", "h30", "h31",
2250 static const char *const b_name[] =
2252 "b0", "b1", "b2", "b3",
2253 "b4", "b5", "b6", "b7",
2254 "b8", "b9", "b10", "b11",
2255 "b12", "b13", "b14", "b15",
2256 "b16", "b17", "b18", "b19",
2257 "b20", "b21", "b22", "b23",
2258 "b24", "b25", "b26", "b27",
2259 "b28", "b29", "b30", "b31",
2262 regnum -= gdbarch_num_regs (gdbarch);
2264 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2265 return q_name[regnum - AARCH64_Q0_REGNUM];
2267 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2268 return d_name[regnum - AARCH64_D0_REGNUM];
2270 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2271 return s_name[regnum - AARCH64_S0_REGNUM];
2273 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2274 return h_name[regnum - AARCH64_H0_REGNUM];
2276 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2277 return b_name[regnum - AARCH64_B0_REGNUM];
2279 internal_error (__FILE__, __LINE__,
2280 _("aarch64_pseudo_register_name: bad register number %d"),
2284 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2286 static struct type *
2287 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2289 regnum -= gdbarch_num_regs (gdbarch);
2291 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2292 return aarch64_vnq_type (gdbarch);
2294 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2295 return aarch64_vnd_type (gdbarch);
2297 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2298 return aarch64_vns_type (gdbarch);
2300 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2301 return aarch64_vnh_type (gdbarch);
2303 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2304 return aarch64_vnb_type (gdbarch);
2306 internal_error (__FILE__, __LINE__,
2307 _("aarch64_pseudo_register_type: bad register number %d"),
2311 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2314 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2315 struct reggroup *group)
2317 regnum -= gdbarch_num_regs (gdbarch);
2319 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2320 return group == all_reggroup || group == vector_reggroup;
2321 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2322 return (group == all_reggroup || group == vector_reggroup
2323 || group == float_reggroup);
2324 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2325 return (group == all_reggroup || group == vector_reggroup
2326 || group == float_reggroup);
2327 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2328 return group == all_reggroup || group == vector_reggroup;
2329 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2330 return group == all_reggroup || group == vector_reggroup;
2332 return group == all_reggroup;
2335 /* Implement the "pseudo_register_read_value" gdbarch method. */
2337 static struct value *
2338 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2339 struct regcache *regcache,
2342 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2343 struct value *result_value;
2346 result_value = allocate_value (register_type (gdbarch, regnum));
2347 VALUE_LVAL (result_value) = lval_register;
2348 VALUE_REGNUM (result_value) = regnum;
2349 buf = value_contents_raw (result_value);
2351 regnum -= gdbarch_num_regs (gdbarch);
2353 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2355 enum register_status status;
2358 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2359 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2360 if (status != REG_VALID)
2361 mark_value_bytes_unavailable (result_value, 0,
2362 TYPE_LENGTH (value_type (result_value)));
2364 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2365 return result_value;
2368 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2370 enum register_status status;
2373 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2374 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2375 if (status != REG_VALID)
2376 mark_value_bytes_unavailable (result_value, 0,
2377 TYPE_LENGTH (value_type (result_value)));
2379 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2380 return result_value;
2383 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2385 enum register_status status;
2388 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2389 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2390 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2391 return result_value;
2394 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2396 enum register_status status;
2399 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2400 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2401 if (status != REG_VALID)
2402 mark_value_bytes_unavailable (result_value, 0,
2403 TYPE_LENGTH (value_type (result_value)));
2405 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2406 return result_value;
2409 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2411 enum register_status status;
2414 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2415 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2416 if (status != REG_VALID)
2417 mark_value_bytes_unavailable (result_value, 0,
2418 TYPE_LENGTH (value_type (result_value)));
2420 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2421 return result_value;
2424 gdb_assert_not_reached ("regnum out of bound");
2427 /* Implement the "pseudo_register_write" gdbarch method. */
2430 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2431 int regnum, const gdb_byte *buf)
2433 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2435 /* Ensure the register buffer is zero, we want gdb writes of the
2436 various 'scalar' pseudo registers to behavior like architectural
2437 writes, register width bytes are written the remainder are set to
2439 memset (reg_buf, 0, sizeof (reg_buf));
2441 regnum -= gdbarch_num_regs (gdbarch);
2443 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2445 /* pseudo Q registers */
2448 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2449 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2450 regcache_raw_write (regcache, v_regnum, reg_buf);
2454 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2456 /* pseudo D registers */
2459 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2460 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2461 regcache_raw_write (regcache, v_regnum, reg_buf);
2465 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2469 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2470 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2471 regcache_raw_write (regcache, v_regnum, reg_buf);
2475 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2477 /* pseudo H registers */
2480 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2481 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2482 regcache_raw_write (regcache, v_regnum, reg_buf);
2486 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2488 /* pseudo B registers */
2491 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2492 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2493 regcache_raw_write (regcache, v_regnum, reg_buf);
2497 gdb_assert_not_reached ("regnum out of bound");
2500 /* Callback function for user_reg_add. */
2502 static struct value *
2503 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2505 const int *reg_p = baton;
2507 return value_of_register (*reg_p, frame);
2511 /* Implement the "software_single_step" gdbarch method, needed to
2512 single step through atomic sequences on AArch64. */
2515 aarch64_software_single_step (struct frame_info *frame)
2517 struct gdbarch *gdbarch = get_frame_arch (frame);
2518 struct address_space *aspace = get_frame_address_space (frame);
2519 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2520 const int insn_size = 4;
2521 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2522 CORE_ADDR pc = get_frame_pc (frame);
2523 CORE_ADDR breaks[2] = { -1, -1 };
2525 CORE_ADDR closing_insn = 0;
2526 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2527 byte_order_for_code);
2530 int bc_insn_count = 0; /* Conditional branch instruction count. */
2531 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2533 /* Look for a Load Exclusive instruction which begins the sequence. */
2534 if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
2537 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2543 insn = read_memory_unsigned_integer (loc, insn_size,
2544 byte_order_for_code);
2546 /* Check if the instruction is a conditional branch. */
2547 if (decode_bcond (loc, insn, &cond, &offset))
2549 if (bc_insn_count >= 1)
2552 /* It is, so we'll try to set a breakpoint at the destination. */
2553 breaks[1] = loc + offset;
2559 /* Look for the Store Exclusive which closes the atomic sequence. */
2560 if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
2567 /* We didn't find a closing Store Exclusive instruction, fall back. */
2571 /* Insert breakpoint after the end of the atomic sequence. */
2572 breaks[0] = loc + insn_size;
2574 /* Check for duplicated breakpoints, and also check that the second
2575 breakpoint is not within the atomic sequence. */
2577 && (breaks[1] == breaks[0]
2578 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2579 last_breakpoint = 0;
2581 /* Insert the breakpoint at the end of the sequence, and one at the
2582 destination of the conditional branch, if it exists. */
2583 for (index = 0; index <= last_breakpoint; index++)
2584 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2589 /* Initialize the current architecture based on INFO. If possible,
2590 re-use an architecture from ARCHES, which is a list of
2591 architectures already created during this debugging session.
2593 Called e.g. at program startup, when reading a core file, and when
2594 reading a binary file. */
2596 static struct gdbarch *
2597 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2599 struct gdbarch_tdep *tdep;
2600 struct gdbarch *gdbarch;
2601 struct gdbarch_list *best_arch;
2602 struct tdesc_arch_data *tdesc_data = NULL;
2603 const struct target_desc *tdesc = info.target_desc;
2605 int have_fpa_registers = 1;
2607 const struct tdesc_feature *feature;
2609 int num_pseudo_regs = 0;
2611 /* Ensure we always have a target descriptor. */
2612 if (!tdesc_has_registers (tdesc))
2613 tdesc = tdesc_aarch64;
2617 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2619 if (feature == NULL)
2622 tdesc_data = tdesc_data_alloc ();
2624 /* Validate the descriptor provides the mandatory core R registers
2625 and allocate their numbers. */
2626 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2628 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2629 aarch64_r_register_names[i]);
2631 num_regs = AARCH64_X0_REGNUM + i;
2633 /* Look for the V registers. */
2634 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2637 /* Validate the descriptor provides the mandatory V registers
2638 and allocate their numbers. */
2639 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2641 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2642 aarch64_v_register_names[i]);
2644 num_regs = AARCH64_V0_REGNUM + i;
2646 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2647 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2648 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2649 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2650 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2655 tdesc_data_cleanup (tdesc_data);
2659 /* AArch64 code is always little-endian. */
2660 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2662 /* If there is already a candidate, use it. */
2663 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2665 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2667 /* Found a match. */
2671 if (best_arch != NULL)
2673 if (tdesc_data != NULL)
2674 tdesc_data_cleanup (tdesc_data);
2675 return best_arch->gdbarch;
2678 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
2679 gdbarch = gdbarch_alloc (&info, tdep);
2681 /* This should be low enough for everything. */
2682 tdep->lowest_pc = 0x20;
2683 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2684 tdep->jb_elt_size = 8;
2686 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2687 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2689 /* Frame handling. */
2690 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2691 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2692 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2694 /* Advance PC across function entry code. */
2695 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2697 /* The stack grows downward. */
2698 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2700 /* Breakpoint manipulation. */
2701 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2702 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2703 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2705 /* Information about registers, etc. */
2706 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2707 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2708 set_gdbarch_num_regs (gdbarch, num_regs);
2710 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2711 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2712 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2713 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2714 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2715 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2716 aarch64_pseudo_register_reggroup_p);
2719 set_gdbarch_short_bit (gdbarch, 16);
2720 set_gdbarch_int_bit (gdbarch, 32);
2721 set_gdbarch_float_bit (gdbarch, 32);
2722 set_gdbarch_double_bit (gdbarch, 64);
2723 set_gdbarch_long_double_bit (gdbarch, 128);
2724 set_gdbarch_long_bit (gdbarch, 64);
2725 set_gdbarch_long_long_bit (gdbarch, 64);
2726 set_gdbarch_ptr_bit (gdbarch, 64);
2727 set_gdbarch_char_signed (gdbarch, 0);
2728 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2729 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2730 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2732 /* Internal <-> external register number maps. */
2733 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2735 /* Returning results. */
2736 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2739 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2741 /* Virtual tables. */
2742 set_gdbarch_vbit_in_delta (gdbarch, 1);
2744 /* Hook in the ABI-specific overrides, if they have been registered. */
2745 info.target_desc = tdesc;
2746 info.tdep_info = (void *) tdesc_data;
2747 gdbarch_init_osabi (info, gdbarch);
2749 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2751 /* Add some default predicates. */
2752 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2753 dwarf2_append_unwinders (gdbarch);
2754 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2756 frame_base_set_default (gdbarch, &aarch64_normal_base);
2758 /* Now we have tuned the configuration, set a few final things,
2759 based on what the OS ABI has told us. */
2761 if (tdep->jb_pc >= 0)
2762 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2764 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2766 /* Add standard register aliases. */
2767 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2768 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2769 value_of_aarch64_user_reg,
2770 &aarch64_register_aliases[i].regnum);
2776 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2778 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2783 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2784 paddress (gdbarch, tdep->lowest_pc));
2787 /* Suppress warning from -Wmissing-prototypes. */
2788 extern initialize_file_ftype _initialize_aarch64_tdep;
2791 _initialize_aarch64_tdep (void)
2793 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2796 initialize_tdesc_aarch64 ();
2798 /* Debug this file's internals. */
2799 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2800 Set AArch64 debugging."), _("\
2801 Show AArch64 debugging."), _("\
2802 When on, AArch64 specific debugging is enabled."),
2805 &setdebuglist, &showdebuglist);
2808 /* AArch64 process record-replay related structures, defines etc. */
2810 #define submask(x) ((1L << ((x) + 1)) - 1)
2811 #define bit(obj,st) (((obj) >> (st)) & 1)
2812 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
2814 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2817 unsigned int reg_len = LENGTH; \
2820 REGS = XNEWVEC (uint32_t, reg_len); \
2821 memcpy(®S[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
2826 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2829 unsigned int mem_len = LENGTH; \
2832 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2833 memcpy(&MEMS->len, &RECORD_BUF[0], \
2834 sizeof(struct aarch64_mem_r) * LENGTH); \
2839 /* AArch64 record/replay structures and enumerations. */
2841 struct aarch64_mem_r
2843 uint64_t len; /* Record length. */
2844 uint64_t addr; /* Memory address. */
2847 enum aarch64_record_result
2849 AARCH64_RECORD_SUCCESS,
2850 AARCH64_RECORD_FAILURE,
2851 AARCH64_RECORD_UNSUPPORTED,
2852 AARCH64_RECORD_UNKNOWN
2855 typedef struct insn_decode_record_t
2857 struct gdbarch *gdbarch;
2858 struct regcache *regcache;
2859 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2860 uint32_t aarch64_insn; /* Insn to be recorded. */
2861 uint32_t mem_rec_count; /* Count of memory records. */
2862 uint32_t reg_rec_count; /* Count of register records. */
2863 uint32_t *aarch64_regs; /* Registers to be recorded. */
2864 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2865 } insn_decode_record;
2867 /* Record handler for data processing - register instructions. */
2870 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2872 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2873 uint32_t record_buf[4];
2875 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2876 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2877 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2879 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2883 /* Logical (shifted register). */
2884 if (insn_bits24_27 == 0x0a)
2885 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2887 else if (insn_bits24_27 == 0x0b)
2888 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2890 return AARCH64_RECORD_UNKNOWN;
2892 record_buf[0] = reg_rd;
2893 aarch64_insn_r->reg_rec_count = 1;
2895 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2899 if (insn_bits24_27 == 0x0b)
2901 /* Data-processing (3 source). */
2902 record_buf[0] = reg_rd;
2903 aarch64_insn_r->reg_rec_count = 1;
2905 else if (insn_bits24_27 == 0x0a)
2907 if (insn_bits21_23 == 0x00)
2909 /* Add/subtract (with carry). */
2910 record_buf[0] = reg_rd;
2911 aarch64_insn_r->reg_rec_count = 1;
2912 if (bit (aarch64_insn_r->aarch64_insn, 29))
2914 record_buf[1] = AARCH64_CPSR_REGNUM;
2915 aarch64_insn_r->reg_rec_count = 2;
2918 else if (insn_bits21_23 == 0x02)
2920 /* Conditional compare (register) and conditional compare
2921 (immediate) instructions. */
2922 record_buf[0] = AARCH64_CPSR_REGNUM;
2923 aarch64_insn_r->reg_rec_count = 1;
2925 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2927 /* CConditional select. */
2928 /* Data-processing (2 source). */
2929 /* Data-processing (1 source). */
2930 record_buf[0] = reg_rd;
2931 aarch64_insn_r->reg_rec_count = 1;
2934 return AARCH64_RECORD_UNKNOWN;
2938 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2940 return AARCH64_RECORD_SUCCESS;
2943 /* Record handler for data processing - immediate instructions. */
2946 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2948 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
2949 uint32_t record_buf[4];
2951 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2952 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
2953 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2954 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2956 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2957 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2958 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2960 record_buf[0] = reg_rd;
2961 aarch64_insn_r->reg_rec_count = 1;
2963 else if (insn_bits24_27 == 0x01)
2965 /* Add/Subtract (immediate). */
2966 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2967 record_buf[0] = reg_rd;
2968 aarch64_insn_r->reg_rec_count = 1;
2970 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2972 else if (insn_bits24_27 == 0x02 && !insn_bit23)
2974 /* Logical (immediate). */
2975 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
2976 record_buf[0] = reg_rd;
2977 aarch64_insn_r->reg_rec_count = 1;
2979 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2982 return AARCH64_RECORD_UNKNOWN;
2984 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2986 return AARCH64_RECORD_SUCCESS;
2989 /* Record handler for branch, exception generation and system instructions. */
2992 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
2994 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
2995 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
2996 uint32_t record_buf[4];
2998 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2999 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3000 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3002 if (insn_bits28_31 == 0x0d)
3004 /* Exception generation instructions. */
3005 if (insn_bits24_27 == 0x04)
3007 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3008 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3009 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3011 ULONGEST svc_number;
3013 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3015 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3019 return AARCH64_RECORD_UNSUPPORTED;
3021 /* System instructions. */
3022 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3024 uint32_t reg_rt, reg_crn;
3026 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3027 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3029 /* Record rt in case of sysl and mrs instructions. */
3030 if (bit (aarch64_insn_r->aarch64_insn, 21))
3032 record_buf[0] = reg_rt;
3033 aarch64_insn_r->reg_rec_count = 1;
3035 /* Record cpsr for hint and msr(immediate) instructions. */
3036 else if (reg_crn == 0x02 || reg_crn == 0x04)
3038 record_buf[0] = AARCH64_CPSR_REGNUM;
3039 aarch64_insn_r->reg_rec_count = 1;
3042 /* Unconditional branch (register). */
3043 else if((insn_bits24_27 & 0x0e) == 0x06)
3045 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3046 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3047 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3050 return AARCH64_RECORD_UNKNOWN;
3052 /* Unconditional branch (immediate). */
3053 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3055 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3056 if (bit (aarch64_insn_r->aarch64_insn, 31))
3057 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3060 /* Compare & branch (immediate), Test & branch (immediate) and
3061 Conditional branch (immediate). */
3062 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3064 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3066 return AARCH64_RECORD_SUCCESS;
3069 /* Record handler for advanced SIMD load and store instructions. */
3072 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3075 uint64_t addr_offset = 0;
3076 uint32_t record_buf[24];
3077 uint64_t record_buf_mem[24];
3078 uint32_t reg_rn, reg_rt;
3079 uint32_t reg_index = 0, mem_index = 0;
3080 uint8_t opcode_bits, size_bits;
3082 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3083 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3084 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3085 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3086 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3090 fprintf_unfiltered (gdb_stdlog,
3091 "Process record: Advanced SIMD load/store\n");
3094 /* Load/store single structure. */
3095 if (bit (aarch64_insn_r->aarch64_insn, 24))
3097 uint8_t sindex, scale, selem, esize, replicate = 0;
3098 scale = opcode_bits >> 2;
3099 selem = ((opcode_bits & 0x02) |
3100 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3104 if (size_bits & 0x01)
3105 return AARCH64_RECORD_UNKNOWN;
3108 if ((size_bits >> 1) & 0x01)
3109 return AARCH64_RECORD_UNKNOWN;
3110 if (size_bits & 0x01)
3112 if (!((opcode_bits >> 1) & 0x01))
3115 return AARCH64_RECORD_UNKNOWN;
3119 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3126 return AARCH64_RECORD_UNKNOWN;
3132 for (sindex = 0; sindex < selem; sindex++)
3134 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3135 reg_rt = (reg_rt + 1) % 32;
3139 for (sindex = 0; sindex < selem; sindex++)
3140 if (bit (aarch64_insn_r->aarch64_insn, 22))
3141 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3144 record_buf_mem[mem_index++] = esize / 8;
3145 record_buf_mem[mem_index++] = address + addr_offset;
3147 addr_offset = addr_offset + (esize / 8);
3148 reg_rt = (reg_rt + 1) % 32;
3151 /* Load/store multiple structure. */
3154 uint8_t selem, esize, rpt, elements;
3155 uint8_t eindex, rindex;
3157 esize = 8 << size_bits;
3158 if (bit (aarch64_insn_r->aarch64_insn, 30))
3159 elements = 128 / esize;
3161 elements = 64 / esize;
3163 switch (opcode_bits)
3165 /*LD/ST4 (4 Registers). */
3170 /*LD/ST1 (4 Registers). */
3175 /*LD/ST3 (3 Registers). */
3180 /*LD/ST1 (3 Registers). */
3185 /*LD/ST1 (1 Register). */
3190 /*LD/ST2 (2 Registers). */
3195 /*LD/ST1 (2 Registers). */
3201 return AARCH64_RECORD_UNSUPPORTED;
3204 for (rindex = 0; rindex < rpt; rindex++)
3205 for (eindex = 0; eindex < elements; eindex++)
3207 uint8_t reg_tt, sindex;
3208 reg_tt = (reg_rt + rindex) % 32;
3209 for (sindex = 0; sindex < selem; sindex++)
3211 if (bit (aarch64_insn_r->aarch64_insn, 22))
3212 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3215 record_buf_mem[mem_index++] = esize / 8;
3216 record_buf_mem[mem_index++] = address + addr_offset;
3218 addr_offset = addr_offset + (esize / 8);
3219 reg_tt = (reg_tt + 1) % 32;
3224 if (bit (aarch64_insn_r->aarch64_insn, 23))
3225 record_buf[reg_index++] = reg_rn;
3227 aarch64_insn_r->reg_rec_count = reg_index;
3228 aarch64_insn_r->mem_rec_count = mem_index / 2;
3229 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3231 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3233 return AARCH64_RECORD_SUCCESS;
3236 /* Record handler for load and store instructions. */
3239 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3241 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3242 uint8_t insn_bit23, insn_bit21;
3243 uint8_t opc, size_bits, ld_flag, vector_flag;
3244 uint32_t reg_rn, reg_rt, reg_rt2;
3245 uint64_t datasize, offset;
3246 uint32_t record_buf[8];
3247 uint64_t record_buf_mem[8];
3250 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3251 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3252 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3253 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3254 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3255 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3256 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3257 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3258 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3259 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3260 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3262 /* Load/store exclusive. */
3263 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3267 fprintf_unfiltered (gdb_stdlog,
3268 "Process record: load/store exclusive\n");
3273 record_buf[0] = reg_rt;
3274 aarch64_insn_r->reg_rec_count = 1;
3277 record_buf[1] = reg_rt2;
3278 aarch64_insn_r->reg_rec_count = 2;
3284 datasize = (8 << size_bits) * 2;
3286 datasize = (8 << size_bits);
3287 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3289 record_buf_mem[0] = datasize / 8;
3290 record_buf_mem[1] = address;
3291 aarch64_insn_r->mem_rec_count = 1;
3294 /* Save register rs. */
3295 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3296 aarch64_insn_r->reg_rec_count = 1;
3300 /* Load register (literal) instructions decoding. */
3301 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3305 fprintf_unfiltered (gdb_stdlog,
3306 "Process record: load register (literal)\n");
3309 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3311 record_buf[0] = reg_rt;
3312 aarch64_insn_r->reg_rec_count = 1;
3314 /* All types of load/store pair instructions decoding. */
3315 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3319 fprintf_unfiltered (gdb_stdlog,
3320 "Process record: load/store pair\n");
3327 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3328 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3332 record_buf[0] = reg_rt;
3333 record_buf[1] = reg_rt2;
3335 aarch64_insn_r->reg_rec_count = 2;
3340 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3342 size_bits = size_bits >> 1;
3343 datasize = 8 << (2 + size_bits);
3344 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3345 offset = offset << (2 + size_bits);
3346 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3348 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3350 if (imm7_off & 0x40)
3351 address = address - offset;
3353 address = address + offset;
3356 record_buf_mem[0] = datasize / 8;
3357 record_buf_mem[1] = address;
3358 record_buf_mem[2] = datasize / 8;
3359 record_buf_mem[3] = address + (datasize / 8);
3360 aarch64_insn_r->mem_rec_count = 2;
3362 if (bit (aarch64_insn_r->aarch64_insn, 23))
3363 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3365 /* Load/store register (unsigned immediate) instructions. */
3366 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3368 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3375 if (size_bits != 0x03)
3378 return AARCH64_RECORD_UNKNOWN;
3382 fprintf_unfiltered (gdb_stdlog,
3383 "Process record: load/store (unsigned immediate):"
3384 " size %x V %d opc %x\n", size_bits, vector_flag,
3390 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3391 datasize = 8 << size_bits;
3392 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3394 offset = offset << size_bits;
3395 address = address + offset;
3397 record_buf_mem[0] = datasize >> 3;
3398 record_buf_mem[1] = address;
3399 aarch64_insn_r->mem_rec_count = 1;
3404 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3406 record_buf[0] = reg_rt;
3407 aarch64_insn_r->reg_rec_count = 1;
3410 /* Load/store register (register offset) instructions. */
3411 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3412 && insn_bits10_11 == 0x02 && insn_bit21)
3416 fprintf_unfiltered (gdb_stdlog,
3417 "Process record: load/store (register offset)\n");
3419 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3426 if (size_bits != 0x03)
3429 return AARCH64_RECORD_UNKNOWN;
3433 uint64_t reg_rm_val;
3434 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3435 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
3436 if (bit (aarch64_insn_r->aarch64_insn, 12))
3437 offset = reg_rm_val << size_bits;
3439 offset = reg_rm_val;
3440 datasize = 8 << size_bits;
3441 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3443 address = address + offset;
3444 record_buf_mem[0] = datasize >> 3;
3445 record_buf_mem[1] = address;
3446 aarch64_insn_r->mem_rec_count = 1;
3451 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3453 record_buf[0] = reg_rt;
3454 aarch64_insn_r->reg_rec_count = 1;
3457 /* Load/store register (immediate and unprivileged) instructions. */
3458 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3463 fprintf_unfiltered (gdb_stdlog,
3464 "Process record: load/store (immediate and unprivileged)\n");
3466 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3473 if (size_bits != 0x03)
3476 return AARCH64_RECORD_UNKNOWN;
3481 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3482 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3483 datasize = 8 << size_bits;
3484 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3486 if (insn_bits10_11 != 0x01)
3488 if (imm9_off & 0x0100)
3489 address = address - offset;
3491 address = address + offset;
3493 record_buf_mem[0] = datasize >> 3;
3494 record_buf_mem[1] = address;
3495 aarch64_insn_r->mem_rec_count = 1;
3500 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3502 record_buf[0] = reg_rt;
3503 aarch64_insn_r->reg_rec_count = 1;
3505 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3506 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3508 /* Advanced SIMD load/store instructions. */
3510 return aarch64_record_asimd_load_store (aarch64_insn_r);
3512 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3514 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3516 return AARCH64_RECORD_SUCCESS;
3519 /* Record handler for data processing SIMD and floating point instructions. */
3522 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3524 uint8_t insn_bit21, opcode, rmode, reg_rd;
3525 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3526 uint8_t insn_bits11_14;
3527 uint32_t record_buf[2];
3529 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3530 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3531 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3532 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3533 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3534 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3535 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3536 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3537 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3541 fprintf_unfiltered (gdb_stdlog,
3542 "Process record: data processing SIMD/FP: ");
3545 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3547 /* Floating point - fixed point conversion instructions. */
3551 fprintf_unfiltered (gdb_stdlog, "FP - fixed point conversion");
3553 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3554 record_buf[0] = reg_rd;
3556 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3558 /* Floating point - conditional compare instructions. */
3559 else if (insn_bits10_11 == 0x01)
3562 fprintf_unfiltered (gdb_stdlog, "FP - conditional compare");
3564 record_buf[0] = AARCH64_CPSR_REGNUM;
3566 /* Floating point - data processing (2-source) and
3567 conditional select instructions. */
3568 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3571 fprintf_unfiltered (gdb_stdlog, "FP - DP (2-source)");
3573 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3575 else if (insn_bits10_11 == 0x00)
3577 /* Floating point - immediate instructions. */
3578 if ((insn_bits12_15 & 0x01) == 0x01
3579 || (insn_bits12_15 & 0x07) == 0x04)
3582 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3583 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3585 /* Floating point - compare instructions. */
3586 else if ((insn_bits12_15 & 0x03) == 0x02)
3589 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3590 record_buf[0] = AARCH64_CPSR_REGNUM;
3592 /* Floating point - integer conversions instructions. */
3593 else if (insn_bits12_15 == 0x00)
3595 /* Convert float to integer instruction. */
3596 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3599 fprintf_unfiltered (gdb_stdlog, "float to int conversion");
3601 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3603 /* Convert integer to float instruction. */
3604 else if ((opcode >> 1) == 0x01 && !rmode)
3607 fprintf_unfiltered (gdb_stdlog, "int to float conversion");
3609 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3611 /* Move float to integer instruction. */
3612 else if ((opcode >> 1) == 0x03)
3615 fprintf_unfiltered (gdb_stdlog, "move float to int");
3617 if (!(opcode & 0x01))
3618 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3620 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3623 return AARCH64_RECORD_UNKNOWN;
3626 return AARCH64_RECORD_UNKNOWN;
3629 return AARCH64_RECORD_UNKNOWN;
3631 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3634 fprintf_unfiltered (gdb_stdlog, "SIMD copy");
3636 /* Advanced SIMD copy instructions. */
3637 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3638 && !bit (aarch64_insn_r->aarch64_insn, 15)
3639 && bit (aarch64_insn_r->aarch64_insn, 10))
3641 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3642 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3644 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3647 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3649 /* All remaining floating point or advanced SIMD instructions. */
3653 fprintf_unfiltered (gdb_stdlog, "all remain");
3655 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3659 fprintf_unfiltered (gdb_stdlog, "\n");
3661 aarch64_insn_r->reg_rec_count++;
3662 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3663 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3665 return AARCH64_RECORD_SUCCESS;
3668 /* Decodes insns type and invokes its record handler. */
3671 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3673 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3675 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3676 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3677 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3678 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3680 /* Data processing - immediate instructions. */
3681 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3682 return aarch64_record_data_proc_imm (aarch64_insn_r);
3684 /* Branch, exception generation and system instructions. */
3685 if (ins_bit26 && !ins_bit27 && ins_bit28)
3686 return aarch64_record_branch_except_sys (aarch64_insn_r);
3688 /* Load and store instructions. */
3689 if (!ins_bit25 && ins_bit27)
3690 return aarch64_record_load_store (aarch64_insn_r);
3692 /* Data processing - register instructions. */
3693 if (ins_bit25 && !ins_bit26 && ins_bit27)
3694 return aarch64_record_data_proc_reg (aarch64_insn_r);
3696 /* Data processing - SIMD and floating point instructions. */
3697 if (ins_bit25 && ins_bit26 && ins_bit27)
3698 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3700 return AARCH64_RECORD_UNSUPPORTED;
3703 /* Cleans up local record registers and memory allocations. */
3706 deallocate_reg_mem (insn_decode_record *record)
3708 xfree (record->aarch64_regs);
3709 xfree (record->aarch64_mems);
3712 /* Parse the current instruction and record the values of the registers and
3713 memory that will be changed in current instruction to record_arch_list
3714 return -1 if something is wrong. */
3717 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3718 CORE_ADDR insn_addr)
3720 uint32_t rec_no = 0;
3721 uint8_t insn_size = 4;
3723 ULONGEST t_bit = 0, insn_id = 0;
3724 gdb_byte buf[insn_size];
3725 insn_decode_record aarch64_record;
3727 memset (&buf[0], 0, insn_size);
3728 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3729 target_read_memory (insn_addr, &buf[0], insn_size);
3730 aarch64_record.aarch64_insn
3731 = (uint32_t) extract_unsigned_integer (&buf[0],
3733 gdbarch_byte_order (gdbarch));
3734 aarch64_record.regcache = regcache;
3735 aarch64_record.this_addr = insn_addr;
3736 aarch64_record.gdbarch = gdbarch;
3738 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3739 if (ret == AARCH64_RECORD_UNSUPPORTED)
3741 printf_unfiltered (_("Process record does not support instruction "
3742 "0x%0x at address %s.\n"),
3743 aarch64_record.aarch64_insn,
3744 paddress (gdbarch, insn_addr));
3750 /* Record registers. */
3751 record_full_arch_list_add_reg (aarch64_record.regcache,
3753 /* Always record register CPSR. */
3754 record_full_arch_list_add_reg (aarch64_record.regcache,
3755 AARCH64_CPSR_REGNUM);
3756 if (aarch64_record.aarch64_regs)
3757 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3758 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3759 aarch64_record.aarch64_regs[rec_no]))
3762 /* Record memories. */
3763 if (aarch64_record.aarch64_mems)
3764 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3765 if (record_full_arch_list_add_mem
3766 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3767 aarch64_record.aarch64_mems[rec_no].len))
3770 if (record_full_arch_list_add_end ())
3774 deallocate_reg_mem (&aarch64_record);