1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
46 #include "aarch64-tdep.h"
49 #include "elf/aarch64.h"
54 #include "record-full.h"
56 #include "features/aarch64.c"
58 /* Pseudo register base numbers. */
59 #define AARCH64_Q0_REGNUM 0
60 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
61 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
62 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
63 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
65 /* The standard register names, and all the valid aliases for them. */
68 const char *const name;
70 } aarch64_register_aliases[] =
72 /* 64-bit register names. */
73 {"fp", AARCH64_FP_REGNUM},
74 {"lr", AARCH64_LR_REGNUM},
75 {"sp", AARCH64_SP_REGNUM},
77 /* 32-bit register names. */
78 {"w0", AARCH64_X0_REGNUM + 0},
79 {"w1", AARCH64_X0_REGNUM + 1},
80 {"w2", AARCH64_X0_REGNUM + 2},
81 {"w3", AARCH64_X0_REGNUM + 3},
82 {"w4", AARCH64_X0_REGNUM + 4},
83 {"w5", AARCH64_X0_REGNUM + 5},
84 {"w6", AARCH64_X0_REGNUM + 6},
85 {"w7", AARCH64_X0_REGNUM + 7},
86 {"w8", AARCH64_X0_REGNUM + 8},
87 {"w9", AARCH64_X0_REGNUM + 9},
88 {"w10", AARCH64_X0_REGNUM + 10},
89 {"w11", AARCH64_X0_REGNUM + 11},
90 {"w12", AARCH64_X0_REGNUM + 12},
91 {"w13", AARCH64_X0_REGNUM + 13},
92 {"w14", AARCH64_X0_REGNUM + 14},
93 {"w15", AARCH64_X0_REGNUM + 15},
94 {"w16", AARCH64_X0_REGNUM + 16},
95 {"w17", AARCH64_X0_REGNUM + 17},
96 {"w18", AARCH64_X0_REGNUM + 18},
97 {"w19", AARCH64_X0_REGNUM + 19},
98 {"w20", AARCH64_X0_REGNUM + 20},
99 {"w21", AARCH64_X0_REGNUM + 21},
100 {"w22", AARCH64_X0_REGNUM + 22},
101 {"w23", AARCH64_X0_REGNUM + 23},
102 {"w24", AARCH64_X0_REGNUM + 24},
103 {"w25", AARCH64_X0_REGNUM + 25},
104 {"w26", AARCH64_X0_REGNUM + 26},
105 {"w27", AARCH64_X0_REGNUM + 27},
106 {"w28", AARCH64_X0_REGNUM + 28},
107 {"w29", AARCH64_X0_REGNUM + 29},
108 {"w30", AARCH64_X0_REGNUM + 30},
111 {"ip0", AARCH64_X0_REGNUM + 16},
112 {"ip1", AARCH64_X0_REGNUM + 17}
115 /* The required core 'R' registers. */
116 static const char *const aarch64_r_register_names[] =
118 /* These registers must appear in consecutive RAW register number
119 order and they must begin with AARCH64_X0_REGNUM! */
120 "x0", "x1", "x2", "x3",
121 "x4", "x5", "x6", "x7",
122 "x8", "x9", "x10", "x11",
123 "x12", "x13", "x14", "x15",
124 "x16", "x17", "x18", "x19",
125 "x20", "x21", "x22", "x23",
126 "x24", "x25", "x26", "x27",
127 "x28", "x29", "x30", "sp",
131 /* The FP/SIMD 'V' registers. */
132 static const char *const aarch64_v_register_names[] =
134 /* These registers must appear in consecutive RAW register number
135 order and they must begin with AARCH64_V0_REGNUM! */
136 "v0", "v1", "v2", "v3",
137 "v4", "v5", "v6", "v7",
138 "v8", "v9", "v10", "v11",
139 "v12", "v13", "v14", "v15",
140 "v16", "v17", "v18", "v19",
141 "v20", "v21", "v22", "v23",
142 "v24", "v25", "v26", "v27",
143 "v28", "v29", "v30", "v31",
148 /* AArch64 prologue cache structure. */
149 struct aarch64_prologue_cache
151 /* The stack pointer at the time this frame was created; i.e. the
152 caller's stack pointer when this function was called. It is used
153 to identify this frame. */
156 /* The frame base for this frame is just prev_sp - frame size.
157 FRAMESIZE is the distance from the frame pointer to the
158 initial stack pointer. */
161 /* The register used to hold the frame pointer for this frame. */
164 /* Saved register offsets. */
165 struct trad_frame_saved_reg *saved_regs;
168 /* Toggle this file's internal debugging dump. */
169 static int aarch64_debug;
172 show_aarch64_debug (struct ui_file *file, int from_tty,
173 struct cmd_list_element *c, const char *value)
175 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
178 /* Extract a signed value from a bit field within an instruction
181 INSN is the instruction opcode.
183 WIDTH specifies the width of the bit field to extract (in bits).
185 OFFSET specifies the least significant bit of the field where bits
186 are numbered zero counting from least to most significant. */
189 extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
191 unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
192 unsigned shift_r = sizeof (int32_t) * 8 - width;
194 return ((int32_t) insn << shift_l) >> shift_r;
197 /* Determine if specified bits within an instruction opcode matches a
200 INSN is the instruction opcode.
202 MASK specifies the bits within the opcode that are to be tested
203 agsinst for a match with PATTERN. */
206 decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
208 return (insn & mask) == pattern;
211 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
213 ADDR specifies the address of the opcode.
214 INSN specifies the opcode to test.
215 RD receives the 'rd' field from the decoded instruction.
216 RN receives the 'rn' field from the decoded instruction.
218 Return 1 if the opcodes matches and is decoded, otherwise 0. */
220 decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
223 if ((insn & 0x9f000000) == 0x91000000)
228 *rd = (insn >> 0) & 0x1f;
229 *rn = (insn >> 5) & 0x1f;
230 *imm = (insn >> 10) & 0xfff;
231 shift = (insn >> 22) & 0x3;
232 op_is_sub = (insn >> 30) & 0x1;
250 fprintf_unfiltered (gdb_stdlog,
251 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
252 core_addr_to_string_nz (addr), insn, *rd, *rn,
259 /* Decode an opcode if it represents an ADRP instruction.
261 ADDR specifies the address of the opcode.
262 INSN specifies the opcode to test.
263 RD receives the 'rd' field from the decoded instruction.
265 Return 1 if the opcodes matches and is decoded, otherwise 0. */
268 decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
270 if (decode_masked_match (insn, 0x9f000000, 0x90000000))
272 *rd = (insn >> 0) & 0x1f;
275 fprintf_unfiltered (gdb_stdlog,
276 "decode: 0x%s 0x%x adrp x%u, #?\n",
277 core_addr_to_string_nz (addr), insn, *rd);
283 /* Decode an opcode if it represents an branch immediate or branch
284 and link immediate instruction.
286 ADDR specifies the address of the opcode.
287 INSN specifies the opcode to test.
288 LINK receives the 'link' bit from the decoded instruction.
289 OFFSET receives the immediate offset from the decoded instruction.
291 Return 1 if the opcodes matches and is decoded, otherwise 0. */
294 decode_b (CORE_ADDR addr, uint32_t insn, unsigned *link, int32_t *offset)
296 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
297 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
298 if (decode_masked_match (insn, 0x7c000000, 0x14000000))
301 *offset = extract_signed_bitfield (insn, 26, 0) << 2;
304 fprintf_unfiltered (gdb_stdlog,
305 "decode: 0x%s 0x%x %s 0x%s\n",
306 core_addr_to_string_nz (addr), insn,
308 core_addr_to_string_nz (addr + *offset));
315 /* Decode an opcode if it represents a conditional branch instruction.
317 ADDR specifies the address of the opcode.
318 INSN specifies the opcode to test.
319 COND receives the branch condition field from the decoded
321 OFFSET receives the immediate offset from the decoded instruction.
323 Return 1 if the opcodes matches and is decoded, otherwise 0. */
326 decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
328 if (decode_masked_match (insn, 0xfe000000, 0x54000000))
330 *cond = (insn >> 0) & 0xf;
331 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
334 fprintf_unfiltered (gdb_stdlog,
335 "decode: 0x%s 0x%x b<%u> 0x%s\n",
336 core_addr_to_string_nz (addr), insn, *cond,
337 core_addr_to_string_nz (addr + *offset));
343 /* Decode an opcode if it represents a branch via register instruction.
345 ADDR specifies the address of the opcode.
346 INSN specifies the opcode to test.
347 LINK receives the 'link' bit from the decoded instruction.
348 RN receives the 'rn' field from the decoded instruction.
350 Return 1 if the opcodes matches and is decoded, otherwise 0. */
353 decode_br (CORE_ADDR addr, uint32_t insn, unsigned *link, unsigned *rn)
355 /* 8 4 0 6 2 8 4 0 */
356 /* blr 110101100011111100000000000rrrrr */
357 /* br 110101100001111100000000000rrrrr */
358 if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
360 *link = (insn >> 21) & 1;
361 *rn = (insn >> 5) & 0x1f;
364 fprintf_unfiltered (gdb_stdlog,
365 "decode: 0x%s 0x%x %s 0x%x\n",
366 core_addr_to_string_nz (addr), insn,
367 *link ? "blr" : "br", *rn);
374 /* Decode an opcode if it represents a CBZ or CBNZ instruction.
376 ADDR specifies the address of the opcode.
377 INSN specifies the opcode to test.
378 IS64 receives the 'sf' field from the decoded instruction.
379 OP receives the 'op' field from the decoded instruction.
380 RN receives the 'rn' field from the decoded instruction.
381 OFFSET receives the 'imm19' field from the decoded instruction.
383 Return 1 if the opcodes matches and is decoded, otherwise 0. */
386 decode_cb (CORE_ADDR addr,
387 uint32_t insn, int *is64, unsigned *op, unsigned *rn,
390 if (decode_masked_match (insn, 0x7e000000, 0x34000000))
392 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
393 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
395 *rn = (insn >> 0) & 0x1f;
396 *is64 = (insn >> 31) & 0x1;
397 *op = (insn >> 24) & 0x1;
398 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
401 fprintf_unfiltered (gdb_stdlog,
402 "decode: 0x%s 0x%x %s 0x%s\n",
403 core_addr_to_string_nz (addr), insn,
404 *op ? "cbnz" : "cbz",
405 core_addr_to_string_nz (addr + *offset));
411 /* Decode an opcode if it represents a ERET instruction.
413 ADDR specifies the address of the opcode.
414 INSN specifies the opcode to test.
416 Return 1 if the opcodes matches and is decoded, otherwise 0. */
419 decode_eret (CORE_ADDR addr, uint32_t insn)
421 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
422 if (insn == 0xd69f03e0)
425 fprintf_unfiltered (gdb_stdlog, "decode: 0x%s 0x%x eret\n",
426 core_addr_to_string_nz (addr), insn);
432 /* Decode an opcode if it represents a MOVZ instruction.
434 ADDR specifies the address of the opcode.
435 INSN specifies the opcode to test.
436 RD receives the 'rd' field from the decoded instruction.
438 Return 1 if the opcodes matches and is decoded, otherwise 0. */
441 decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
443 if (decode_masked_match (insn, 0xff800000, 0x52800000))
445 *rd = (insn >> 0) & 0x1f;
448 fprintf_unfiltered (gdb_stdlog,
449 "decode: 0x%s 0x%x movz x%u, #?\n",
450 core_addr_to_string_nz (addr), insn, *rd);
456 /* Decode an opcode if it represents a ORR (shifted register)
459 ADDR specifies the address of the opcode.
460 INSN specifies the opcode to test.
461 RD receives the 'rd' field from the decoded instruction.
462 RN receives the 'rn' field from the decoded instruction.
463 RM receives the 'rm' field from the decoded instruction.
464 IMM receives the 'imm6' field from the decoded instruction.
466 Return 1 if the opcodes matches and is decoded, otherwise 0. */
469 decode_orr_shifted_register_x (CORE_ADDR addr,
470 uint32_t insn, unsigned *rd, unsigned *rn,
471 unsigned *rm, int32_t *imm)
473 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
475 *rd = (insn >> 0) & 0x1f;
476 *rn = (insn >> 5) & 0x1f;
477 *rm = (insn >> 16) & 0x1f;
478 *imm = (insn >> 10) & 0x3f;
481 fprintf_unfiltered (gdb_stdlog,
482 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
483 core_addr_to_string_nz (addr), insn, *rd,
490 /* Decode an opcode if it represents a RET instruction.
492 ADDR specifies the address of the opcode.
493 INSN specifies the opcode to test.
494 RN receives the 'rn' field from the decoded instruction.
496 Return 1 if the opcodes matches and is decoded, otherwise 0. */
499 decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
501 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
503 *rn = (insn >> 5) & 0x1f;
505 fprintf_unfiltered (gdb_stdlog,
506 "decode: 0x%s 0x%x ret x%u\n",
507 core_addr_to_string_nz (addr), insn, *rn);
513 /* Decode an opcode if it represents the following instruction:
514 STP rt, rt2, [rn, #imm]
516 ADDR specifies the address of the opcode.
517 INSN specifies the opcode to test.
518 RT1 receives the 'rt' field from the decoded instruction.
519 RT2 receives the 'rt2' field from the decoded instruction.
520 RN receives the 'rn' field from the decoded instruction.
521 IMM receives the 'imm' field from the decoded instruction.
523 Return 1 if the opcodes matches and is decoded, otherwise 0. */
526 decode_stp_offset (CORE_ADDR addr,
528 unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
530 if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
532 *rt1 = (insn >> 0) & 0x1f;
533 *rn = (insn >> 5) & 0x1f;
534 *rt2 = (insn >> 10) & 0x1f;
535 *imm = extract_signed_bitfield (insn, 7, 15);
539 fprintf_unfiltered (gdb_stdlog,
540 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
541 core_addr_to_string_nz (addr), insn,
542 *rt1, *rt2, *rn, *imm);
548 /* Decode an opcode if it represents the following instruction:
549 STP rt, rt2, [rn, #imm]!
551 ADDR specifies the address of the opcode.
552 INSN specifies the opcode to test.
553 RT1 receives the 'rt' field from the decoded instruction.
554 RT2 receives the 'rt2' field from the decoded instruction.
555 RN receives the 'rn' field from the decoded instruction.
556 IMM receives the 'imm' field from the decoded instruction.
558 Return 1 if the opcodes matches and is decoded, otherwise 0. */
561 decode_stp_offset_wb (CORE_ADDR addr,
563 unsigned *rt1, unsigned *rt2, unsigned *rn,
566 if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
568 *rt1 = (insn >> 0) & 0x1f;
569 *rn = (insn >> 5) & 0x1f;
570 *rt2 = (insn >> 10) & 0x1f;
571 *imm = extract_signed_bitfield (insn, 7, 15);
575 fprintf_unfiltered (gdb_stdlog,
576 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
577 core_addr_to_string_nz (addr), insn,
578 *rt1, *rt2, *rn, *imm);
584 /* Decode an opcode if it represents the following instruction:
587 ADDR specifies the address of the opcode.
588 INSN specifies the opcode to test.
589 IS64 receives size field from the decoded instruction.
590 RT receives the 'rt' field from the decoded instruction.
591 RN receives the 'rn' field from the decoded instruction.
592 IMM receives the 'imm' field from the decoded instruction.
594 Return 1 if the opcodes matches and is decoded, otherwise 0. */
597 decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
598 unsigned *rn, int32_t *imm)
600 if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
602 *is64 = (insn >> 30) & 1;
603 *rt = (insn >> 0) & 0x1f;
604 *rn = (insn >> 5) & 0x1f;
605 *imm = extract_signed_bitfield (insn, 9, 12);
608 fprintf_unfiltered (gdb_stdlog,
609 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
610 core_addr_to_string_nz (addr), insn,
611 *is64 ? 'x' : 'w', *rt, *rn, *imm);
617 /* Decode an opcode if it represents a TB or TBNZ instruction.
619 ADDR specifies the address of the opcode.
620 INSN specifies the opcode to test.
621 OP receives the 'op' field from the decoded instruction.
622 BIT receives the bit position field from the decoded instruction.
623 RT receives 'rt' field from the decoded instruction.
624 IMM receives 'imm' field from the decoded instruction.
626 Return 1 if the opcodes matches and is decoded, otherwise 0. */
629 decode_tb (CORE_ADDR addr,
630 uint32_t insn, unsigned *op, unsigned *bit, unsigned *rt,
633 if (decode_masked_match (insn, 0x7e000000, 0x36000000))
635 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
636 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
638 *rt = (insn >> 0) & 0x1f;
639 *op = insn & (1 << 24);
640 *bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
641 *imm = extract_signed_bitfield (insn, 14, 5) << 2;
644 fprintf_unfiltered (gdb_stdlog,
645 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
646 core_addr_to_string_nz (addr), insn,
647 *op ? "tbnz" : "tbz", *rt, *bit,
648 core_addr_to_string_nz (addr + *imm));
654 /* Analyze a prologue, looking for a recognizable stack frame
655 and frame pointer. Scan until we encounter a store that could
656 clobber the stack frame unexpectedly, or an unknown instruction. */
659 aarch64_analyze_prologue (struct gdbarch *gdbarch,
660 CORE_ADDR start, CORE_ADDR limit,
661 struct aarch64_prologue_cache *cache)
663 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
665 pv_t regs[AARCH64_X_REGISTER_COUNT];
666 struct pv_area *stack;
667 struct cleanup *back_to;
669 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
670 regs[i] = pv_register (i, 0);
671 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
672 back_to = make_cleanup_free_pv_area (stack);
674 for (; start < limit; start += 4)
692 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
694 if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
695 regs[rd] = pv_add_constant (regs[rn], imm);
696 else if (decode_adrp (start, insn, &rd))
697 regs[rd] = pv_unknown ();
698 else if (decode_b (start, insn, &is_link, &offset))
700 /* Stop analysis on branch. */
703 else if (decode_bcond (start, insn, &cond, &offset))
705 /* Stop analysis on branch. */
708 else if (decode_br (start, insn, &is_link, &rn))
710 /* Stop analysis on branch. */
713 else if (decode_cb (start, insn, &is64, &op, &rn, &offset))
715 /* Stop analysis on branch. */
718 else if (decode_eret (start, insn))
720 /* Stop analysis on branch. */
723 else if (decode_movz (start, insn, &rd))
724 regs[rd] = pv_unknown ();
726 if (decode_orr_shifted_register_x (start, insn, &rd, &rn, &rm, &imm))
728 if (imm == 0 && rn == 31)
735 "aarch64: prologue analysis gave up addr=0x%s "
736 "opcode=0x%x (orr x register)\n",
737 core_addr_to_string_nz (start),
742 else if (decode_ret (start, insn, &rn))
744 /* Stop analysis on branch. */
747 else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
749 pv_area_store (stack, pv_add_constant (regs[rn], offset),
750 is64 ? 8 : 4, regs[rt]);
752 else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
754 /* If recording this store would invalidate the store area
755 (perhaps because rn is not known) then we should abandon
756 further prologue analysis. */
757 if (pv_area_store_would_trash (stack,
758 pv_add_constant (regs[rn], imm)))
761 if (pv_area_store_would_trash (stack,
762 pv_add_constant (regs[rn], imm + 8)))
765 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
767 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
770 else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
772 /* If recording this store would invalidate the store area
773 (perhaps because rn is not known) then we should abandon
774 further prologue analysis. */
775 if (pv_area_store_would_trash (stack,
776 pv_add_constant (regs[rn], imm)))
779 if (pv_area_store_would_trash (stack,
780 pv_add_constant (regs[rn], imm + 8)))
783 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
785 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
787 regs[rn] = pv_add_constant (regs[rn], imm);
789 else if (decode_tb (start, insn, &op, &bit, &rn, &offset))
791 /* Stop analysis on branch. */
797 fprintf_unfiltered (gdb_stdlog,
798 "aarch64: prologue analysis gave up addr=0x%s"
800 core_addr_to_string_nz (start), insn);
807 do_cleanups (back_to);
811 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
813 /* Frame pointer is fp. Frame size is constant. */
814 cache->framereg = AARCH64_FP_REGNUM;
815 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
817 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
819 /* Try the stack pointer. */
820 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
821 cache->framereg = AARCH64_SP_REGNUM;
825 /* We're just out of luck. We don't know where the frame is. */
826 cache->framereg = -1;
827 cache->framesize = 0;
830 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
834 if (pv_area_find_reg (stack, gdbarch, i, &offset))
835 cache->saved_regs[i].addr = offset;
838 do_cleanups (back_to);
842 /* Implement the "skip_prologue" gdbarch method. */
845 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
849 CORE_ADDR func_addr, limit_pc;
850 struct symtab_and_line sal;
852 /* See if we can determine the end of the prologue via the symbol
853 table. If so, then return either PC, or the PC after the
854 prologue, whichever is greater. */
855 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
857 CORE_ADDR post_prologue_pc
858 = skip_prologue_using_sal (gdbarch, func_addr);
860 if (post_prologue_pc != 0)
861 return max (pc, post_prologue_pc);
864 /* Can't determine prologue from the symbol table, need to examine
867 /* Find an upper limit on the function prologue using the debug
868 information. If the debug information could not be used to
869 provide that bound, then use an arbitrary large number as the
871 limit_pc = skip_prologue_using_sal (gdbarch, pc);
873 limit_pc = pc + 128; /* Magic. */
875 /* Try disassembling prologue. */
876 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
879 /* Scan the function prologue for THIS_FRAME and populate the prologue
883 aarch64_scan_prologue (struct frame_info *this_frame,
884 struct aarch64_prologue_cache *cache)
886 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
887 CORE_ADDR prologue_start;
888 CORE_ADDR prologue_end;
889 CORE_ADDR prev_pc = get_frame_pc (this_frame);
890 struct gdbarch *gdbarch = get_frame_arch (this_frame);
892 /* Assume we do not find a frame. */
893 cache->framereg = -1;
894 cache->framesize = 0;
896 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
899 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
903 /* No line info so use the current PC. */
904 prologue_end = prev_pc;
906 else if (sal.end < prologue_end)
908 /* The next line begins after the function end. */
909 prologue_end = sal.end;
912 prologue_end = min (prologue_end, prev_pc);
913 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
920 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
922 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
926 cache->framereg = AARCH64_FP_REGNUM;
927 cache->framesize = 16;
928 cache->saved_regs[29].addr = 0;
929 cache->saved_regs[30].addr = 8;
933 /* Allocate an aarch64_prologue_cache and fill it with information
934 about the prologue of *THIS_FRAME. */
936 static struct aarch64_prologue_cache *
937 aarch64_make_prologue_cache (struct frame_info *this_frame)
939 struct aarch64_prologue_cache *cache;
940 CORE_ADDR unwound_fp;
943 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
944 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
946 aarch64_scan_prologue (this_frame, cache);
948 if (cache->framereg == -1)
951 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
955 cache->prev_sp = unwound_fp + cache->framesize;
957 /* Calculate actual addresses of saved registers using offsets
958 determined by aarch64_analyze_prologue. */
959 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
960 if (trad_frame_addr_p (cache->saved_regs, reg))
961 cache->saved_regs[reg].addr += cache->prev_sp;
966 /* Our frame ID for a normal frame is the current function's starting
967 PC and the caller's SP when we were called. */
970 aarch64_prologue_this_id (struct frame_info *this_frame,
971 void **this_cache, struct frame_id *this_id)
973 struct aarch64_prologue_cache *cache;
977 if (*this_cache == NULL)
978 *this_cache = aarch64_make_prologue_cache (this_frame);
981 /* This is meant to halt the backtrace at "_start". */
982 pc = get_frame_pc (this_frame);
983 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
986 /* If we've hit a wall, stop. */
987 if (cache->prev_sp == 0)
990 func = get_frame_func (this_frame);
991 id = frame_id_build (cache->prev_sp, func);
995 /* Implement the "prev_register" frame_unwind method. */
997 static struct value *
998 aarch64_prologue_prev_register (struct frame_info *this_frame,
999 void **this_cache, int prev_regnum)
1001 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1002 struct aarch64_prologue_cache *cache;
1004 if (*this_cache == NULL)
1005 *this_cache = aarch64_make_prologue_cache (this_frame);
1006 cache = *this_cache;
1008 /* If we are asked to unwind the PC, then we need to return the LR
1009 instead. The prologue may save PC, but it will point into this
1010 frame's prologue, not the next frame's resume location. */
1011 if (prev_regnum == AARCH64_PC_REGNUM)
1015 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1016 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1019 /* SP is generally not saved to the stack, but this frame is
1020 identified by the next frame's stack pointer at the time of the
1021 call. The value was already reconstructed into PREV_SP. */
1027 | | | <- Previous SP
1030 +--| saved fp |<- FP
1034 if (prev_regnum == AARCH64_SP_REGNUM)
1035 return frame_unwind_got_constant (this_frame, prev_regnum,
1038 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1042 /* AArch64 prologue unwinder. */
1043 struct frame_unwind aarch64_prologue_unwind =
1046 default_frame_unwind_stop_reason,
1047 aarch64_prologue_this_id,
1048 aarch64_prologue_prev_register,
1050 default_frame_sniffer
1053 /* Allocate an aarch64_prologue_cache and fill it with information
1054 about the prologue of *THIS_FRAME. */
1056 static struct aarch64_prologue_cache *
1057 aarch64_make_stub_cache (struct frame_info *this_frame)
1060 struct aarch64_prologue_cache *cache;
1061 CORE_ADDR unwound_fp;
1063 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1064 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1067 = get_frame_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1072 /* Our frame ID for a stub frame is the current SP and LR. */
1075 aarch64_stub_this_id (struct frame_info *this_frame,
1076 void **this_cache, struct frame_id *this_id)
1078 struct aarch64_prologue_cache *cache;
1080 if (*this_cache == NULL)
1081 *this_cache = aarch64_make_stub_cache (this_frame);
1082 cache = *this_cache;
1084 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
1087 /* Implement the "sniffer" frame_unwind method. */
1090 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1091 struct frame_info *this_frame,
1092 void **this_prologue_cache)
1094 CORE_ADDR addr_in_block;
1097 addr_in_block = get_frame_address_in_block (this_frame);
1098 if (in_plt_section (addr_in_block)
1099 /* We also use the stub winder if the target memory is unreadable
1100 to avoid having the prologue unwinder trying to read it. */
1101 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1107 /* AArch64 stub unwinder. */
1108 struct frame_unwind aarch64_stub_unwind =
1111 default_frame_unwind_stop_reason,
1112 aarch64_stub_this_id,
1113 aarch64_prologue_prev_register,
1115 aarch64_stub_unwind_sniffer
1118 /* Return the frame base address of *THIS_FRAME. */
1121 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1123 struct aarch64_prologue_cache *cache;
1125 if (*this_cache == NULL)
1126 *this_cache = aarch64_make_prologue_cache (this_frame);
1127 cache = *this_cache;
1129 return cache->prev_sp - cache->framesize;
1132 /* AArch64 default frame base information. */
1133 struct frame_base aarch64_normal_base =
1135 &aarch64_prologue_unwind,
1136 aarch64_normal_frame_base,
1137 aarch64_normal_frame_base,
1138 aarch64_normal_frame_base
1141 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1142 dummy frame. The frame ID's base needs to match the TOS value
1143 saved by save_dummy_frame_tos () and returned from
1144 aarch64_push_dummy_call, and the PC needs to match the dummy
1145 frame's breakpoint. */
1147 static struct frame_id
1148 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1150 return frame_id_build (get_frame_register_unsigned (this_frame,
1152 get_frame_pc (this_frame));
1155 /* Implement the "unwind_pc" gdbarch method. */
1158 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1161 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1166 /* Implement the "unwind_sp" gdbarch method. */
1169 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1171 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1174 /* Return the value of the REGNUM register in the previous frame of
1177 static struct value *
1178 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1179 void **this_cache, int regnum)
1181 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1186 case AARCH64_PC_REGNUM:
1187 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1188 return frame_unwind_got_constant (this_frame, regnum, lr);
1191 internal_error (__FILE__, __LINE__,
1192 _("Unexpected register %d"), regnum);
1196 /* Implement the "init_reg" dwarf2_frame_ops method. */
1199 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1200 struct dwarf2_frame_state_reg *reg,
1201 struct frame_info *this_frame)
1205 case AARCH64_PC_REGNUM:
1206 reg->how = DWARF2_FRAME_REG_FN;
1207 reg->loc.fn = aarch64_dwarf2_prev_register;
1209 case AARCH64_SP_REGNUM:
1210 reg->how = DWARF2_FRAME_REG_CFA;
1215 /* When arguments must be pushed onto the stack, they go on in reverse
1216 order. The code below implements a FILO (stack) to do this. */
1220 /* Value to pass on stack. */
1223 /* Size in bytes of value to pass on stack. */
1227 DEF_VEC_O (stack_item_t);
1229 /* Return the alignment (in bytes) of the given type. */
1232 aarch64_type_align (struct type *t)
1238 t = check_typedef (t);
1239 switch (TYPE_CODE (t))
1242 /* Should never happen. */
1243 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1247 case TYPE_CODE_ENUM:
1251 case TYPE_CODE_RANGE:
1252 case TYPE_CODE_BITSTRING:
1254 case TYPE_CODE_CHAR:
1255 case TYPE_CODE_BOOL:
1256 return TYPE_LENGTH (t);
1258 case TYPE_CODE_ARRAY:
1259 case TYPE_CODE_COMPLEX:
1260 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1262 case TYPE_CODE_STRUCT:
1263 case TYPE_CODE_UNION:
1265 for (n = 0; n < TYPE_NFIELDS (t); n++)
1267 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1275 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1276 defined in the AAPCS64 ABI document; otherwise return 0. */
1279 is_hfa (struct type *ty)
1281 switch (TYPE_CODE (ty))
1283 case TYPE_CODE_ARRAY:
1285 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1286 if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
1291 case TYPE_CODE_UNION:
1292 case TYPE_CODE_STRUCT:
1294 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1296 struct type *member0_type;
1298 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1299 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
1303 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1305 struct type *member1_type;
1307 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1308 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1309 || (TYPE_LENGTH (member0_type)
1310 != TYPE_LENGTH (member1_type)))
1326 /* AArch64 function call information structure. */
1327 struct aarch64_call_info
1329 /* the current argument number. */
1332 /* The next general purpose register number, equivalent to NGRN as
1333 described in the AArch64 Procedure Call Standard. */
1336 /* The next SIMD and floating point register number, equivalent to
1337 NSRN as described in the AArch64 Procedure Call Standard. */
1340 /* The next stacked argument address, equivalent to NSAA as
1341 described in the AArch64 Procedure Call Standard. */
1344 /* Stack item vector. */
1345 VEC(stack_item_t) *si;
1348 /* Pass a value in a sequence of consecutive X registers. The caller
1349 is responsbile for ensuring sufficient registers are available. */
1352 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1353 struct aarch64_call_info *info, struct type *type,
1354 const bfd_byte *buf)
1356 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1357 int len = TYPE_LENGTH (type);
1358 enum type_code typecode = TYPE_CODE (type);
1359 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1365 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1366 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1370 /* Adjust sub-word struct/union args when big-endian. */
1371 if (byte_order == BFD_ENDIAN_BIG
1372 && partial_len < X_REGISTER_SIZE
1373 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1374 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1377 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
1379 gdbarch_register_name (gdbarch, regnum),
1380 phex (regval, X_REGISTER_SIZE));
1381 regcache_cooked_write_unsigned (regcache, regnum, regval);
1388 /* Attempt to marshall a value in a V register. Return 1 if
1389 successful, or 0 if insufficient registers are available. This
1390 function, unlike the equivalent pass_in_x() function does not
1391 handle arguments spread across multiple registers. */
1394 pass_in_v (struct gdbarch *gdbarch,
1395 struct regcache *regcache,
1396 struct aarch64_call_info *info,
1397 const bfd_byte *buf)
1401 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1402 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1407 regcache_cooked_write (regcache, regnum, buf);
1409 fprintf_unfiltered (gdb_stdlog, "arg %d in %s\n",
1411 gdbarch_register_name (gdbarch, regnum));
1418 /* Marshall an argument onto the stack. */
1421 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1422 const bfd_byte *buf)
1424 int len = TYPE_LENGTH (type);
1430 align = aarch64_type_align (type);
1432 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1433 Natural alignment of the argument's type. */
1434 align = align_up (align, 8);
1436 /* The AArch64 PCS requires at most doubleword alignment. */
1441 fprintf_unfiltered (gdb_stdlog, "arg %d len=%d @ sp + %d\n",
1442 info->argnum, len, info->nsaa);
1446 VEC_safe_push (stack_item_t, info->si, &item);
1449 if (info->nsaa & (align - 1))
1451 /* Push stack alignment padding. */
1452 int pad = align - (info->nsaa & (align - 1));
1457 VEC_safe_push (stack_item_t, info->si, &item);
1462 /* Marshall an argument into a sequence of one or more consecutive X
1463 registers or, if insufficient X registers are available then onto
1467 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1468 struct aarch64_call_info *info, struct type *type,
1469 const bfd_byte *buf)
1471 int len = TYPE_LENGTH (type);
1472 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1474 /* PCS C.13 - Pass in registers if we have enough spare */
1475 if (info->ngrn + nregs <= 8)
1477 pass_in_x (gdbarch, regcache, info, type, buf);
1478 info->ngrn += nregs;
1483 pass_on_stack (info, type, buf);
1487 /* Pass a value in a V register, or on the stack if insufficient are
1491 pass_in_v_or_stack (struct gdbarch *gdbarch,
1492 struct regcache *regcache,
1493 struct aarch64_call_info *info,
1495 const bfd_byte *buf)
1497 if (!pass_in_v (gdbarch, regcache, info, buf))
1498 pass_on_stack (info, type, buf);
1501 /* Implement the "push_dummy_call" gdbarch method. */
1504 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1505 struct regcache *regcache, CORE_ADDR bp_addr,
1507 struct value **args, CORE_ADDR sp, int struct_return,
1508 CORE_ADDR struct_addr)
1514 struct aarch64_call_info info;
1515 struct type *func_type;
1516 struct type *return_type;
1517 int lang_struct_return;
1519 memset (&info, 0, sizeof (info));
1521 /* We need to know what the type of the called function is in order
1522 to determine the number of named/anonymous arguments for the
1523 actual argument placement, and the return type in order to handle
1524 return value correctly.
1526 The generic code above us views the decision of return in memory
1527 or return in registers as a two stage processes. The language
1528 handler is consulted first and may decide to return in memory (eg
1529 class with copy constructor returned by value), this will cause
1530 the generic code to allocate space AND insert an initial leading
1533 If the language code does not decide to pass in memory then the
1534 target code is consulted.
1536 If the language code decides to pass in memory we want to move
1537 the pointer inserted as the initial argument from the argument
1538 list and into X8, the conventional AArch64 struct return pointer
1541 This is slightly awkward, ideally the flag "lang_struct_return"
1542 would be passed to the targets implementation of push_dummy_call.
1543 Rather that change the target interface we call the language code
1544 directly ourselves. */
1546 func_type = check_typedef (value_type (function));
1548 /* Dereference function pointer types. */
1549 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1550 func_type = TYPE_TARGET_TYPE (func_type);
1552 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1553 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1555 /* If language_pass_by_reference () returned true we will have been
1556 given an additional initial argument, a hidden pointer to the
1557 return slot in memory. */
1558 return_type = TYPE_TARGET_TYPE (func_type);
1559 lang_struct_return = language_pass_by_reference (return_type);
1561 /* Set the return address. For the AArch64, the return breakpoint
1562 is always at BP_ADDR. */
1563 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1565 /* If we were given an initial argument for the return slot because
1566 lang_struct_return was true, lose it. */
1567 if (lang_struct_return)
1573 /* The struct_return pointer occupies X8. */
1574 if (struct_return || lang_struct_return)
1577 fprintf_unfiltered (gdb_stdlog, "struct return in %s = 0x%s\n",
1578 gdbarch_register_name
1580 AARCH64_STRUCT_RETURN_REGNUM),
1581 paddress (gdbarch, struct_addr));
1582 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1586 for (argnum = 0; argnum < nargs; argnum++)
1588 struct value *arg = args[argnum];
1589 struct type *arg_type;
1592 arg_type = check_typedef (value_type (arg));
1593 len = TYPE_LENGTH (arg_type);
1595 switch (TYPE_CODE (arg_type))
1598 case TYPE_CODE_BOOL:
1599 case TYPE_CODE_CHAR:
1600 case TYPE_CODE_RANGE:
1601 case TYPE_CODE_ENUM:
1604 /* Promote to 32 bit integer. */
1605 if (TYPE_UNSIGNED (arg_type))
1606 arg_type = builtin_type (gdbarch)->builtin_uint32;
1608 arg_type = builtin_type (gdbarch)->builtin_int32;
1609 arg = value_cast (arg_type, arg);
1611 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1612 value_contents (arg));
1615 case TYPE_CODE_COMPLEX:
1618 const bfd_byte *buf = value_contents (arg);
1619 struct type *target_type =
1620 check_typedef (TYPE_TARGET_TYPE (arg_type));
1622 pass_in_v (gdbarch, regcache, &info, buf);
1623 pass_in_v (gdbarch, regcache, &info,
1624 buf + TYPE_LENGTH (target_type));
1629 pass_on_stack (&info, arg_type, value_contents (arg));
1633 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1634 value_contents (arg));
1637 case TYPE_CODE_STRUCT:
1638 case TYPE_CODE_ARRAY:
1639 case TYPE_CODE_UNION:
1640 if (is_hfa (arg_type))
1642 int elements = TYPE_NFIELDS (arg_type);
1644 /* Homogeneous Aggregates */
1645 if (info.nsrn + elements < 8)
1649 for (i = 0; i < elements; i++)
1651 /* We know that we have sufficient registers
1652 available therefore this will never fallback
1654 struct value *field =
1655 value_primitive_field (arg, 0, i, arg_type);
1656 struct type *field_type =
1657 check_typedef (value_type (field));
1659 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1660 value_contents_writeable (field));
1666 pass_on_stack (&info, arg_type, value_contents (arg));
1671 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1672 invisible reference. */
1674 /* Allocate aligned storage. */
1675 sp = align_down (sp - len, 16);
1677 /* Write the real data into the stack. */
1678 write_memory (sp, value_contents (arg), len);
1680 /* Construct the indirection. */
1681 arg_type = lookup_pointer_type (arg_type);
1682 arg = value_from_pointer (arg_type, sp);
1683 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1684 value_contents (arg));
1687 /* PCS C.15 / C.18 multiple values pass. */
1688 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1689 value_contents (arg));
1693 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1694 value_contents (arg));
1699 /* Make sure stack retains 16 byte alignment. */
1701 sp -= 16 - (info.nsaa & 15);
1703 while (!VEC_empty (stack_item_t, info.si))
1705 stack_item_t *si = VEC_last (stack_item_t, info.si);
1708 write_memory (sp, si->data, si->len);
1709 VEC_pop (stack_item_t, info.si);
1712 VEC_free (stack_item_t, info.si);
1714 /* Finally, update the SP register. */
1715 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1720 /* Implement the "frame_align" gdbarch method. */
1723 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1725 /* Align the stack to sixteen bytes. */
1726 return sp & ~(CORE_ADDR) 15;
1729 /* Return the type for an AdvSISD Q register. */
1731 static struct type *
1732 aarch64_vnq_type (struct gdbarch *gdbarch)
1734 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1736 if (tdep->vnq_type == NULL)
1741 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1744 elem = builtin_type (gdbarch)->builtin_uint128;
1745 append_composite_type_field (t, "u", elem);
1747 elem = builtin_type (gdbarch)->builtin_int128;
1748 append_composite_type_field (t, "s", elem);
1753 return tdep->vnq_type;
1756 /* Return the type for an AdvSISD D register. */
1758 static struct type *
1759 aarch64_vnd_type (struct gdbarch *gdbarch)
1761 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1763 if (tdep->vnd_type == NULL)
1768 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1771 elem = builtin_type (gdbarch)->builtin_double;
1772 append_composite_type_field (t, "f", elem);
1774 elem = builtin_type (gdbarch)->builtin_uint64;
1775 append_composite_type_field (t, "u", elem);
1777 elem = builtin_type (gdbarch)->builtin_int64;
1778 append_composite_type_field (t, "s", elem);
1783 return tdep->vnd_type;
1786 /* Return the type for an AdvSISD S register. */
1788 static struct type *
1789 aarch64_vns_type (struct gdbarch *gdbarch)
1791 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1793 if (tdep->vns_type == NULL)
1798 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1801 elem = builtin_type (gdbarch)->builtin_float;
1802 append_composite_type_field (t, "f", elem);
1804 elem = builtin_type (gdbarch)->builtin_uint32;
1805 append_composite_type_field (t, "u", elem);
1807 elem = builtin_type (gdbarch)->builtin_int32;
1808 append_composite_type_field (t, "s", elem);
1813 return tdep->vns_type;
1816 /* Return the type for an AdvSISD H register. */
1818 static struct type *
1819 aarch64_vnh_type (struct gdbarch *gdbarch)
1821 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1823 if (tdep->vnh_type == NULL)
1828 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1831 elem = builtin_type (gdbarch)->builtin_uint16;
1832 append_composite_type_field (t, "u", elem);
1834 elem = builtin_type (gdbarch)->builtin_int16;
1835 append_composite_type_field (t, "s", elem);
1840 return tdep->vnh_type;
1843 /* Return the type for an AdvSISD B register. */
1845 static struct type *
1846 aarch64_vnb_type (struct gdbarch *gdbarch)
1848 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1850 if (tdep->vnb_type == NULL)
1855 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1858 elem = builtin_type (gdbarch)->builtin_uint8;
1859 append_composite_type_field (t, "u", elem);
1861 elem = builtin_type (gdbarch)->builtin_int8;
1862 append_composite_type_field (t, "s", elem);
1867 return tdep->vnb_type;
1870 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1873 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1875 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1876 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1878 if (reg == AARCH64_DWARF_SP)
1879 return AARCH64_SP_REGNUM;
1881 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1882 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1888 /* Implement the "print_insn" gdbarch method. */
1891 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1893 info->symbols = NULL;
1894 return print_insn_aarch64 (memaddr, info);
1897 /* AArch64 BRK software debug mode instruction.
1898 Note that AArch64 code is always little-endian.
1899 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1900 static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1902 /* Implement the "breakpoint_from_pc" gdbarch method. */
1904 static const gdb_byte *
1905 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1908 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1910 *lenptr = sizeof (aarch64_default_breakpoint);
1911 return aarch64_default_breakpoint;
1914 /* Extract from an array REGS containing the (raw) register state a
1915 function return value of type TYPE, and copy that, in virtual
1916 format, into VALBUF. */
1919 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1922 struct gdbarch *gdbarch = get_regcache_arch (regs);
1923 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1925 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1927 bfd_byte buf[V_REGISTER_SIZE];
1928 int len = TYPE_LENGTH (type);
1930 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1931 memcpy (valbuf, buf, len);
1933 else if (TYPE_CODE (type) == TYPE_CODE_INT
1934 || TYPE_CODE (type) == TYPE_CODE_CHAR
1935 || TYPE_CODE (type) == TYPE_CODE_BOOL
1936 || TYPE_CODE (type) == TYPE_CODE_PTR
1937 || TYPE_CODE (type) == TYPE_CODE_REF
1938 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1940 /* If the the type is a plain integer, then the access is
1941 straight-forward. Otherwise we have to play around a bit
1943 int len = TYPE_LENGTH (type);
1944 int regno = AARCH64_X0_REGNUM;
1949 /* By using store_unsigned_integer we avoid having to do
1950 anything special for small big-endian values. */
1951 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1952 store_unsigned_integer (valbuf,
1953 (len > X_REGISTER_SIZE
1954 ? X_REGISTER_SIZE : len), byte_order, tmp);
1955 len -= X_REGISTER_SIZE;
1956 valbuf += X_REGISTER_SIZE;
1959 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1961 int regno = AARCH64_V0_REGNUM;
1962 bfd_byte buf[V_REGISTER_SIZE];
1963 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1964 int len = TYPE_LENGTH (target_type);
1966 regcache_cooked_read (regs, regno, buf);
1967 memcpy (valbuf, buf, len);
1969 regcache_cooked_read (regs, regno + 1, buf);
1970 memcpy (valbuf, buf, len);
1973 else if (is_hfa (type))
1975 int elements = TYPE_NFIELDS (type);
1976 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1977 int len = TYPE_LENGTH (member_type);
1980 for (i = 0; i < elements; i++)
1982 int regno = AARCH64_V0_REGNUM + i;
1983 bfd_byte buf[X_REGISTER_SIZE];
1986 fprintf_unfiltered (gdb_stdlog,
1987 "read HFA return value element %d from %s\n",
1989 gdbarch_register_name (gdbarch, regno));
1990 regcache_cooked_read (regs, regno, buf);
1992 memcpy (valbuf, buf, len);
1998 /* For a structure or union the behaviour is as if the value had
1999 been stored to word-aligned memory and then loaded into
2000 registers with 64-bit load instruction(s). */
2001 int len = TYPE_LENGTH (type);
2002 int regno = AARCH64_X0_REGNUM;
2003 bfd_byte buf[X_REGISTER_SIZE];
2007 regcache_cooked_read (regs, regno++, buf);
2008 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2009 len -= X_REGISTER_SIZE;
2010 valbuf += X_REGISTER_SIZE;
2016 /* Will a function return an aggregate type in memory or in a
2017 register? Return 0 if an aggregate type can be returned in a
2018 register, 1 if it must be returned in memory. */
2021 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2024 enum type_code code;
2026 CHECK_TYPEDEF (type);
2028 /* In the AArch64 ABI, "integer" like aggregate types are returned
2029 in registers. For an aggregate type to be integer like, its size
2030 must be less than or equal to 4 * X_REGISTER_SIZE. */
2034 /* PCS B.5 If the argument is a Named HFA, then the argument is
2039 if (TYPE_LENGTH (type) > 16)
2041 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2042 invisible reference. */
2050 /* Write into appropriate registers a function return value of type
2051 TYPE, given in virtual format. */
2054 aarch64_store_return_value (struct type *type, struct regcache *regs,
2055 const gdb_byte *valbuf)
2057 struct gdbarch *gdbarch = get_regcache_arch (regs);
2058 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2060 if (TYPE_CODE (type) == TYPE_CODE_FLT)
2062 bfd_byte buf[V_REGISTER_SIZE];
2063 int len = TYPE_LENGTH (type);
2065 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2066 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2068 else if (TYPE_CODE (type) == TYPE_CODE_INT
2069 || TYPE_CODE (type) == TYPE_CODE_CHAR
2070 || TYPE_CODE (type) == TYPE_CODE_BOOL
2071 || TYPE_CODE (type) == TYPE_CODE_PTR
2072 || TYPE_CODE (type) == TYPE_CODE_REF
2073 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2075 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2077 /* Values of one word or less are zero/sign-extended and
2079 bfd_byte tmpbuf[X_REGISTER_SIZE];
2080 LONGEST val = unpack_long (type, valbuf);
2082 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2083 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
2087 /* Integral values greater than one word are stored in
2088 consecutive registers starting with r0. This will always
2089 be a multiple of the regiser size. */
2090 int len = TYPE_LENGTH (type);
2091 int regno = AARCH64_X0_REGNUM;
2095 regcache_cooked_write (regs, regno++, valbuf);
2096 len -= X_REGISTER_SIZE;
2097 valbuf += X_REGISTER_SIZE;
2101 else if (is_hfa (type))
2103 int elements = TYPE_NFIELDS (type);
2104 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2105 int len = TYPE_LENGTH (member_type);
2108 for (i = 0; i < elements; i++)
2110 int regno = AARCH64_V0_REGNUM + i;
2111 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
2114 fprintf_unfiltered (gdb_stdlog,
2115 "write HFA return value element %d to %s\n",
2117 gdbarch_register_name (gdbarch, regno));
2119 memcpy (tmpbuf, valbuf, len);
2120 regcache_cooked_write (regs, regno, tmpbuf);
2126 /* For a structure or union the behaviour is as if the value had
2127 been stored to word-aligned memory and then loaded into
2128 registers with 64-bit load instruction(s). */
2129 int len = TYPE_LENGTH (type);
2130 int regno = AARCH64_X0_REGNUM;
2131 bfd_byte tmpbuf[X_REGISTER_SIZE];
2135 memcpy (tmpbuf, valbuf,
2136 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2137 regcache_cooked_write (regs, regno++, tmpbuf);
2138 len -= X_REGISTER_SIZE;
2139 valbuf += X_REGISTER_SIZE;
2144 /* Implement the "return_value" gdbarch method. */
2146 static enum return_value_convention
2147 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2148 struct type *valtype, struct regcache *regcache,
2149 gdb_byte *readbuf, const gdb_byte *writebuf)
2151 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2153 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2154 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2155 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2157 if (aarch64_return_in_memory (gdbarch, valtype))
2160 fprintf_unfiltered (gdb_stdlog, "return value in memory\n");
2161 return RETURN_VALUE_STRUCT_CONVENTION;
2166 aarch64_store_return_value (valtype, regcache, writebuf);
2169 aarch64_extract_return_value (valtype, regcache, readbuf);
2172 fprintf_unfiltered (gdb_stdlog, "return value in registers\n");
2174 return RETURN_VALUE_REGISTER_CONVENTION;
2177 /* Implement the "get_longjmp_target" gdbarch method. */
2180 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2183 gdb_byte buf[X_REGISTER_SIZE];
2184 struct gdbarch *gdbarch = get_frame_arch (frame);
2185 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2186 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2188 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2190 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2194 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2199 /* Return the pseudo register name corresponding to register regnum. */
2202 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2204 static const char *const q_name[] =
2206 "q0", "q1", "q2", "q3",
2207 "q4", "q5", "q6", "q7",
2208 "q8", "q9", "q10", "q11",
2209 "q12", "q13", "q14", "q15",
2210 "q16", "q17", "q18", "q19",
2211 "q20", "q21", "q22", "q23",
2212 "q24", "q25", "q26", "q27",
2213 "q28", "q29", "q30", "q31",
2216 static const char *const d_name[] =
2218 "d0", "d1", "d2", "d3",
2219 "d4", "d5", "d6", "d7",
2220 "d8", "d9", "d10", "d11",
2221 "d12", "d13", "d14", "d15",
2222 "d16", "d17", "d18", "d19",
2223 "d20", "d21", "d22", "d23",
2224 "d24", "d25", "d26", "d27",
2225 "d28", "d29", "d30", "d31",
2228 static const char *const s_name[] =
2230 "s0", "s1", "s2", "s3",
2231 "s4", "s5", "s6", "s7",
2232 "s8", "s9", "s10", "s11",
2233 "s12", "s13", "s14", "s15",
2234 "s16", "s17", "s18", "s19",
2235 "s20", "s21", "s22", "s23",
2236 "s24", "s25", "s26", "s27",
2237 "s28", "s29", "s30", "s31",
2240 static const char *const h_name[] =
2242 "h0", "h1", "h2", "h3",
2243 "h4", "h5", "h6", "h7",
2244 "h8", "h9", "h10", "h11",
2245 "h12", "h13", "h14", "h15",
2246 "h16", "h17", "h18", "h19",
2247 "h20", "h21", "h22", "h23",
2248 "h24", "h25", "h26", "h27",
2249 "h28", "h29", "h30", "h31",
2252 static const char *const b_name[] =
2254 "b0", "b1", "b2", "b3",
2255 "b4", "b5", "b6", "b7",
2256 "b8", "b9", "b10", "b11",
2257 "b12", "b13", "b14", "b15",
2258 "b16", "b17", "b18", "b19",
2259 "b20", "b21", "b22", "b23",
2260 "b24", "b25", "b26", "b27",
2261 "b28", "b29", "b30", "b31",
2264 regnum -= gdbarch_num_regs (gdbarch);
2266 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2267 return q_name[regnum - AARCH64_Q0_REGNUM];
2269 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2270 return d_name[regnum - AARCH64_D0_REGNUM];
2272 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2273 return s_name[regnum - AARCH64_S0_REGNUM];
2275 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2276 return h_name[regnum - AARCH64_H0_REGNUM];
2278 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2279 return b_name[regnum - AARCH64_B0_REGNUM];
2281 internal_error (__FILE__, __LINE__,
2282 _("aarch64_pseudo_register_name: bad register number %d"),
2286 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2288 static struct type *
2289 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2291 regnum -= gdbarch_num_regs (gdbarch);
2293 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2294 return aarch64_vnq_type (gdbarch);
2296 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2297 return aarch64_vnd_type (gdbarch);
2299 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2300 return aarch64_vns_type (gdbarch);
2302 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2303 return aarch64_vnh_type (gdbarch);
2305 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2306 return aarch64_vnb_type (gdbarch);
2308 internal_error (__FILE__, __LINE__,
2309 _("aarch64_pseudo_register_type: bad register number %d"),
2313 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2316 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2317 struct reggroup *group)
2319 regnum -= gdbarch_num_regs (gdbarch);
2321 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2322 return group == all_reggroup || group == vector_reggroup;
2323 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2324 return (group == all_reggroup || group == vector_reggroup
2325 || group == float_reggroup);
2326 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2327 return (group == all_reggroup || group == vector_reggroup
2328 || group == float_reggroup);
2329 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2330 return group == all_reggroup || group == vector_reggroup;
2331 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2332 return group == all_reggroup || group == vector_reggroup;
2334 return group == all_reggroup;
2337 /* Implement the "pseudo_register_read_value" gdbarch method. */
2339 static struct value *
2340 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2341 struct regcache *regcache,
2344 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2345 struct value *result_value;
2348 result_value = allocate_value (register_type (gdbarch, regnum));
2349 VALUE_LVAL (result_value) = lval_register;
2350 VALUE_REGNUM (result_value) = regnum;
2351 buf = value_contents_raw (result_value);
2353 regnum -= gdbarch_num_regs (gdbarch);
2355 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2357 enum register_status status;
2360 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2361 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2362 if (status != REG_VALID)
2363 mark_value_bytes_unavailable (result_value, 0,
2364 TYPE_LENGTH (value_type (result_value)));
2366 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2367 return result_value;
2370 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2372 enum register_status status;
2375 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2376 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2377 if (status != REG_VALID)
2378 mark_value_bytes_unavailable (result_value, 0,
2379 TYPE_LENGTH (value_type (result_value)));
2381 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2382 return result_value;
2385 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2387 enum register_status status;
2390 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2391 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2392 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2393 return result_value;
2396 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2398 enum register_status status;
2401 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2402 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2403 if (status != REG_VALID)
2404 mark_value_bytes_unavailable (result_value, 0,
2405 TYPE_LENGTH (value_type (result_value)));
2407 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2408 return result_value;
2411 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2413 enum register_status status;
2416 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2417 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2418 if (status != REG_VALID)
2419 mark_value_bytes_unavailable (result_value, 0,
2420 TYPE_LENGTH (value_type (result_value)));
2422 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2423 return result_value;
2426 gdb_assert_not_reached ("regnum out of bound");
2429 /* Implement the "pseudo_register_write" gdbarch method. */
2432 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2433 int regnum, const gdb_byte *buf)
2435 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2437 /* Ensure the register buffer is zero, we want gdb writes of the
2438 various 'scalar' pseudo registers to behavior like architectural
2439 writes, register width bytes are written the remainder are set to
2441 memset (reg_buf, 0, sizeof (reg_buf));
2443 regnum -= gdbarch_num_regs (gdbarch);
2445 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2447 /* pseudo Q registers */
2450 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2451 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2452 regcache_raw_write (regcache, v_regnum, reg_buf);
2456 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2458 /* pseudo D registers */
2461 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2462 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2463 regcache_raw_write (regcache, v_regnum, reg_buf);
2467 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2471 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2472 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2473 regcache_raw_write (regcache, v_regnum, reg_buf);
2477 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2479 /* pseudo H registers */
2482 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2483 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2484 regcache_raw_write (regcache, v_regnum, reg_buf);
2488 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2490 /* pseudo B registers */
2493 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2494 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2495 regcache_raw_write (regcache, v_regnum, reg_buf);
2499 gdb_assert_not_reached ("regnum out of bound");
2502 /* Callback function for user_reg_add. */
2504 static struct value *
2505 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2507 const int *reg_p = baton;
2509 return value_of_register (*reg_p, frame);
2513 /* Implement the "software_single_step" gdbarch method, needed to
2514 single step through atomic sequences on AArch64. */
2517 aarch64_software_single_step (struct frame_info *frame)
2519 struct gdbarch *gdbarch = get_frame_arch (frame);
2520 struct address_space *aspace = get_frame_address_space (frame);
2521 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2522 const int insn_size = 4;
2523 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2524 CORE_ADDR pc = get_frame_pc (frame);
2525 CORE_ADDR breaks[2] = { -1, -1 };
2527 CORE_ADDR closing_insn = 0;
2528 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2529 byte_order_for_code);
2532 int bc_insn_count = 0; /* Conditional branch instruction count. */
2533 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2535 /* Look for a Load Exclusive instruction which begins the sequence. */
2536 if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
2539 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2545 insn = read_memory_unsigned_integer (loc, insn_size,
2546 byte_order_for_code);
2548 /* Check if the instruction is a conditional branch. */
2549 if (decode_bcond (loc, insn, &cond, &offset))
2551 if (bc_insn_count >= 1)
2554 /* It is, so we'll try to set a breakpoint at the destination. */
2555 breaks[1] = loc + offset;
2561 /* Look for the Store Exclusive which closes the atomic sequence. */
2562 if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
2569 /* We didn't find a closing Store Exclusive instruction, fall back. */
2573 /* Insert breakpoint after the end of the atomic sequence. */
2574 breaks[0] = loc + insn_size;
2576 /* Check for duplicated breakpoints, and also check that the second
2577 breakpoint is not within the atomic sequence. */
2579 && (breaks[1] == breaks[0]
2580 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2581 last_breakpoint = 0;
2583 /* Insert the breakpoint at the end of the sequence, and one at the
2584 destination of the conditional branch, if it exists. */
2585 for (index = 0; index <= last_breakpoint; index++)
2586 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2591 /* Initialize the current architecture based on INFO. If possible,
2592 re-use an architecture from ARCHES, which is a list of
2593 architectures already created during this debugging session.
2595 Called e.g. at program startup, when reading a core file, and when
2596 reading a binary file. */
2598 static struct gdbarch *
2599 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2601 struct gdbarch_tdep *tdep;
2602 struct gdbarch *gdbarch;
2603 struct gdbarch_list *best_arch;
2604 struct tdesc_arch_data *tdesc_data = NULL;
2605 const struct target_desc *tdesc = info.target_desc;
2607 int have_fpa_registers = 1;
2609 const struct tdesc_feature *feature;
2611 int num_pseudo_regs = 0;
2613 /* Ensure we always have a target descriptor. */
2614 if (!tdesc_has_registers (tdesc))
2615 tdesc = tdesc_aarch64;
2619 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2621 if (feature == NULL)
2624 tdesc_data = tdesc_data_alloc ();
2626 /* Validate the descriptor provides the mandatory core R registers
2627 and allocate their numbers. */
2628 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2630 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2631 aarch64_r_register_names[i]);
2633 num_regs = AARCH64_X0_REGNUM + i;
2635 /* Look for the V registers. */
2636 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2639 /* Validate the descriptor provides the mandatory V registers
2640 and allocate their numbers. */
2641 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2643 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2644 aarch64_v_register_names[i]);
2646 num_regs = AARCH64_V0_REGNUM + i;
2648 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2649 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2650 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2651 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2652 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2657 tdesc_data_cleanup (tdesc_data);
2661 /* AArch64 code is always little-endian. */
2662 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2664 /* If there is already a candidate, use it. */
2665 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2667 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2669 /* Found a match. */
2673 if (best_arch != NULL)
2675 if (tdesc_data != NULL)
2676 tdesc_data_cleanup (tdesc_data);
2677 return best_arch->gdbarch;
2680 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
2681 gdbarch = gdbarch_alloc (&info, tdep);
2683 /* This should be low enough for everything. */
2684 tdep->lowest_pc = 0x20;
2685 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2686 tdep->jb_elt_size = 8;
2688 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2689 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2691 /* Frame handling. */
2692 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2693 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2694 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2696 /* Advance PC across function entry code. */
2697 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2699 /* The stack grows downward. */
2700 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2702 /* Breakpoint manipulation. */
2703 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2704 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2705 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2707 /* Information about registers, etc. */
2708 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2709 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2710 set_gdbarch_num_regs (gdbarch, num_regs);
2712 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2713 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2714 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2715 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2716 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2717 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2718 aarch64_pseudo_register_reggroup_p);
2721 set_gdbarch_short_bit (gdbarch, 16);
2722 set_gdbarch_int_bit (gdbarch, 32);
2723 set_gdbarch_float_bit (gdbarch, 32);
2724 set_gdbarch_double_bit (gdbarch, 64);
2725 set_gdbarch_long_double_bit (gdbarch, 128);
2726 set_gdbarch_long_bit (gdbarch, 64);
2727 set_gdbarch_long_long_bit (gdbarch, 64);
2728 set_gdbarch_ptr_bit (gdbarch, 64);
2729 set_gdbarch_char_signed (gdbarch, 0);
2730 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2731 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2732 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2734 /* Internal <-> external register number maps. */
2735 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2737 /* Returning results. */
2738 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2741 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2743 /* Virtual tables. */
2744 set_gdbarch_vbit_in_delta (gdbarch, 1);
2746 /* Hook in the ABI-specific overrides, if they have been registered. */
2747 info.target_desc = tdesc;
2748 info.tdep_info = (void *) tdesc_data;
2749 gdbarch_init_osabi (info, gdbarch);
2751 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2753 /* Add some default predicates. */
2754 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2755 dwarf2_append_unwinders (gdbarch);
2756 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2758 frame_base_set_default (gdbarch, &aarch64_normal_base);
2760 /* Now we have tuned the configuration, set a few final things,
2761 based on what the OS ABI has told us. */
2763 if (tdep->jb_pc >= 0)
2764 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2766 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2768 /* Add standard register aliases. */
2769 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2770 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2771 value_of_aarch64_user_reg,
2772 &aarch64_register_aliases[i].regnum);
2778 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2780 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2785 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2786 paddress (gdbarch, tdep->lowest_pc));
2789 /* Suppress warning from -Wmissing-prototypes. */
2790 extern initialize_file_ftype _initialize_aarch64_tdep;
2793 _initialize_aarch64_tdep (void)
2795 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2798 initialize_tdesc_aarch64 ();
2800 /* Debug this file's internals. */
2801 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2802 Set AArch64 debugging."), _("\
2803 Show AArch64 debugging."), _("\
2804 When on, AArch64 specific debugging is enabled."),
2807 &setdebuglist, &showdebuglist);
2810 /* AArch64 process record-replay related structures, defines etc. */
2812 #define submask(x) ((1L << ((x) + 1)) - 1)
2813 #define bit(obj,st) (((obj) >> (st)) & 1)
2814 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
2816 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2819 unsigned int reg_len = LENGTH; \
2822 REGS = XNEWVEC (uint32_t, reg_len); \
2823 memcpy(®S[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
2828 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2831 unsigned int mem_len = LENGTH; \
2834 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2835 memcpy(&MEMS->len, &RECORD_BUF[0], \
2836 sizeof(struct aarch64_mem_r) * LENGTH); \
2841 /* AArch64 record/replay structures and enumerations. */
2843 struct aarch64_mem_r
2845 uint64_t len; /* Record length. */
2846 uint64_t addr; /* Memory address. */
2849 enum aarch64_record_result
2851 AARCH64_RECORD_SUCCESS,
2852 AARCH64_RECORD_FAILURE,
2853 AARCH64_RECORD_UNSUPPORTED,
2854 AARCH64_RECORD_UNKNOWN
2857 typedef struct insn_decode_record_t
2859 struct gdbarch *gdbarch;
2860 struct regcache *regcache;
2861 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2862 uint32_t aarch64_insn; /* Insn to be recorded. */
2863 uint32_t mem_rec_count; /* Count of memory records. */
2864 uint32_t reg_rec_count; /* Count of register records. */
2865 uint32_t *aarch64_regs; /* Registers to be recorded. */
2866 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2867 } insn_decode_record;
2869 /* Record handler for data processing - register instructions. */
2872 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2874 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2875 uint32_t record_buf[4];
2877 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2878 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2879 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2881 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2885 /* Logical (shifted register). */
2886 if (insn_bits24_27 == 0x0a)
2887 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2889 else if (insn_bits24_27 == 0x0b)
2890 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2892 return AARCH64_RECORD_UNKNOWN;
2894 record_buf[0] = reg_rd;
2895 aarch64_insn_r->reg_rec_count = 1;
2897 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2901 if (insn_bits24_27 == 0x0b)
2903 /* Data-processing (3 source). */
2904 record_buf[0] = reg_rd;
2905 aarch64_insn_r->reg_rec_count = 1;
2907 else if (insn_bits24_27 == 0x0a)
2909 if (insn_bits21_23 == 0x00)
2911 /* Add/subtract (with carry). */
2912 record_buf[0] = reg_rd;
2913 aarch64_insn_r->reg_rec_count = 1;
2914 if (bit (aarch64_insn_r->aarch64_insn, 29))
2916 record_buf[1] = AARCH64_CPSR_REGNUM;
2917 aarch64_insn_r->reg_rec_count = 2;
2920 else if (insn_bits21_23 == 0x02)
2922 /* Conditional compare (register) and conditional compare
2923 (immediate) instructions. */
2924 record_buf[0] = AARCH64_CPSR_REGNUM;
2925 aarch64_insn_r->reg_rec_count = 1;
2927 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2929 /* CConditional select. */
2930 /* Data-processing (2 source). */
2931 /* Data-processing (1 source). */
2932 record_buf[0] = reg_rd;
2933 aarch64_insn_r->reg_rec_count = 1;
2936 return AARCH64_RECORD_UNKNOWN;
2940 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2942 return AARCH64_RECORD_SUCCESS;
2945 /* Record handler for data processing - immediate instructions. */
2948 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2950 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
2951 uint32_t record_buf[4];
2953 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2954 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
2955 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2956 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2958 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2959 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2960 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2962 record_buf[0] = reg_rd;
2963 aarch64_insn_r->reg_rec_count = 1;
2965 else if (insn_bits24_27 == 0x01)
2967 /* Add/Subtract (immediate). */
2968 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2969 record_buf[0] = reg_rd;
2970 aarch64_insn_r->reg_rec_count = 1;
2972 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2974 else if (insn_bits24_27 == 0x02 && !insn_bit23)
2976 /* Logical (immediate). */
2977 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
2978 record_buf[0] = reg_rd;
2979 aarch64_insn_r->reg_rec_count = 1;
2981 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2984 return AARCH64_RECORD_UNKNOWN;
2986 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2988 return AARCH64_RECORD_SUCCESS;
2991 /* Record handler for branch, exception generation and system instructions. */
2994 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
2996 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
2997 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
2998 uint32_t record_buf[4];
3000 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3001 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3002 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3004 if (insn_bits28_31 == 0x0d)
3006 /* Exception generation instructions. */
3007 if (insn_bits24_27 == 0x04)
3009 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3010 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3011 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3013 ULONGEST svc_number;
3015 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3017 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3021 return AARCH64_RECORD_UNSUPPORTED;
3023 /* System instructions. */
3024 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3026 uint32_t reg_rt, reg_crn;
3028 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3029 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3031 /* Record rt in case of sysl and mrs instructions. */
3032 if (bit (aarch64_insn_r->aarch64_insn, 21))
3034 record_buf[0] = reg_rt;
3035 aarch64_insn_r->reg_rec_count = 1;
3037 /* Record cpsr for hint and msr(immediate) instructions. */
3038 else if (reg_crn == 0x02 || reg_crn == 0x04)
3040 record_buf[0] = AARCH64_CPSR_REGNUM;
3041 aarch64_insn_r->reg_rec_count = 1;
3044 /* Unconditional branch (register). */
3045 else if((insn_bits24_27 & 0x0e) == 0x06)
3047 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3048 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3049 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3052 return AARCH64_RECORD_UNKNOWN;
3054 /* Unconditional branch (immediate). */
3055 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3057 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3058 if (bit (aarch64_insn_r->aarch64_insn, 31))
3059 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3062 /* Compare & branch (immediate), Test & branch (immediate) and
3063 Conditional branch (immediate). */
3064 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3066 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3068 return AARCH64_RECORD_SUCCESS;
3071 /* Record handler for advanced SIMD load and store instructions. */
3074 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3077 uint64_t addr_offset = 0;
3078 uint32_t record_buf[24];
3079 uint64_t record_buf_mem[24];
3080 uint32_t reg_rn, reg_rt;
3081 uint32_t reg_index = 0, mem_index = 0;
3082 uint8_t opcode_bits, size_bits;
3084 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3085 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3086 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3087 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3088 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3092 fprintf_unfiltered (gdb_stdlog,
3093 "Process record: Advanced SIMD load/store\n");
3096 /* Load/store single structure. */
3097 if (bit (aarch64_insn_r->aarch64_insn, 24))
3099 uint8_t sindex, scale, selem, esize, replicate = 0;
3100 scale = opcode_bits >> 2;
3101 selem = ((opcode_bits & 0x02) |
3102 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3106 if (size_bits & 0x01)
3107 return AARCH64_RECORD_UNKNOWN;
3110 if ((size_bits >> 1) & 0x01)
3111 return AARCH64_RECORD_UNKNOWN;
3112 if (size_bits & 0x01)
3114 if (!((opcode_bits >> 1) & 0x01))
3117 return AARCH64_RECORD_UNKNOWN;
3121 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3128 return AARCH64_RECORD_UNKNOWN;
3134 for (sindex = 0; sindex < selem; sindex++)
3136 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3137 reg_rt = (reg_rt + 1) % 32;
3141 for (sindex = 0; sindex < selem; sindex++)
3142 if (bit (aarch64_insn_r->aarch64_insn, 22))
3143 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3146 record_buf_mem[mem_index++] = esize / 8;
3147 record_buf_mem[mem_index++] = address + addr_offset;
3149 addr_offset = addr_offset + (esize / 8);
3150 reg_rt = (reg_rt + 1) % 32;
3153 /* Load/store multiple structure. */
3156 uint8_t selem, esize, rpt, elements;
3157 uint8_t eindex, rindex;
3159 esize = 8 << size_bits;
3160 if (bit (aarch64_insn_r->aarch64_insn, 30))
3161 elements = 128 / esize;
3163 elements = 64 / esize;
3165 switch (opcode_bits)
3167 /*LD/ST4 (4 Registers). */
3172 /*LD/ST1 (4 Registers). */
3177 /*LD/ST3 (3 Registers). */
3182 /*LD/ST1 (3 Registers). */
3187 /*LD/ST1 (1 Register). */
3192 /*LD/ST2 (2 Registers). */
3197 /*LD/ST1 (2 Registers). */
3203 return AARCH64_RECORD_UNSUPPORTED;
3206 for (rindex = 0; rindex < rpt; rindex++)
3207 for (eindex = 0; eindex < elements; eindex++)
3209 uint8_t reg_tt, sindex;
3210 reg_tt = (reg_rt + rindex) % 32;
3211 for (sindex = 0; sindex < selem; sindex++)
3213 if (bit (aarch64_insn_r->aarch64_insn, 22))
3214 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3217 record_buf_mem[mem_index++] = esize / 8;
3218 record_buf_mem[mem_index++] = address + addr_offset;
3220 addr_offset = addr_offset + (esize / 8);
3221 reg_tt = (reg_tt + 1) % 32;
3226 if (bit (aarch64_insn_r->aarch64_insn, 23))
3227 record_buf[reg_index++] = reg_rn;
3229 aarch64_insn_r->reg_rec_count = reg_index;
3230 aarch64_insn_r->mem_rec_count = mem_index / 2;
3231 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3233 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3235 return AARCH64_RECORD_SUCCESS;
3238 /* Record handler for load and store instructions. */
3241 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3243 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3244 uint8_t insn_bit23, insn_bit21;
3245 uint8_t opc, size_bits, ld_flag, vector_flag;
3246 uint32_t reg_rn, reg_rt, reg_rt2;
3247 uint64_t datasize, offset;
3248 uint32_t record_buf[8];
3249 uint64_t record_buf_mem[8];
3252 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3253 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3254 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3255 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3256 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3257 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3258 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3259 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3260 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3261 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3262 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3264 /* Load/store exclusive. */
3265 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3269 fprintf_unfiltered (gdb_stdlog,
3270 "Process record: load/store exclusive\n");
3275 record_buf[0] = reg_rt;
3276 aarch64_insn_r->reg_rec_count = 1;
3279 record_buf[1] = reg_rt2;
3280 aarch64_insn_r->reg_rec_count = 2;
3286 datasize = (8 << size_bits) * 2;
3288 datasize = (8 << size_bits);
3289 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3291 record_buf_mem[0] = datasize / 8;
3292 record_buf_mem[1] = address;
3293 aarch64_insn_r->mem_rec_count = 1;
3296 /* Save register rs. */
3297 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3298 aarch64_insn_r->reg_rec_count = 1;
3302 /* Load register (literal) instructions decoding. */
3303 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3307 fprintf_unfiltered (gdb_stdlog,
3308 "Process record: load register (literal)\n");
3311 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3313 record_buf[0] = reg_rt;
3314 aarch64_insn_r->reg_rec_count = 1;
3316 /* All types of load/store pair instructions decoding. */
3317 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3321 fprintf_unfiltered (gdb_stdlog,
3322 "Process record: load/store pair\n");
3329 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3330 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3334 record_buf[0] = reg_rt;
3335 record_buf[1] = reg_rt2;
3337 aarch64_insn_r->reg_rec_count = 2;
3342 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3344 size_bits = size_bits >> 1;
3345 datasize = 8 << (2 + size_bits);
3346 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3347 offset = offset << (2 + size_bits);
3348 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3350 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3352 if (imm7_off & 0x40)
3353 address = address - offset;
3355 address = address + offset;
3358 record_buf_mem[0] = datasize / 8;
3359 record_buf_mem[1] = address;
3360 record_buf_mem[2] = datasize / 8;
3361 record_buf_mem[3] = address + (datasize / 8);
3362 aarch64_insn_r->mem_rec_count = 2;
3364 if (bit (aarch64_insn_r->aarch64_insn, 23))
3365 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3367 /* Load/store register (unsigned immediate) instructions. */
3368 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3370 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3377 if (size_bits != 0x03)
3380 return AARCH64_RECORD_UNKNOWN;
3384 fprintf_unfiltered (gdb_stdlog,
3385 "Process record: load/store (unsigned immediate):"
3386 " size %x V %d opc %x\n", size_bits, vector_flag,
3392 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3393 datasize = 8 << size_bits;
3394 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3396 offset = offset << size_bits;
3397 address = address + offset;
3399 record_buf_mem[0] = datasize >> 3;
3400 record_buf_mem[1] = address;
3401 aarch64_insn_r->mem_rec_count = 1;
3406 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3408 record_buf[0] = reg_rt;
3409 aarch64_insn_r->reg_rec_count = 1;
3412 /* Load/store register (register offset) instructions. */
3413 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3414 && insn_bits10_11 == 0x02 && insn_bit21)
3418 fprintf_unfiltered (gdb_stdlog,
3419 "Process record: load/store (register offset)\n");
3421 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3428 if (size_bits != 0x03)
3431 return AARCH64_RECORD_UNKNOWN;
3435 uint64_t reg_rm_val;
3436 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3437 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
3438 if (bit (aarch64_insn_r->aarch64_insn, 12))
3439 offset = reg_rm_val << size_bits;
3441 offset = reg_rm_val;
3442 datasize = 8 << size_bits;
3443 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3445 address = address + offset;
3446 record_buf_mem[0] = datasize >> 3;
3447 record_buf_mem[1] = address;
3448 aarch64_insn_r->mem_rec_count = 1;
3453 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3455 record_buf[0] = reg_rt;
3456 aarch64_insn_r->reg_rec_count = 1;
3459 /* Load/store register (immediate and unprivileged) instructions. */
3460 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3465 fprintf_unfiltered (gdb_stdlog,
3466 "Process record: load/store (immediate and unprivileged)\n");
3468 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3475 if (size_bits != 0x03)
3478 return AARCH64_RECORD_UNKNOWN;
3483 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3484 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3485 datasize = 8 << size_bits;
3486 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3488 if (insn_bits10_11 != 0x01)
3490 if (imm9_off & 0x0100)
3491 address = address - offset;
3493 address = address + offset;
3495 record_buf_mem[0] = datasize >> 3;
3496 record_buf_mem[1] = address;
3497 aarch64_insn_r->mem_rec_count = 1;
3502 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3504 record_buf[0] = reg_rt;
3505 aarch64_insn_r->reg_rec_count = 1;
3507 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3508 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3510 /* Advanced SIMD load/store instructions. */
3512 return aarch64_record_asimd_load_store (aarch64_insn_r);
3514 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3516 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3518 return AARCH64_RECORD_SUCCESS;
3521 /* Record handler for data processing SIMD and floating point instructions. */
3524 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3526 uint8_t insn_bit21, opcode, rmode, reg_rd;
3527 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3528 uint8_t insn_bits11_14;
3529 uint32_t record_buf[2];
3531 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3532 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3533 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3534 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3535 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3536 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3537 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3538 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3539 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3543 fprintf_unfiltered (gdb_stdlog,
3544 "Process record: data processing SIMD/FP: ");
3547 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3549 /* Floating point - fixed point conversion instructions. */
3553 fprintf_unfiltered (gdb_stdlog, "FP - fixed point conversion");
3555 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3556 record_buf[0] = reg_rd;
3558 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3560 /* Floating point - conditional compare instructions. */
3561 else if (insn_bits10_11 == 0x01)
3564 fprintf_unfiltered (gdb_stdlog, "FP - conditional compare");
3566 record_buf[0] = AARCH64_CPSR_REGNUM;
3568 /* Floating point - data processing (2-source) and
3569 conditional select instructions. */
3570 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3573 fprintf_unfiltered (gdb_stdlog, "FP - DP (2-source)");
3575 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3577 else if (insn_bits10_11 == 0x00)
3579 /* Floating point - immediate instructions. */
3580 if ((insn_bits12_15 & 0x01) == 0x01
3581 || (insn_bits12_15 & 0x07) == 0x04)
3584 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3585 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3587 /* Floating point - compare instructions. */
3588 else if ((insn_bits12_15 & 0x03) == 0x02)
3591 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3592 record_buf[0] = AARCH64_CPSR_REGNUM;
3594 /* Floating point - integer conversions instructions. */
3595 else if (insn_bits12_15 == 0x00)
3597 /* Convert float to integer instruction. */
3598 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3601 fprintf_unfiltered (gdb_stdlog, "float to int conversion");
3603 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3605 /* Convert integer to float instruction. */
3606 else if ((opcode >> 1) == 0x01 && !rmode)
3609 fprintf_unfiltered (gdb_stdlog, "int to float conversion");
3611 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3613 /* Move float to integer instruction. */
3614 else if ((opcode >> 1) == 0x03)
3617 fprintf_unfiltered (gdb_stdlog, "move float to int");
3619 if (!(opcode & 0x01))
3620 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3622 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3625 return AARCH64_RECORD_UNKNOWN;
3628 return AARCH64_RECORD_UNKNOWN;
3631 return AARCH64_RECORD_UNKNOWN;
3633 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3636 fprintf_unfiltered (gdb_stdlog, "SIMD copy");
3638 /* Advanced SIMD copy instructions. */
3639 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3640 && !bit (aarch64_insn_r->aarch64_insn, 15)
3641 && bit (aarch64_insn_r->aarch64_insn, 10))
3643 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3644 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3646 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3649 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3651 /* All remaining floating point or advanced SIMD instructions. */
3655 fprintf_unfiltered (gdb_stdlog, "all remain");
3657 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3661 fprintf_unfiltered (gdb_stdlog, "\n");
3663 aarch64_insn_r->reg_rec_count++;
3664 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3665 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3667 return AARCH64_RECORD_SUCCESS;
3670 /* Decodes insns type and invokes its record handler. */
3673 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3675 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3677 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3678 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3679 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3680 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3682 /* Data processing - immediate instructions. */
3683 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3684 return aarch64_record_data_proc_imm (aarch64_insn_r);
3686 /* Branch, exception generation and system instructions. */
3687 if (ins_bit26 && !ins_bit27 && ins_bit28)
3688 return aarch64_record_branch_except_sys (aarch64_insn_r);
3690 /* Load and store instructions. */
3691 if (!ins_bit25 && ins_bit27)
3692 return aarch64_record_load_store (aarch64_insn_r);
3694 /* Data processing - register instructions. */
3695 if (ins_bit25 && !ins_bit26 && ins_bit27)
3696 return aarch64_record_data_proc_reg (aarch64_insn_r);
3698 /* Data processing - SIMD and floating point instructions. */
3699 if (ins_bit25 && ins_bit26 && ins_bit27)
3700 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3702 return AARCH64_RECORD_UNSUPPORTED;
3705 /* Cleans up local record registers and memory allocations. */
3708 deallocate_reg_mem (insn_decode_record *record)
3710 xfree (record->aarch64_regs);
3711 xfree (record->aarch64_mems);
3714 /* Parse the current instruction and record the values of the registers and
3715 memory that will be changed in current instruction to record_arch_list
3716 return -1 if something is wrong. */
3719 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3720 CORE_ADDR insn_addr)
3722 uint32_t rec_no = 0;
3723 uint8_t insn_size = 4;
3725 ULONGEST t_bit = 0, insn_id = 0;
3726 gdb_byte buf[insn_size];
3727 insn_decode_record aarch64_record;
3729 memset (&buf[0], 0, insn_size);
3730 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3731 target_read_memory (insn_addr, &buf[0], insn_size);
3732 aarch64_record.aarch64_insn
3733 = (uint32_t) extract_unsigned_integer (&buf[0],
3735 gdbarch_byte_order (gdbarch));
3736 aarch64_record.regcache = regcache;
3737 aarch64_record.this_addr = insn_addr;
3738 aarch64_record.gdbarch = gdbarch;
3740 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3741 if (ret == AARCH64_RECORD_UNSUPPORTED)
3743 printf_unfiltered (_("Process record does not support instruction "
3744 "0x%0x at address %s.\n"),
3745 aarch64_record.aarch64_insn,
3746 paddress (gdbarch, insn_addr));
3752 /* Record registers. */
3753 record_full_arch_list_add_reg (aarch64_record.regcache,
3755 /* Always record register CPSR. */
3756 record_full_arch_list_add_reg (aarch64_record.regcache,
3757 AARCH64_CPSR_REGNUM);
3758 if (aarch64_record.aarch64_regs)
3759 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3760 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3761 aarch64_record.aarch64_regs[rec_no]))
3764 /* Record memories. */
3765 if (aarch64_record.aarch64_mems)
3766 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3767 if (record_full_arch_list_add_mem
3768 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3769 aarch64_record.aarch64_mems[rec_no].len))
3772 if (record_full_arch_list_add_end ())
3776 deallocate_reg_mem (&aarch64_record);