/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "tracepoint.h"

#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;

/* Return true if the size of register 0 is 8 bytes.  */

  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;

/* Return true if the regcache contains the number of SVE registers.  */

  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return regcache->tdesc->reg_defs.size () == AARCH64_SVE_NUM_REGS;
aarch64_fill_gregset (struct regcache *regcache, void *buf)
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);

aarch64_store_gregset (struct regcache *regcache, const void *buf)
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);

aarch64_fill_fpregset (struct regcache *regcache, void *buf)
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);

aarch64_store_fpregset (struct regcache *regcache, const void *buf)
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

aarch64_get_pc (struct regcache *regcache)
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);

/* Implementation of linux_target_ops method "set_pc".  */

aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
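
/* A note on the encoding, added for clarity: these four bytes are the
   little-endian form of 0xd4200000, which is BRK #0 in A64 (BRK places
   its 16-bit immediate at bits 5-20, zero here).  A hedged sketch of
   how the word can be recognized after reading it back:

     uint32_t insn;

     target_read_uint32 (where, &insn);
     if (insn == 0xd4200000)
       ...   /+ A BRK #0 planted by GDB.  +/
*/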
/* Implementation of linux_target_ops method "breakpoint_at".  */

aarch64_breakpoint_at (CORE_ADDR where)
  if (is_64bit_tdesc ())
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
                                  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)

  return arm_breakpoint_at (where);

aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;

/* Implementation of linux_target_ops method "supports_z_point_type".  */

aarch64_supports_z_point_type (char z_type)
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);

      /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
         instruction.  Set it to 2 to correctly encode the length bit
         mask in the hardware breakpoint/watchpoint control register.  */
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);

    aarch64_show_debug_reg_state (state, "insert_point", addr, len,

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,

      /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
         instruction.  Set it to 2 to correctly encode the length bit
         mask in the hardware breakpoint/watchpoint control register.  */
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);

    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
/* Implementation of linux_target_ops method "stopped_data_address".  */

aarch64_stopped_data_address (void)
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Needs to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
      const unsigned int offset
        = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch_aligned
          && addr_trap < addr_watch + len)
          /* ADDR_TRAP reports the first address of the memory range
             accessed by the CPU, regardless of what memory range was
             watched.  Thus, a large CPU access that straddles
             the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
             ADDR_TRAP that is lower than the
             ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

             addr: |   4   |   5   |   6   |   7   |   8   |
                   |---- range watched ----|
                   |----------- range accessed ------------|

             In this case, ADDR_TRAP will be 4.

             To match a watchpoint known to GDB core, we must never
             report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
             range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
             positive on kernels older than 4.10.  See PR

  return (CORE_ADDR) 0;

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

aarch64_stopped_by_watchpoint (void)
  if (aarch64_stopped_data_address () != 0)
/* Fetch the thread-local storage pointer for libthread_db.  */

ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,

/* Implementation of linux_target_ops method "siginfo_fixup".  */

aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

/* Implementation of linux_target_ops method "delete_process".  */

aarch64_linux_delete_process (struct arch_process_info *info)

/* Implementation of linux_target_ops method "linux_new_fork".  */

aarch64_linux_new_fork (struct process_info *parent,
                        struct process_info *child)
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will end up zeroed
     before the forked off process is detached, thus making this
     compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
/* Implementation of linux_target_ops method "arch_setup".  */

aarch64_arch_setup (void)
  unsigned int machine;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

      uint64_t vq = aarch64_sve_get_vq (tid);
      current_process ()->tdesc = aarch64_linux_read_description (vq);

    current_process ()->tdesc = tdesc_arm_with_neon;

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);

static struct regset_info aarch64_regsets[] =
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset

static struct regsets_info aarch64_regsets_info =
    aarch64_regsets, /* regsets */
    NULL, /* disabled_regsets */

static struct regs_info regs_info_aarch64 =
    NULL, /* regset_bitmap */
    &aarch64_regsets_info,

static struct regset_info aarch64_sve_regsets[] =
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache

static struct regsets_info aarch64_sve_regsets_info =
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */

static struct regs_info regs_info_aarch64_sve =
    NULL, /* regset_bitmap.  */
    &aarch64_sve_regsets_info,

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
/* Implementation of linux_target_ops method "supports_tracepoints".  */

aarch64_supports_tracepoints (void)
  if (current_thread == NULL)

  /* We don't support tracepoints on aarch32 now.  */
  return is_64bit_tdesc ();

/* Implementation of linux_target_ops method "get_thread_area".  */

aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;

    collect_register_by_name (regcache, "r7", sysno);
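
/* A note on the register choice above, for clarity: these follow the
   Linux syscall conventions - x8 in the AArch64 ABI and r7 in the
   32-bit ARM EABI - so one method covers both native and compat
   inferiors.  A hedged usage sketch, assuming a regcache for a thread
   stopped at a syscall entry:

     int sysno;

     aarch64_get_syscall_trapinfo (regcache, &sysno);
     ...   /+ sysno now holds e.g. __NR_write for the active syscall.  +/
*/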
/* List of condition codes that we need.  */

enum aarch64_condition_codes

enum aarch64_operand_type

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
    struct aarch64_register reg;

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
  return (struct aarch64_register) { num, is64 };
/* Helper function to create a register operand, for instructions with
   different types of operands.

     p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

     p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;

/* Helper function to create an offset memory operand.

     p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };

/* Helper function to create a pre-index memory operand.

     p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };

/* Helper function to create a post-index memory operand.

     p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
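
/* For clarity, the three memory operand kinds above map onto the three
   A64 addressing forms; a small hedged sketch of the assembly each
   emit_ldr call would produce:

     p += emit_ldr (p, x0, sp, offset_memory_operand (16));
        ;; LDR x0, [sp, #16]    - base register left unchanged.
     p += emit_ldr (p, x0, sp, preindex_memory_operand (16));
        ;; LDR x0, [sp, #16]!   - base updated before the access.
     p += emit_ldr (p, x0, sp, postindex_memory_operand (16));
        ;; LDR x0, [sp], #16    - base updated after the access.
*/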
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
  /*            op0           op1          crn          crm         op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
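
/* Decoding one entry for clarity: each value packs the o0:op1:CRn:CRm:op2
   fields of the MRS/MSR system register encoding into bits 14..0.
   TPIDR_EL0, for instance, is o0=1, op1=3, CRn=0xd, CRm=0, op2=2, which
   assemblers also spell S3_3_C13_C0_2.  A hedged sanity check:

     assert (TPIDR_EL0 == ((0x1 << 14) | (0x3 << 11) | (0xd << 7)
                           | (0x0 << 3) | 0x2));
*/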
/* Write a BLR instruction into *BUF.

   RN is the register to branch to.  */

emit_blr (uint32_t *buf, struct aarch64_register rn)
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));

/* Write a RET instruction into *BUF.

   RN is the register to branch to.  */

emit_ret (uint32_t *buf, struct aarch64_register rn)
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));

emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
    opc = ENCODE (2, 2, 30);
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    case MEMORY_OPERAND_OFFSET:
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);

    case MEMORY_OPERAND_POSTINDEX:
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);

    case MEMORY_OPERAND_PREINDEX:
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
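
/* A usage sketch, for clarity; this mirrors how the jump pad code below
   saves a register pair with a pre-index write-back (the imm7 field is
   the offset scaled down by 8, so -16 encodes as -2):

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
        ;; STP x0, x1, [sp, #-16]!
*/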
/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a -1024 .. 1008 range (7 bits << 4).  */

emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a -1024 .. 1008 range (7 bits << 4).  */

emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 8190 range (12 bits << 1).  */

emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);

/* Write a LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 4095 range (12 bits << 0).  */

emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 32760 range (12 bits << 3).  */

emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);

/* Helper function emitting an exclusive load or store instruction.  */

emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
/* Write a LDAXR instruction into *BUF.

   RT is the destination register.
   RN is the base address register.  */

emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,

/* Write a STXR instruction into *BUF.

   RS is the result register; it indicates whether the store succeeded
   or not.
   RT is the destination register.
   RN is the base address register.  */

emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,

/* Write a STLR instruction into *BUF.

   RT is the register to store.
   RN is the base address register.  */

emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
/* Helper function for data processing instructions with register sources.  */

emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));

      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
/* Write an ADD instruction into *BUF.

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
  return emit_data_processing (buf, ADD, rd, rn, operand);

/* Write a SUB instruction into *BUF.

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
  return emit_data_processing (buf, SUB, rd, rn, operand);
/* Write a MOV instruction into *BUF.

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the sp register.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
  if (operand.type == OPERAND_IMMEDIATE)
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));

    return emit_add (buf, rd, operand.reg, immediate_operand (0));

/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
                            ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
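
/* A worked example for clarity: for ADDR == 0x7fb7ed8000, the helper
   above would emit the following (the last MOVK is skipped because
   bits 48-63 are zero):

     MOV  xd, #0x8000
     MOVK xd, #0xb7ed, lsl #16
     MOVK xd, #0x7f, lsl #32
*/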
/* Write a SUBS instruction into *BUF.

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
  return emit_data_processing (buf, SUBS, rd, rn, operand);

/* Write a CMP instruction into *BUF.

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
  return emit_subs (buf, xzr, rn, operand);

/* Write an AND instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
  return emit_data_processing_reg (buf, AND, rd, rn, rm);

/* Write an ORR instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);

/* Write an ORN instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);

/* Write an EOR instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
/* Write a MVN instruction into *BUF.

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
  return emit_orn (buf, rd, xzr, rm);

/* Write a LSLV instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);

/* Write a LSRV instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);

/* Write an ASRV instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
/* Write a MUL instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

emit_sevl (uint32_t *buf)
  return aarch64_emit_insn (buf, SEVL);

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

emit_wfe (uint32_t *buf)
  return aarch64_emit_insn (buf, WFE);

/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
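
/* An illustrative sketch, for clarity: this is how aarch64_emit_ext
   below uses the helper to sign-extend the low ARG bits of x0 in the
   bytecode compiler; for ARG == 8 it emits the equivalent of
   SBFX x0, x0, #0, #8, i.e. SBFM x0, x0, #0, #7:

     p += emit_sbfx (p, x0, x0, 0, 8);
*/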
/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));

/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
/* Write a CSET instruction into *BUF.

   This instruction conditionally writes 1 or 0 in the destination
   register.  1 is written if the condition is true.  This is an
   alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
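
/* For clarity: in the A64 condition encoding, flipping the least
   significant bit negates the condition (EQ (0b0000) <-> NE (0b0001),
   and so on, the AL/NV pair excepted), so the XOR above turns the
   requested condition into the !cond the CSINC alias needs.  E.g. for
   aarch64_emit_log_not below:

     p += emit_cset (p, x0, EQ);
        ;; Emits CSINC x0, xzr, xzr, NE - x0 becomes 1 iff Z was set.
*/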
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif
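
/* A hedged usage sketch of the emit/append pattern used throughout this
   file: instructions are first emitted into a local buffer, then copied
   into the inferior in one go, with *TO advanced past what was written:

     uint32_t buf[16];
     uint32_t *p = buf;
     CORE_ADDR buildaddr = where;   /+ 'where' is a hypothetical target.  +/

     p += emit_mov (p, x0, immediate_operand (1));
     p += emit_ret (p, lr);
     append_insns (&buildaddr, p - buf, buf);
*/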
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */

  /* Pointer to the buffer of relocated instruction(s).  */

/* Implementation of aarch64_insn_visitor method "b".  */

aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
/* Implementation of aarch64_insn_visitor method "b_cond".  */

aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
  else if (can_encode_int32 (new_offset, 28))
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           B.COND TAKEN    ; If cond is true, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.  */
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);

/* Implementation of aarch64_insn_visitor method "cb".  */

aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), new_offset);
  else if (can_encode_int32 (new_offset, 28))
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           CBZ xn, TAKEN   ; If xn == 0, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.  */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);

/* Implementation of aarch64_insn_visitor method "tb".  */

aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
  else if (can_encode_int32 (new_offset, 28))
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           TBZ xn, #bit, TAKEN   ; If xn[bit] == 0, then jump to TAKEN.
           B NOT_TAKEN           ; Else jump over TAKEN and continue.  */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
/* Implementation of aarch64_insn_visitor method "adr".  */

aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               struct aarch64_insn_data *data)
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),

    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16

       LDR xd, [xd] ; or LDRSW xd, [xd]  */

    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));

/* Implementation of aarch64_insn_visitor method "others".  */

aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
static const struct aarch64_insn_visitor visitor =
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                          CORE_ADDR collector,
                                          CORE_ADDR *jump_entry,
                                          CORE_ADDR *trampoline,
                                          ULONGEST *trampoline_size,
                                          unsigned char *jjump_pad_insn,
                                          ULONGEST *jjump_pad_insn_size,
                                          CORE_ADDR *adjusted_insn_addr,
                                          CORE_ADDR *adjusted_insn_addr_end,
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16-byte aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*

          *---- General purpose registers from 30 down to 0. ----*

          *------------- Special purpose registers. -------------*
          | CPSR (NZCV)                                 | 5 cells

          *------------- collecting_t object --------------------*
          | TPIDR_EL0                | struct tracepoint *       |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
   */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
   */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)
   */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]
   */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       STR x3, [sp, #(3 * 16)]
   */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]
   */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       STP x0, x1, [sp, #-16]!
   */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

     ; Trigger an event local to this core.  So the following WFE
     ; instruction is ignored.
     ; Wait for an event.  The event is triggered by either the SEVL
     ; or STLR instructions (store release).
     ; Atomically read at lockaddr.  This marks the memory location as
     ; exclusive.  This instruction also has memory constraints which
     ; make sure all previous data reads and writes are done before
     ; this instruction executes.
     ; Try again if another thread holds the lock.
     ; We can lock it!  Write the address of the collecting_t object.
     ; This instruction will fail if the memory location is not marked
     ; as exclusive anymore.  If it succeeds, it will remove the
     ; exclusive mark on the memory location.  This way, if another
     ; thread executes this instruction before us, we will fail and try
     ; all over again.  */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
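
/* For clarity, a hedged C-level analogy of the loop emitted above (not
   code that runs in gdbserver; the real thing is the LDAXR/STXR pair
   written into inferior memory, with load_acquire_exclusive and
   store_exclusive as purely illustrative names):

     do
       val = load_acquire_exclusive (lockaddr);   /+ LDAXR  +/
     while (val != 0                              /+ CBNZ: lock is taken.  +/
            || !store_exclusive (lockaddr, sp));  /+ STXR: may lose the race.  +/
*/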
  /* Call collector (struct tracepoint *, unsigned char *):

       ; Saved registers start after the collecting_t object.

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)

       ; And call back to C!
   */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are
       ; done before this instruction is executed.  Furthermore, this
       ; instruction will trigger an event, letting other threads know
       ; they can grab the lock.
   */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16
   */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       ADD sp, sp, #(5 * 16)
   */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));
  /* Pop general purpose registers:

       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)
   */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)
   */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
      "E.Could not relocate instruction from %s to %s.",
      core_addr_to_string_nz (tpaddr),
      core_addr_to_string_nz (buildaddr));

  append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;
  /* Go back to the start of the buffer.  */

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
      "E.Jump back from jump pad too far from tracepoint "
      "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
      "E.Jump pad too far from tracepoint "
      "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

emit_ops_insns (const uint32_t *start, int len)
  CORE_ADDR buildaddr = current_insn_ptr;

    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;

/* Pop a register from the stack.  */

emit_pop (uint32_t *buf, struct aarch64_register rt)
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
/* Push a register on the stack.  */

emit_push (uint32_t *buf, struct aarch64_register rt)
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
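
/* For clarity: push and pop use 16-byte slots even for a single 64-bit
   register, because AArch64 requires SP to stay 16-byte aligned at
   memory accesses.  A sketch of what the two helpers above produce:

     p += emit_push (p, x0);   ;; STR x0, [sp, #-16]!
     p += emit_pop (p, x1);    ;; LDR x1, [sp], #16
*/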
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument points to where the result of evaluating the expression
     is stored; it will be set to whatever is on top of the stack at
     the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   |
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later,
     we save the current stack pointer in the frame pointer.  This way,
     it is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as
     the top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
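
/* Concrete frame offsets established by the prologue above (derived
   from the emit_* calls; shown here for reference):

     STP x0, x1, [sp, #-32]!
     STR x30, [sp, #24]
     STR x29, [sp, #16]
     ADD x29, sp, #16

   so afterwards [fp - 16] = regs, [fp - 8] = value, [fp] = saved FP
   and [fp + 8] = saved LR.  aarch64_emit_epilogue and
   aarch64_emit_reg rely on exactly these offsets.  */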
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
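
/* Editorial summary of the binary opcodes above: the operand pushed
   first is popped into x1, the most recent one stays in x0, and the
   result of "x1 OP x0" is left in x0 as the new top of the stack
   (e.g. LSLV x0, x1, x0 computes x1 << x0).  */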
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace
     it with 0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
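
/* Editorial note on emit_ref above: x0 holds the address on entry
   and the loaded value on exit.  The sub-64-bit loads target w0, and
   writing a W register zero-extends into the full X register, so
   narrower values reach the stack machine zero-extended.  */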
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
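
/* How the three functions above cooperate (summary): emit_if_goto
   and emit_goto cannot know the branch target yet, so they emit a
   NOP and report its position and size through OFFSET_P and SIZE_P.
   Once the bytecode compiler knows the destination, it calls
   aarch64_write_goto_address, which overwrites that NOP with an
   unconditional B to the target.  */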
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
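
/* Editorial note on emit_void_call_2 above: the top of the stack
   (x0) is live across the call, so it is first spilled with
   aarch64_emit_stack_flush, then used to form the arguments, and
   finally reloaded with aarch64_emit_pop, since the callee is free
   to clobber x0 and x1 under the AAPCS64 calling convention.  */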
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
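
/* Editorial note on the comparison gotos above: after the pop, the
   operand pushed first is in x1 and the top of the stack is in x0,
   so each "emit_<cond>_goto" takes the goto when "x1 <cond> x0"
   holds.  The emitted B.cond therefore uses the inverse condition,
   skipping the patched branch in the fall-through case.  */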
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of linux_target_ops method "breakpoint_kind_from_pc".  */

static int
aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the linux_target_ops method
   "breakpoint_kind_from_current_state".  */

static int
aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
struct linux_target_ops the_low_target =
{
  aarch64_arch_setup,
  aarch64_regs_info,
  NULL, /* cannot_fetch_register */
  NULL, /* cannot_store_register */
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};
void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);

  initialize_low_tdesc ();
}