/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"

/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);
extern const struct target_desc *tdesc_aarch64;
#define AARCH64_X_REGS_NUM 31
#define AARCH64_V_REGS_NUM 32
#define AARCH64_X0_REGNO    0
#define AARCH64_SP_REGNO   31
#define AARCH64_PC_REGNO   32
#define AARCH64_CPSR_REGNO 33
#define AARCH64_V0_REGNO   34
#define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
#define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)

#define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)
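
/* A quick sanity check of the numbering above: x0..x30 are regnos 0-30,
   SP is 31, PC is 32, CPSR is 33 and v0..v31 are regnos 34-65, so FPSR
   lands on 66, FPCR on 67, and AARCH64_NUM_REGS works out to 68.  */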
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}
/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
        debug_printf ("stop pc is %08lx\n", pc);
      return pc;
    }
  else
    {
      unsigned int pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
        debug_printf ("stop pc is %04x\n", pc);
      return pc;
    }
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
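
/* These four bytes are the little-endian encoding of BRK #0
   (0xd4200000), which is what aarch64_breakpoint_at compares inferior
   memory against below.  */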
/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  gdb_byte insn[aarch64_breakpoint_len];

  (*the_target->read_memory) (where, (unsigned char *) &insn,
                              aarch64_breakpoint_len);
  if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
    return 1;

  return 0;
}

/* Clear the hardware debug register mirror in STATE, disabling all
   hardware breakpoints and watchpoints.  */

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
                                 state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}
/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i];
      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch
          && addr_trap < addr_watch + len)
        return addr_trap;
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}
/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "linux_new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}
/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
                        struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Return the right target description according to the ELF file of
   the current thread.  */

static const struct target_desc *
aarch64_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    return tdesc_aarch64;
  else
    return tdesc_arm_with_neon;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  current_process ()->tdesc = aarch64_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (is_64bit_tdesc ())
    return &regs_info_aarch64;
  else
    return &regs_info_aarch32;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 0;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
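
/* For instance, NZCV above packs o0=1 (op0=0b11), op1=0b011, CRn=0b0100,
   CRm=0b0010, op2=0b000 - the system register S3_3_C4_C2_0 - into a
   single 15-bit field, which emit_mrs/emit_msr below drop into the
   instruction with one ENCODE (system_reg, 15, 5).  */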
/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

     RET rn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

/* Helper function emitting a load or store pair instruction.  */

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}
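
/* For example:

     p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));

   emits STP x0, x1, [sp, #-32]!, which is how the expression prologue
   further down saves x0 and x1 while growing the stack.  */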
/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!
     STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
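
/* For example:

     p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

   stores x3 at [sp, #48]; the fast tracepoint jump pad below uses exactly
   this pattern to save the tracepoint address.  */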
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

/* Write a STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate as their second operand.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
                            ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
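
/* For example, emit_sbfx (p, x0, x0, 0, 8) sign-extends the low byte of
   x0 into the whole register; it is the SBFM alias with immr = 0 and
   imms = 7, and is what aarch64_emit_ext below relies on.  */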
/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction sets RD to RN if the condition is true, and to
   RM incremented by one otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
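
/* For example, emit_cset (buf, x0, EQ) encodes CSINC x0, xzr, xzr, NE:
   EQ is 0x0 and NE is 0x1, so toggling the condition's low bit inverts
   it, and x0 receives 1 exactly when the Z flag is set.  */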
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  free (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
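
/* A typical call site builds instructions into a local buffer and then
   flushes it, for example:

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_ret (p, lr);
     append_insns (&buildaddr, p - buf, buf);

   Note that the LEN argument (p - buf) counts 32-bit instructions, not
   bytes, while *TO advances by the byte length.  */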
/* Sub-class of struct aarch64_insn_data, storing information for
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           B.COND TAKEN    ; If cond is true, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
           B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                          CORE_ADDR tpaddr,
                                          CORE_ADDR collector,
                                          CORE_ADDR lockaddr,
                                          ULONGEST orig_size,
                                          CORE_ADDR *jump_entry,
                                          CORE_ADDR *trampoline,
                                          ULONGEST *trampoline_size,
                                          unsigned char *jjump_pad_insn,
                                          ULONGEST *jjump_pad_insn_size,
                                          CORE_ADDR *adjusted_insn_addr,
                                          CORE_ADDR *adjusted_insn_addr_end,
                                          char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  uint32_t insn;
  int i;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 |
          *------------- collecting_t object --------------------*
          | TPIDR_EL0                  | struct tracepoint *     |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
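
  /* The branch offsets above are byte offsets: -2 * 4 branches two
     instructions back (from the first CBNZ to the WFE), and -4 * 4 four
     instructions back (from the second CBNZ to the same WFE), matching
     the "again:" label in the pseudo-code.  */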
  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }

  append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
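
/* Both helpers above move SP by a full 16 bytes per value even though
   only 8 are used: emit_push expands to STR rt, [sp, #-16]! and emit_pop
   to LDR rt, [sp], #16, which keeps SP 16-byte aligned as AArch64
   requires whenever SP is used as a base register.  */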
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of
     evaluating the expression, which will be set to whatever is on top of
     the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
2267 /* Implementation of emit_ops method "emit_add". */
2270 aarch64_emit_add (void)
2275 p += emit_pop (p, x1);
2276 p += emit_add (p, x0, x0, register_operand (x1));
2278 emit_ops_insns (buf, p - buf);
/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  /* The value popped into x1 is the first operand, so compute
     x1 - x0, not x0 - x1, matching the other binary operations.  */
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Sign-extend the low ARG bits of x0.  */
  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

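/* The two instructions emitted above are simply:

     cmp  x0, #0
     cset x0, eq        ; x0 = (x0 == 0) ? 1 : 0  */
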
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      return;
    }

  emit_ops_insns (buf, p - buf);
}

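/* The switch above dereferences the pointer on the top of the stack,
   replacing it with the value read:

     size 1:  ldrb w0, [x0]
     size 2:  ldrh w0, [x0]
     size 4:  ldr  w0, [x0]
     size 8:  ldr  x0, [x0]

   The 32-bit forms implicitly zero-extend into the full x0.  */
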
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  *offset_p = (p - buf) * 4;
  *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

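/* The sequence emitted above is:

     cmp  x0, #0          ; test the old top of the stack
     ldr  x0, [sp], #16   ; pop the new top; does not touch Z
     b.eq .+8             ; skip the goto if the tested value was 0
     nop                  ; patched into "b <target>" later

   *OFFSET_P and *SIZE_P record where the NOP lives so that
   aarch64_write_goto_address can patch it once the target of the
   goto is known.  */
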
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  *offset_p = 0;
  *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}

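/* That is, the placeholder NOP written by the emit_*goto methods is
   overwritten with a single "b" instruction.  The b encoding holds a
   signed 26-bit word offset (a 28-bit byte offset), so the target must
   be within +/-128 MiB of the branch; SIZE is always 4 on this
   target.  */
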
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

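/* The call emitted above lands in the in-process agent's get_raw_reg
   helper (whose address get_raw_reg_func_addr returns), with x0
   pointing at the raw register buffer saved by the jump pad and x1
   holding the register number.  Per the procedure call standard, the
   register value comes back in x0, i.e. directly on top of the
   stack.  */
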
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Zero-extend the low ARG bits of x0.  */
  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Set up the arguments for the function call:

     x0: arg1
     x1: top of the stack  */
  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

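/* Note the ordering above: x1 must be set before x0 is overwritten,
   because x0 still holds the top of the stack, which FN expects as its
   second argument.  The surrounding stack_flush/pop pair preserves the
   top of the stack across a call whose return value is discarded.  */
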
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  *offset_p = (p - buf) * 4;
  *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  *offset_p = (p - buf) * 4;
  *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction unless x1 < x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  *offset_p = (p - buf) * 4;
  *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction unless x1 <= x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  *offset_p = (p - buf) * 4;
  *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction unless x1 > x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  *offset_p = (p - buf) * 4;
  *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction unless x1 >= x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  *offset_p = (p - buf) * 4;
  *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

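/* All six fused compare-and-goto methods above share one shape: pop
   the first operand into x1, compare it against the top of the stack
   in x0, then branch over a NOP using the *inverted* condition.  For
   example, aarch64_emit_lt_goto expands to:

     ldr  x1, [sp], #16
     cmp  x1, x0
     b.ge .+8            ; skip the goto unless x1 < x0
     nop                 ; patched into "b <target>" later

   As with emit_if_goto, *OFFSET_P and *SIZE_P let write_goto_address
   locate the NOP.  */
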
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

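/* A fast tracepoint is planted by overwriting a single instruction
   with a branch into the jump pad, and every AArch64 instruction is
   exactly 4 bytes, so 4 bytes at the tracepoint address always
   suffice.  */
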
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of linux_target_ops method "breakpoint_kind_from_pc".  */

static int
aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the linux_target_ops method
   "breakpoint_kind_from_current_state".  */

static int
aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_arch_setup,
  aarch64_regs_info,
  aarch64_cannot_fetch_register,
  aarch64_cannot_store_register,
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* breakpoint_reinsert_addr */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_new_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
};

void
initialize_low_arch (void)
{
  init_registers_aarch64 ();

  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
}