/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);
extern const struct target_desc *tdesc_aarch64;

#define AARCH64_X_REGS_NUM 31
#define AARCH64_V_REGS_NUM 32
#define AARCH64_X0_REGNO    0
#define AARCH64_SP_REGNO   31
#define AARCH64_PC_REGNO   32
#define AARCH64_CPSR_REGNO 33
#define AARCH64_V0_REGNO   34
#define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
#define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)

#define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)
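
/* That is, x0..x30 are register numbers 0..30, SP is 31, PC 32, CPSR 33,
   v0..v31 are 34..65, FPSR is 66, FPCR is 67, and AARCH64_NUM_REGS works
   out to 68.  */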
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}
/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */

extern int debug_threads;
/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
	debug_printf ("stop pc is %08lx\n", pc);
      return pc;
    }
  else
    {
      unsigned int pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
	debug_printf ("stop pc is %04x\n", pc);
      return pc;
    }
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
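
/* These are the little-endian bytes of BRK #0: read as a 32-bit word
   they give 0xd4200000, the architectural encoding of BRK with a zero
   immediate.  */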
/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
				  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int len
	= aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch
	  && addr_trap < addr_watch + len)
	return addr_trap;
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}
/* Implementation of linux_target_ops method "linux_new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Return the right target description according to the ELF file of
   the current thread.  */

static const struct target_desc *
aarch64_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    return tdesc_aarch64;
  else
    return tdesc_arm_with_neon;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  current_process ()->tdesc = aarch64_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  /* End-of-list sentinel.  */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (is_64bit_tdesc ())
    return &regs_info_aarch64;
  else
    return &regs_info_aarch32;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
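
/* Numerically, NZCV above works out to 0x5a10.  emit_mrs below places
   this value in bits 5-19 of the instruction word, which, assuming MRS
   carries the usual 0xd5300000 base encoding, reproduces the
   architectural encoding of MRS x2, NZCV: 0xd53b4202.  */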
/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

     RET rn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
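
/* As a worked example of the ENCODE arithmetic, and assuming BLR and RET
   carry the architectural base encodings 0xd63f0000 and 0xd65f0000:
   emit_blr (p, ip0) yields 0xd63f0200 (16 << 5 == 0x200), and
   emit_ret (p, lr) yields 0xd65f03c0 (30 << 5 == 0x3c0).  */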
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
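
/* For instance, the collecting_t push in the jump pad below uses
   emit_stp (p, x0, x1, sp, preindex_memory_operand (-16)): both registers
   are 64-bit so OPC is 2, PRE_INDEX and WRITE_BACK are both set, and the
   7-bit immediate field at bit 15 receives -16 >> 3, i.e. -2.  */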
/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a LDR instruction into *BUF.

     LDR rt, [rn, #offset]
     LDR rt, [rn, #index]!
     LDR rt, [rn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldr (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn, struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, LDR, rt, rn, operand);
}

/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a LDRSW instruction into *BUF.  The register size is 64-bit.

     LDRSW xt, [rn, #offset]

   RT is the register to load.
   RN is the base address register.  */

static int
emit_ldrsw (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn,
	    struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 3, LDRSW, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!
     STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}
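
/* For instance, emit_add (p, x1, sp, immediate_operand (16)) below takes
   the immediate branch, placing 16 in bits 10-21, while
   emit_add (p, x0, x0, register_operand (x1)) is routed through
   emit_data_processing_reg and encodes x1 in bits 16-20.  */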
/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
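
/* For a hypothetical ADDR of 0x0000007fb7c01234, this emits
   MOV xd, #0x1234, then MOVK xd, #0xb7c0, lsl #16, then
   MOVK xd, #0x7f, lsl #32, and stops there since bits 48-63 are zero.  */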
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
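
/* The fast tracepoint jump pad below pairs these helpers up:
   emit_mrs (p, x2, NZCV) saves the condition flags on entry and
   emit_msr (p, NZCV, x2) restores them on exit, so the traced code
   cannot observe that the collector ran in between.  */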
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
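
/* For example, aarch64_emit_ext below uses emit_sbfx (p, x0, x0, 0, 8)
   for an 8-bit argument, i.e. SBFM x0, x0, #0, #7, which sign-extends
   the low byte of x0 across the whole register.  */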
/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
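
/* With the condition codes defined above, emit_cset (p, x0, EQ), as used
   by aarch64_emit_equal below, therefore emits CSINC x0, xzr, xzr, NE,
   since EQ (0x0) and NE (0x1) differ only in their least significant
   bit; LT (0xb) likewise toggles to GE (0xa).  */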
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  free (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
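
/* The code generators below all follow the same pattern: build
   instruction words into a local buffer, then flush them out, e.g.:

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_sub (p, sp, sp, immediate_operand (16));
     append_insns (&buildaddr, p - buf, buf);

   Each emit_* helper returns the number of 32-bit words it wrote (one
   per instruction), so P always points just past the generated code.  */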
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:
	 */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:
	 */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:
	 */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]

   */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 |
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0               | struct tracepoint *        |
     Low *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

   */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

   */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

   */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

   */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

   */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

   */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

   */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       SEVL               ; Trigger an event local to this core.  So the
			  ; following WFE instruction is ignored.
     WFE_LOOP:
       WFE                ; Wait for an event.  The event is triggered by
			  ; either the SEVL or STLR instructions (store
			  ; release).
       LDAXR x2, [x0]     ; Atomically read at lockaddr.  This marks the
			  ; memory location as exclusive.  This instruction
			  ; also has memory constraints which make sure all
			  ; previous data reads and writes are done before
			  ; executing it.
       CBNZ x2, WFE_LOOP  ; Try again if another thread holds the lock.
       STXR w2, x1, [x0]  ; We can lock it!  Write the address of the
			  ; collecting_t object.  This instruction will fail
			  ; if the memory location is not marked as exclusive
			  ; anymore.  If it succeeds, it will remove the
			  ; exclusive mark on the memory location.  This way,
			  ; if another thread executes this instruction before
			  ; us, we will fail and try again.
       CBNZ x2, WFE_LOOP  ; Retry if the exclusive store failed.

   */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

   */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       STLR xzr, [x0]     ; This instruction is a normal store with memory
			  ; ordering constraints.  Thanks to this we do not
			  ; have to put a data barrier instruction to make
			  ; sure all data reads and writes are done before
			  ; this instruction is executed.  Furthermore, this
			  ; instruction will trigger an event, letting other
			  ; threads know they can grab the lock.

   */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

   */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

   */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));
  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

   */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

   */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);
  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }

  append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
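
/* The compiled-expression methods below implement a stack machine whose
   top of stack is cached in x0 (see aarch64_emit_prologue).  A typical
   binary operation is therefore two emitter calls:

     p += emit_pop (p, x1);
     p += emit_add (p, x0, x0, register_operand (x1));

   popping the second operand into x1 and leaving the result in x0.  */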
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will
     be set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
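/* Note: emit_pop expands to a post-indexed load, so the body of
   aarch64_emit_add above is roughly:

	ldr  x1, [sp], #16
	add  x0, x0, x1  */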
/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
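/* Note: the operand order in aarch64_emit_sub matters.  After the pop,
   x1 holds the operand pushed first and x0 (the cached top of stack)
   the operand pushed last, so the difference must be computed as
   x1 - x0.  */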
/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace
     it with 0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
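/* Note: CSET writes 1 to its destination when the condition holds and 0
   otherwise, so CMP x0, #0 followed by CSET x0, EQ implements exactly
   the logical negation of x0.  */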
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
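/* Note: LO is the "unsigned lower" condition, the unsigned counterpart
   of the signed LT used by aarch64_emit_less_signed above.  */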
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      return;
    }

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
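/* Note: *OFFSET_P and *SIZE_P tell the caller where the placeholder NOP
   sits within the emitted sequence and how large it is, so that
   aarch64_write_goto_address can patch it into a real branch once the
   jump destination is known.  */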
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
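/* Note (assumption): following the AAPCS, the in-process agent helper
   returned by get_raw_reg_func_addr receives the raw register buffer in
   x0 and the register number in x1, and leaves the register value in
   x0, which thereby becomes the new top of the expression stack.  */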
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
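/* Note: since x0 caches the top of the expression stack, "flushing"
   means storing x0 out to the in-memory stack, so that C functions
   called from the compiled expression see a consistent stack.  */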
/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}
/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 != x0.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 == x0.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
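/* Note: the six conditional goto emitters above share one pattern: pop
   the earlier operand into x1, compare it with the cached top of stack
   in x0, skip a placeholder NOP when the inverted condition holds, and
   let aarch64_write_goto_address later patch that NOP into the actual
   branch.  */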
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};
/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  /* A fast tracepoint is installed by replacing a single 4-byte B
     instruction at the tracepoint site.  */
  return 4;
}
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}
/* Implementation of linux_target_ops method "breakpoint_kind_from_pc".  */

static int
aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}
/* Implementation of the linux_target_ops method
   "breakpoint_kind_from_current_state".  */

static int
aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
struct linux_target_ops the_low_target =
{
  aarch64_arch_setup,
  aarch64_regs_info,
  aarch64_cannot_fetch_register,
  aarch64_cannot_store_register,
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* breakpoint_reinsert_addr */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_new_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
};
void
initialize_low_arch (void)
{
  init_registers_aarch64 ();

  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
}