1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2016 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
34 #include "nat/gdb_ptrace.h"
35 #include <asm/ptrace.h>
40 #include "gdb_proc_service.h"
42 /* Defined in auto-generated files. */
43 void init_registers_aarch64 (void);
44 extern const struct target_desc *tdesc_aarch64;
50 #define AARCH64_X_REGS_NUM 31
51 #define AARCH64_V_REGS_NUM 32
52 #define AARCH64_X0_REGNO 0
53 #define AARCH64_SP_REGNO 31
54 #define AARCH64_PC_REGNO 32
55 #define AARCH64_CPSR_REGNO 33
56 #define AARCH64_V0_REGNO 34
57 #define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
58 #define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)
60 #define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)
62 /* Per-process arch-specific data we want to keep. */
64 struct arch_process_info
66 /* Hardware breakpoint/watchpoint data.
67 The reason for them to be per-process rather than per-thread is
68 due to the lack of information in the gdbserver environment;
69 gdbserver is not told that whether a requested hardware
70 breakpoint/watchpoint is thread specific or not, so it has to set
71 each hw bp/wp for every thread in the current process. The
72 higher level bp/wp management in gdb will resume a thread if a hw
73 bp/wp trap is not expected for it. Since the hw bp/wp setting is
74 same for each thread, it is reasonable for the data to live here.
76 struct aarch64_debug_reg_state debug_reg_state;
79 /* Return true if the size of register 0 is 8 byte. */
84 struct regcache *regcache = get_thread_regcache (current_thread, 0);
86 return register_size (regcache->tdesc, 0) == 8;
89 /* Implementation of linux_target_ops method "cannot_store_register". */
92 aarch64_cannot_store_register (int regno)
94 return regno >= AARCH64_NUM_REGS;
97 /* Implementation of linux_target_ops method "cannot_fetch_register". */
100 aarch64_cannot_fetch_register (int regno)
102 return regno >= AARCH64_NUM_REGS;
106 aarch64_fill_gregset (struct regcache *regcache, void *buf)
108 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
111 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
112 collect_register (regcache, AARCH64_X0_REGNO + i, ®set->regs[i]);
113 collect_register (regcache, AARCH64_SP_REGNO, ®set->sp);
114 collect_register (regcache, AARCH64_PC_REGNO, ®set->pc);
115 collect_register (regcache, AARCH64_CPSR_REGNO, ®set->pstate);
119 aarch64_store_gregset (struct regcache *regcache, const void *buf)
121 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
124 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
125 supply_register (regcache, AARCH64_X0_REGNO + i, ®set->regs[i]);
126 supply_register (regcache, AARCH64_SP_REGNO, ®set->sp);
127 supply_register (regcache, AARCH64_PC_REGNO, ®set->pc);
128 supply_register (regcache, AARCH64_CPSR_REGNO, ®set->pstate);
132 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
134 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
137 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
138 collect_register (regcache, AARCH64_V0_REGNO + i, ®set->vregs[i]);
139 collect_register (regcache, AARCH64_FPSR_REGNO, ®set->fpsr);
140 collect_register (regcache, AARCH64_FPCR_REGNO, ®set->fpcr);
144 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
146 const struct user_fpsimd_state *regset
147 = (const struct user_fpsimd_state *) buf;
150 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
151 supply_register (regcache, AARCH64_V0_REGNO + i, ®set->vregs[i]);
152 supply_register (regcache, AARCH64_FPSR_REGNO, ®set->fpsr);
153 supply_register (regcache, AARCH64_FPCR_REGNO, ®set->fpcr);
156 /* Enable miscellaneous debugging output. The name is historical - it
157 was originally used to debug LinuxThreads support. */
158 extern int debug_threads;
160 /* Implementation of linux_target_ops method "get_pc". */
163 aarch64_get_pc (struct regcache *regcache)
165 if (register_size (regcache->tdesc, 0) == 8)
166 return linux_get_pc_64bit (regcache);
168 return linux_get_pc_32bit (regcache);
171 /* Implementation of linux_target_ops method "set_pc". */
174 aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
176 if (register_size (regcache->tdesc, 0) == 8)
177 linux_set_pc_64bit (regcache, pc);
179 linux_set_pc_32bit (regcache, pc);
182 #define aarch64_breakpoint_len 4
184 /* AArch64 BRK software debug mode instruction.
185 This instruction needs to match gdb/aarch64-tdep.c
186 (aarch64_default_breakpoint). */
187 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
189 /* Implementation of linux_target_ops method "breakpoint_at". */
192 aarch64_breakpoint_at (CORE_ADDR where)
194 if (is_64bit_tdesc ())
196 gdb_byte insn[aarch64_breakpoint_len];
198 (*the_target->read_memory) (where, (unsigned char *) &insn,
199 aarch64_breakpoint_len);
200 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
206 return arm_breakpoint_at (where);
210 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
214 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
216 state->dr_addr_bp[i] = 0;
217 state->dr_ctrl_bp[i] = 0;
218 state->dr_ref_count_bp[i] = 0;
221 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
223 state->dr_addr_wp[i] = 0;
224 state->dr_ctrl_wp[i] = 0;
225 state->dr_ref_count_wp[i] = 0;
229 /* Return the pointer to the debug register state structure in the
230 current process' arch-specific data area. */
232 struct aarch64_debug_reg_state *
233 aarch64_get_debug_reg_state (pid_t pid)
235 struct process_info *proc = find_process_pid (pid);
237 return &proc->priv->arch_private->debug_reg_state;
240 /* Implementation of linux_target_ops method "supports_z_point_type". */
243 aarch64_supports_z_point_type (char z_type)
249 case Z_PACKET_WRITE_WP:
250 case Z_PACKET_READ_WP:
251 case Z_PACKET_ACCESS_WP:
258 /* Implementation of linux_target_ops method "insert_point".
260 It actually only records the info of the to-be-inserted bp/wp;
261 the actual insertion will happen when threads are resumed. */
264 aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
265 int len, struct raw_breakpoint *bp)
268 enum target_hw_bp_type targ_type;
269 struct aarch64_debug_reg_state *state
270 = aarch64_get_debug_reg_state (pid_of (current_thread));
273 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
274 (unsigned long) addr, len);
276 /* Determine the type from the raw breakpoint type. */
277 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
279 if (targ_type != hw_execute)
281 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
282 ret = aarch64_handle_watchpoint (targ_type, addr, len,
283 1 /* is_insert */, state);
291 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
292 instruction. Set it to 2 to correctly encode length bit
293 mask in hardware/watchpoint control register. */
296 ret = aarch64_handle_breakpoint (targ_type, addr, len,
297 1 /* is_insert */, state);
301 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
307 /* Implementation of linux_target_ops method "remove_point".
309 It actually only records the info of the to-be-removed bp/wp,
310 the actual removal will be done when threads are resumed. */
313 aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
314 int len, struct raw_breakpoint *bp)
317 enum target_hw_bp_type targ_type;
318 struct aarch64_debug_reg_state *state
319 = aarch64_get_debug_reg_state (pid_of (current_thread));
322 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
323 (unsigned long) addr, len);
325 /* Determine the type from the raw breakpoint type. */
326 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
328 /* Set up state pointers. */
329 if (targ_type != hw_execute)
331 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
337 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
338 instruction. Set it to 2 to correctly encode length bit
339 mask in hardware/watchpoint control register. */
342 ret = aarch64_handle_breakpoint (targ_type, addr, len,
343 0 /* is_insert */, state);
347 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
353 /* Implementation of linux_target_ops method "stopped_data_address". */
356 aarch64_stopped_data_address (void)
360 struct aarch64_debug_reg_state *state;
362 pid = lwpid_of (current_thread);
364 /* Get the siginfo. */
365 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
366 return (CORE_ADDR) 0;
368 /* Need to be a hardware breakpoint/watchpoint trap. */
369 if (siginfo.si_signo != SIGTRAP
370 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
371 return (CORE_ADDR) 0;
373 /* Check if the address matches any watched address. */
374 state = aarch64_get_debug_reg_state (pid_of (current_thread));
375 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
377 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
378 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
379 const CORE_ADDR addr_watch = state->dr_addr_wp[i];
380 if (state->dr_ref_count_wp[i]
381 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
382 && addr_trap >= addr_watch
383 && addr_trap < addr_watch + len)
387 return (CORE_ADDR) 0;
/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}
401 /* Fetch the thread-local storage pointer for libthread_db. */
404 ps_get_thread_area (const struct ps_prochandle *ph,
405 lwpid_t lwpid, int idx, void **base)
407 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
411 /* Implementation of linux_target_ops method "siginfo_fixup". */
414 aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
416 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
417 if (!is_64bit_tdesc ())
420 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
423 aarch64_siginfo_from_compat_siginfo (native,
424 (struct compat_siginfo *) inf);
432 /* Implementation of linux_target_ops method "linux_new_process". */
434 static struct arch_process_info *
435 aarch64_linux_new_process (void)
437 struct arch_process_info *info = XCNEW (struct arch_process_info);
439 aarch64_init_debug_reg_state (&info->debug_reg_state);
444 /* Implementation of linux_target_ops method "linux_new_fork". */
447 aarch64_linux_new_fork (struct process_info *parent,
448 struct process_info *child)
450 /* These are allocated by linux_add_process. */
451 gdb_assert (parent->priv != NULL
452 && parent->priv->arch_private != NULL);
453 gdb_assert (child->priv != NULL
454 && child->priv->arch_private != NULL);
456 /* Linux kernel before 2.6.33 commit
457 72f674d203cd230426437cdcf7dd6f681dad8b0d
458 will inherit hardware debug registers from parent
459 on fork/vfork/clone. Newer Linux kernels create such tasks with
460 zeroed debug registers.
462 GDB core assumes the child inherits the watchpoints/hw
463 breakpoints of the parent, and will remove them all from the
464 forked off process. Copy the debug registers mirrors into the
465 new process so that all breakpoints and watchpoints can be
466 removed together. The debug registers mirror will become zeroed
467 in the end before detaching the forked off process, thus making
468 this compatible with older Linux kernels too. */
470 *child->priv->arch_private = *parent->priv->arch_private;
473 /* Return the right target description according to the ELF file of
476 static const struct target_desc *
477 aarch64_linux_read_description (void)
479 unsigned int machine;
483 tid = lwpid_of (current_thread);
485 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
488 return tdesc_aarch64;
490 return tdesc_arm_with_neon;
493 /* Implementation of linux_target_ops method "arch_setup". */
496 aarch64_arch_setup (void)
498 current_process ()->tdesc = aarch64_linux_read_description ();
500 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
503 static struct regset_info aarch64_regsets[] =
505 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
506 sizeof (struct user_pt_regs), GENERAL_REGS,
507 aarch64_fill_gregset, aarch64_store_gregset },
508 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
509 sizeof (struct user_fpsimd_state), FP_REGS,
510 aarch64_fill_fpregset, aarch64_store_fpregset
515 static struct regsets_info aarch64_regsets_info =
517 aarch64_regsets, /* regsets */
519 NULL, /* disabled_regsets */
522 static struct regs_info regs_info_aarch64 =
524 NULL, /* regset_bitmap */
526 &aarch64_regsets_info,
529 /* Implementation of linux_target_ops method "regs_info". */
531 static const struct regs_info *
532 aarch64_regs_info (void)
534 if (is_64bit_tdesc ())
535 return ®s_info_aarch64;
537 return ®s_info_aarch32;
540 /* Implementation of linux_target_ops method "supports_tracepoints". */
543 aarch64_supports_tracepoints (void)
545 if (current_thread == NULL)
549 /* We don't support tracepoints on aarch32 now. */
550 return is_64bit_tdesc ();
554 /* Implementation of linux_target_ops method "get_thread_area". */
557 aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
562 iovec.iov_base = ®
563 iovec.iov_len = sizeof (reg);
565 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
573 /* List of condition codes that we need. */
575 enum aarch64_condition_codes
586 enum aarch64_operand_type
592 /* Representation of an operand. At this time, it only supports register
593 and immediate types. */
595 struct aarch64_operand
597 /* Type of the operand. */
598 enum aarch64_operand_type type;
600 /* Value of the operand according to the type. */
604 struct aarch64_register reg;
608 /* List of registers that we are currently using, we can add more here as
609 we need to use them. */
611 /* General purpose scratch registers (64 bit). */
612 static const struct aarch64_register x0 = { 0, 1 };
613 static const struct aarch64_register x1 = { 1, 1 };
614 static const struct aarch64_register x2 = { 2, 1 };
615 static const struct aarch64_register x3 = { 3, 1 };
616 static const struct aarch64_register x4 = { 4, 1 };
618 /* General purpose scratch registers (32 bit). */
619 static const struct aarch64_register w0 = { 0, 0 };
620 static const struct aarch64_register w2 = { 2, 0 };
622 /* Intra-procedure scratch registers. */
623 static const struct aarch64_register ip0 = { 16, 1 };
625 /* Special purpose registers. */
626 static const struct aarch64_register fp = { 29, 1 };
627 static const struct aarch64_register lr = { 30, 1 };
628 static const struct aarch64_register sp = { 31, 1 };
629 static const struct aarch64_register xzr = { 31, 1 };
631 /* Dynamically allocate a new register. If we know the register
632 statically, we should make it a global as above instead of using this
635 static struct aarch64_register
636 aarch64_register (unsigned num, int is64)
638 return (struct aarch64_register) { num, is64 };
641 /* Helper function to create a register operand, for instructions with
642 different types of operands.
645 p += emit_mov (p, x0, register_operand (x1)); */
647 static struct aarch64_operand
648 register_operand (struct aarch64_register reg)
650 struct aarch64_operand operand;
652 operand.type = OPERAND_REGISTER;
658 /* Helper function to create an immediate operand, for instructions with
659 different types of operands.
662 p += emit_mov (p, x0, immediate_operand (12)); */
664 static struct aarch64_operand
665 immediate_operand (uint32_t imm)
667 struct aarch64_operand operand;
669 operand.type = OPERAND_IMMEDIATE;
675 /* Helper function to create an offset memory operand.
678 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
680 static struct aarch64_memory_operand
681 offset_memory_operand (int32_t offset)
683 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
686 /* Helper function to create a pre-index memory operand.
689 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
691 static struct aarch64_memory_operand
692 preindex_memory_operand (int32_t index)
694 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
697 /* Helper function to create a post-index memory operand.
700 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
702 static struct aarch64_memory_operand
703 postindex_memory_operand (int32_t index)
705 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
726 /* Write a BLR instruction into *BUF.
730 RN is the register to branch to. */
733 emit_blr (uint32_t *buf, struct aarch64_register rn)
735 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
738 /* Write a RET instruction into *BUF.
742 RN is the register to branch to. */
745 emit_ret (uint32_t *buf, struct aarch64_register rn)
747 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
751 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
752 struct aarch64_register rt,
753 struct aarch64_register rt2,
754 struct aarch64_register rn,
755 struct aarch64_memory_operand operand)
762 opc = ENCODE (2, 2, 30);
764 opc = ENCODE (0, 2, 30);
766 switch (operand.type)
768 case MEMORY_OPERAND_OFFSET:
770 pre_index = ENCODE (1, 1, 24);
771 write_back = ENCODE (0, 1, 23);
774 case MEMORY_OPERAND_POSTINDEX:
776 pre_index = ENCODE (0, 1, 24);
777 write_back = ENCODE (1, 1, 23);
780 case MEMORY_OPERAND_PREINDEX:
782 pre_index = ENCODE (1, 1, 24);
783 write_back = ENCODE (1, 1, 23);
790 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
791 | ENCODE (operand.index >> 3, 7, 15)
792 | ENCODE (rt2.num, 5, 10)
793 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
796 /* Write a STP instruction into *BUF.
798 STP rt, rt2, [rn, #offset]
799 STP rt, rt2, [rn, #index]!
800 STP rt, rt2, [rn], #index
802 RT and RT2 are the registers to store.
803 RN is the base address register.
804 OFFSET is the immediate to add to the base address. It is limited to a
805 -512 .. 504 range (7 bits << 3). */
808 emit_stp (uint32_t *buf, struct aarch64_register rt,
809 struct aarch64_register rt2, struct aarch64_register rn,
810 struct aarch64_memory_operand operand)
812 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
815 /* Write a LDP instruction into *BUF.
817 LDP rt, rt2, [rn, #offset]
818 LDP rt, rt2, [rn, #index]!
819 LDP rt, rt2, [rn], #index
821 RT and RT2 are the registers to store.
822 RN is the base address register.
823 OFFSET is the immediate to add to the base address. It is limited to a
824 -512 .. 504 range (7 bits << 3). */
827 emit_ldp (uint32_t *buf, struct aarch64_register rt,
828 struct aarch64_register rt2, struct aarch64_register rn,
829 struct aarch64_memory_operand operand)
831 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
834 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
836 LDP qt, qt2, [rn, #offset]
838 RT and RT2 are the Q registers to store.
839 RN is the base address register.
840 OFFSET is the immediate to add to the base address. It is limited to
841 -1024 .. 1008 range (7 bits << 4). */
844 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
845 struct aarch64_register rn, int32_t offset)
847 uint32_t opc = ENCODE (2, 2, 30);
848 uint32_t pre_index = ENCODE (1, 1, 24);
850 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
851 | ENCODE (offset >> 4, 7, 15)
852 | ENCODE (rt2, 5, 10)
853 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
856 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
858 STP qt, qt2, [rn, #offset]
860 RT and RT2 are the Q registers to store.
861 RN is the base address register.
862 OFFSET is the immediate to add to the base address. It is limited to
863 -1024 .. 1008 range (7 bits << 4). */
866 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
867 struct aarch64_register rn, int32_t offset)
869 uint32_t opc = ENCODE (2, 2, 30);
870 uint32_t pre_index = ENCODE (1, 1, 24);
872 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
873 | ENCODE (offset >> 4, 7, 15)
874 | ENCODE (rt2, 5, 10)
875 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
878 /* Write a LDRH instruction into *BUF.
880 LDRH wt, [xn, #offset]
881 LDRH wt, [xn, #index]!
882 LDRH wt, [xn], #index
884 RT is the register to store.
885 RN is the base address register.
886 OFFSET is the immediate to add to the base address. It is limited to
887 0 .. 32760 range (12 bits << 3). */
890 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
891 struct aarch64_register rn,
892 struct aarch64_memory_operand operand)
894 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
897 /* Write a LDRB instruction into *BUF.
899 LDRB wt, [xn, #offset]
900 LDRB wt, [xn, #index]!
901 LDRB wt, [xn], #index
903 RT is the register to store.
904 RN is the base address register.
905 OFFSET is the immediate to add to the base address. It is limited to
906 0 .. 32760 range (12 bits << 3). */
909 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
910 struct aarch64_register rn,
911 struct aarch64_memory_operand operand)
913 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
918 /* Write a STR instruction into *BUF.
920 STR rt, [rn, #offset]
921 STR rt, [rn, #index]!
924 RT is the register to store.
925 RN is the base address register.
926 OFFSET is the immediate to add to the base address. It is limited to
927 0 .. 32760 range (12 bits << 3). */
930 emit_str (uint32_t *buf, struct aarch64_register rt,
931 struct aarch64_register rn,
932 struct aarch64_memory_operand operand)
934 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
937 /* Helper function emitting an exclusive load or store instruction. */
940 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
941 enum aarch64_opcodes opcode,
942 struct aarch64_register rs,
943 struct aarch64_register rt,
944 struct aarch64_register rt2,
945 struct aarch64_register rn)
947 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
948 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
949 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
952 /* Write a LAXR instruction into *BUF.
956 RT is the destination register.
957 RN is the base address register. */
960 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
961 struct aarch64_register rn)
963 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
967 /* Write a STXR instruction into *BUF.
971 RS is the result register, it indicates if the store succeeded or not.
972 RT is the destination register.
973 RN is the base address register. */
976 emit_stxr (uint32_t *buf, struct aarch64_register rs,
977 struct aarch64_register rt, struct aarch64_register rn)
979 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
983 /* Write a STLR instruction into *BUF.
987 RT is the register to store.
988 RN is the base address register. */
991 emit_stlr (uint32_t *buf, struct aarch64_register rt,
992 struct aarch64_register rn)
994 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
998 /* Helper function for data processing instructions with register sources. */
1001 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1002 struct aarch64_register rd,
1003 struct aarch64_register rn,
1004 struct aarch64_register rm)
1006 uint32_t size = ENCODE (rd.is64, 1, 31);
1008 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1009 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1012 /* Helper function for data processing instructions taking either a register
1016 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1017 struct aarch64_register rd,
1018 struct aarch64_register rn,
1019 struct aarch64_operand operand)
1021 uint32_t size = ENCODE (rd.is64, 1, 31);
1022 /* The opcode is different for register and immediate source operands. */
1023 uint32_t operand_opcode;
1025 if (operand.type == OPERAND_IMMEDIATE)
1027 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1028 operand_opcode = ENCODE (8, 4, 25);
1030 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1031 | ENCODE (operand.imm, 12, 10)
1032 | ENCODE (rn.num, 5, 5)
1033 | ENCODE (rd.num, 5, 0));
1037 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1038 operand_opcode = ENCODE (5, 4, 25);
1040 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1045 /* Write an ADD instruction into *BUF.
1050 This function handles both an immediate and register add.
1052 RD is the destination register.
1053 RN is the input register.
1054 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1055 OPERAND_REGISTER. */
1058 emit_add (uint32_t *buf, struct aarch64_register rd,
1059 struct aarch64_register rn, struct aarch64_operand operand)
1061 return emit_data_processing (buf, ADD, rd, rn, operand);
1064 /* Write a SUB instruction into *BUF.
1069 This function handles both an immediate and register sub.
1071 RD is the destination register.
1072 RN is the input register.
1073 IMM is the immediate to substract to RN. */
1076 emit_sub (uint32_t *buf, struct aarch64_register rd,
1077 struct aarch64_register rn, struct aarch64_operand operand)
1079 return emit_data_processing (buf, SUB, rd, rn, operand);
1082 /* Write a MOV instruction into *BUF.
1087 This function handles both a wide immediate move and a register move,
1088 with the condition that the source register is not xzr. xzr and the
1089 stack pointer share the same encoding and this function only supports
1092 RD is the destination register.
1093 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1094 OPERAND_REGISTER. */
1097 emit_mov (uint32_t *buf, struct aarch64_register rd,
1098 struct aarch64_operand operand)
1100 if (operand.type == OPERAND_IMMEDIATE)
1102 uint32_t size = ENCODE (rd.is64, 1, 31);
1103 /* Do not shift the immediate. */
1104 uint32_t shift = ENCODE (0, 2, 21);
1106 return aarch64_emit_insn (buf, MOV | size | shift
1107 | ENCODE (operand.imm, 16, 5)
1108 | ENCODE (rd.num, 5, 0));
1111 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1114 /* Write a MOVK instruction into *BUF.
1116 MOVK rd, #imm, lsl #shift
1118 RD is the destination register.
1119 IMM is the immediate.
1120 SHIFT is the logical shift left to apply to IMM. */
1123 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1126 uint32_t size = ENCODE (rd.is64, 1, 31);
1128 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1129 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1132 /* Write instructions into *BUF in order to move ADDR into a register.
1133 ADDR can be a 64-bit value.
1135 This function will emit a series of MOV and MOVK instructions, such as:
1138 MOVK xd, #(addr >> 16), lsl #16
1139 MOVK xd, #(addr >> 32), lsl #32
1140 MOVK xd, #(addr >> 48), lsl #48 */
1143 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1147 /* The MOV (wide immediate) instruction clears to top bits of the
1149 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1151 if ((addr >> 16) != 0)
1152 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1156 if ((addr >> 32) != 0)
1157 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1161 if ((addr >> 48) != 0)
1162 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1167 /* Write a SUBS instruction into *BUF.
1171 This instruction update the condition flags.
1173 RD is the destination register.
1174 RN and RM are the source registers. */
1177 emit_subs (uint32_t *buf, struct aarch64_register rd,
1178 struct aarch64_register rn, struct aarch64_operand operand)
1180 return emit_data_processing (buf, SUBS, rd, rn, operand);
1183 /* Write a CMP instruction into *BUF.
1187 This instruction is an alias of SUBS xzr, rn, rm.
1189 RN and RM are the registers to compare. */
1192 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1193 struct aarch64_operand operand)
1195 return emit_subs (buf, xzr, rn, operand);
1198 /* Write a AND instruction into *BUF.
1202 RD is the destination register.
1203 RN and RM are the source registers. */
1206 emit_and (uint32_t *buf, struct aarch64_register rd,
1207 struct aarch64_register rn, struct aarch64_register rm)
1209 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1212 /* Write a ORR instruction into *BUF.
1216 RD is the destination register.
1217 RN and RM are the source registers. */
1220 emit_orr (uint32_t *buf, struct aarch64_register rd,
1221 struct aarch64_register rn, struct aarch64_register rm)
1223 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1226 /* Write a ORN instruction into *BUF.
1230 RD is the destination register.
1231 RN and RM are the source registers. */
1234 emit_orn (uint32_t *buf, struct aarch64_register rd,
1235 struct aarch64_register rn, struct aarch64_register rm)
1237 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1240 /* Write a EOR instruction into *BUF.
1244 RD is the destination register.
1245 RN and RM are the source registers. */
1248 emit_eor (uint32_t *buf, struct aarch64_register rd,
1249 struct aarch64_register rn, struct aarch64_register rm)
1251 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1254 /* Write a MVN instruction into *BUF.
1258 This is an alias for ORN rd, xzr, rm.
1260 RD is the destination register.
1261 RM is the source register. */
1264 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1265 struct aarch64_register rm)
1267 return emit_orn (buf, rd, xzr, rm);
1270 /* Write a LSLV instruction into *BUF.
1274 RD is the destination register.
1275 RN and RM are the source registers. */
1278 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1279 struct aarch64_register rn, struct aarch64_register rm)
1281 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1284 /* Write a LSRV instruction into *BUF.
1288 RD is the destination register.
1289 RN and RM are the source registers. */
1292 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1293 struct aarch64_register rn, struct aarch64_register rm)
1295 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1298 /* Write a ASRV instruction into *BUF.
1302 RD is the destination register.
1303 RN and RM are the source registers. */
1306 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1307 struct aarch64_register rn, struct aarch64_register rm)
1309 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1312 /* Write a MUL instruction into *BUF.
1316 RD is the destination register.
1317 RN and RM are the source registers. */
1320 emit_mul (uint32_t *buf, struct aarch64_register rd,
1321 struct aarch64_register rn, struct aarch64_register rm)
1323 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1326 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1330 RT is the destination register.
1331 SYSTEM_REG is special purpose register to read. */
1334 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1335 enum aarch64_system_control_registers system_reg)
1337 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1338 | ENCODE (rt.num, 5, 0));
1341 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1345 SYSTEM_REG is special purpose register to write.
1346 RT is the input register. */
1349 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1350 struct aarch64_register rt)
1352 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1353 | ENCODE (rt.num, 5, 0));
1356 /* Write a SEVL instruction into *BUF.
1358 This is a hint instruction telling the hardware to trigger an event. */
1361 emit_sevl (uint32_t *buf)
1363 return aarch64_emit_insn (buf, SEVL);
1366 /* Write a WFE instruction into *BUF.
1368 This is a hint instruction telling the hardware to wait for an event. */
1371 emit_wfe (uint32_t *buf)
1373 return aarch64_emit_insn (buf, WFE);
1376 /* Write a SBFM instruction into *BUF.
1378 SBFM rd, rn, #immr, #imms
1380 This instruction moves the bits from #immr to #imms into the
1381 destination, sign extending the result.
1383 RD is the destination register.
1384 RN is the source register.
1385 IMMR is the bit number to start at (least significant bit).
1386 IMMS is the bit number to stop at (most significant bit). */
1389 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1390 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1392 uint32_t size = ENCODE (rd.is64, 1, 31);
1393 uint32_t n = ENCODE (rd.is64, 1, 22);
1395 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1396 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1397 | ENCODE (rd.num, 5, 0));
1400 /* Write a SBFX instruction into *BUF.
1402 SBFX rd, rn, #lsb, #width
1404 This instruction moves #width bits from #lsb into the destination, sign
1405 extending the result. This is an alias for:
1407 SBFM rd, rn, #lsb, #(lsb + width - 1)
1409 RD is the destination register.
1410 RN is the source register.
1411 LSB is the bit number to start at (least significant bit).
1412 WIDTH is the number of bits to move. */
1415 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1416 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1418 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1421 /* Write a UBFM instruction into *BUF.
1423 UBFM rd, rn, #immr, #imms
1425 This instruction moves the bits from #immr to #imms into the
1426 destination, extending the result with zeros.
1428 RD is the destination register.
1429 RN is the source register.
1430 IMMR is the bit number to start at (least significant bit).
1431 IMMS is the bit number to stop at (most significant bit). */
1434 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1435 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1437 uint32_t size = ENCODE (rd.is64, 1, 31);
1438 uint32_t n = ENCODE (rd.is64, 1, 22);
1440 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1441 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1442 | ENCODE (rd.num, 5, 0));
1445 /* Write a UBFX instruction into *BUF.
1447 UBFX rd, rn, #lsb, #width
1449 This instruction moves #width bits from #lsb into the destination,
1450 extending the result with zeros. This is an alias for:
1452 UBFM rd, rn, #lsb, #(lsb + width - 1)
1454 RD is the destination register.
1455 RN is the source register.
1456 LSB is the bit number to start at (least significant bit).
1457 WIDTH is the number of bits to move. */
1460 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1461 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1463 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1466 /* Write a CSINC instruction into *BUF.
1468 CSINC rd, rn, rm, cond
1470 This instruction conditionally increments rn or rm and places the result
1471 in rd. rn is chosen is the condition is true.
1473 RD is the destination register.
1474 RN and RM are the source registers.
1475 COND is the encoded condition. */
1478 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1479 struct aarch64_register rn, struct aarch64_register rm,
1482 uint32_t size = ENCODE (rd.is64, 1, 31);
1484 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1485 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1486 | ENCODE (rd.num, 5, 0));
1489 /* Write a CSET instruction into *BUF.
1493 This instruction conditionally write 1 or 0 in the destination register.
1494 1 is written if the condition is true. This is an alias for:
1496 CSINC rd, xzr, xzr, !cond
1498 Note that the condition needs to be inverted.
1500 RD is the destination register.
1501 RN and RM are the source registers.
1502 COND is the encoded condition. */
1505 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1507 /* The least significant bit of the condition needs toggling in order to
1509 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1512 /* Write LEN instructions from BUF into the inferior memory at *TO.
1514 Note instructions are always little endian on AArch64, unlike data. */
1517 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1519 size_t byte_len = len * sizeof (uint32_t);
1520 #if (__BYTE_ORDER == __BIG_ENDIAN)
1521 uint32_t *le_buf = xmalloc (byte_len);
1524 for (i = 0; i < len; i++)
1525 le_buf[i] = htole32 (buf[i]);
1527 write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);
1531 write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
1537 /* Sub-class of struct aarch64_insn_data, store information of
1538 instruction relocation for fast tracepoint. Visitor can
1539 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1540 the relocated instructions in buffer pointed by INSN_PTR. */
1542 struct aarch64_insn_relocation_data
1544 struct aarch64_insn_data base;
1546 /* The new address the instruction is relocated to. */
1548 /* Pointer to the buffer of relocated instruction(s). */
1552 /* Implementation of aarch64_insn_visitor method "b". */
1555 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1556 struct aarch64_insn_data *data)
1558 struct aarch64_insn_relocation_data *insn_reloc
1559 = (struct aarch64_insn_relocation_data *) data;
1561 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1563 if (can_encode_int32 (new_offset, 28))
1564 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1567 /* Implementation of aarch64_insn_visitor method "b_cond". */
1570 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1571 struct aarch64_insn_data *data)
1573 struct aarch64_insn_relocation_data *insn_reloc
1574 = (struct aarch64_insn_relocation_data *) data;
1576 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1578 if (can_encode_int32 (new_offset, 21))
1580 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1583 else if (can_encode_int32 (new_offset, 28))
1585 /* The offset is out of range for a conditional branch
1586 instruction but not for a unconditional branch. We can use
1587 the following instructions instead:
1589 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1590 B NOT_TAKEN ; Else jump over TAKEN and continue.
1597 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1598 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1599 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1603 /* Implementation of aarch64_insn_visitor method "cb". */
1606 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1607 const unsigned rn, int is64,
1608 struct aarch64_insn_data *data)
1610 struct aarch64_insn_relocation_data *insn_reloc
1611 = (struct aarch64_insn_relocation_data *) data;
1613 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1615 if (can_encode_int32 (new_offset, 21))
1617 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1618 aarch64_register (rn, is64), new_offset);
1620 else if (can_encode_int32 (new_offset, 28))
1622 /* The offset is out of range for a compare and branch
1623 instruction but not for a unconditional branch. We can use
1624 the following instructions instead:
1626 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1627 B NOT_TAKEN ; Else jump over TAKEN and continue.
1633 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1634 aarch64_register (rn, is64), 8);
1635 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1636 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1640 /* Implementation of aarch64_insn_visitor method "tb". */
1643 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1644 const unsigned rt, unsigned bit,
1645 struct aarch64_insn_data *data)
1647 struct aarch64_insn_relocation_data *insn_reloc
1648 = (struct aarch64_insn_relocation_data *) data;
1650 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1652 if (can_encode_int32 (new_offset, 16))
1654 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1655 aarch64_register (rt, 1), new_offset);
1657 else if (can_encode_int32 (new_offset, 28))
1659 /* The offset is out of range for a test bit and branch
1660 instruction but not for a unconditional branch. We can use
1661 the following instructions instead:
1663 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1664 B NOT_TAKEN ; Else jump over TAKEN and continue.
1670 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1671 aarch64_register (rt, 1), 8);
1672 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1673 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1678 /* Implementation of aarch64_insn_visitor method "adr". */
1681 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1683 struct aarch64_insn_data *data)
1685 struct aarch64_insn_relocation_data *insn_reloc
1686 = (struct aarch64_insn_relocation_data *) data;
1687 /* We know exactly the address the ADR{P,} instruction will compute.
1688 We can just write it to the destination register. */
1689 CORE_ADDR address = data->insn_addr + offset;
1693 /* Clear the lower 12 bits of the offset to get the 4K page. */
1694 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1695 aarch64_register (rd, 1),
1699 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1700 aarch64_register (rd, 1), address);
1703 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1706 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1707 const unsigned rt, const int is64,
1708 struct aarch64_insn_data *data)
1710 struct aarch64_insn_relocation_data *insn_reloc
1711 = (struct aarch64_insn_relocation_data *) data;
1712 CORE_ADDR address = data->insn_addr + offset;
1714 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1715 aarch64_register (rt, 1), address);
1717 /* We know exactly what address to load from, and what register we
1720 MOV xd, #(oldloc + offset)
1721 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1724 LDR xd, [xd] ; or LDRSW xd, [xd]
1729 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1730 aarch64_register (rt, 1),
1731 aarch64_register (rt, 1),
1732 offset_memory_operand (0));
1734 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1735 aarch64_register (rt, is64),
1736 aarch64_register (rt, 1),
1737 offset_memory_operand (0));
1740 /* Implementation of aarch64_insn_visitor method "others". */
1743 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1744 struct aarch64_insn_data *data)
1746 struct aarch64_insn_relocation_data *insn_reloc
1747 = (struct aarch64_insn_relocation_data *) data;
1749 /* The instruction is not PC relative. Just re-emit it at the new
1751 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1754 static const struct aarch64_insn_visitor visitor =
1756 aarch64_ftrace_insn_reloc_b,
1757 aarch64_ftrace_insn_reloc_b_cond,
1758 aarch64_ftrace_insn_reloc_cb,
1759 aarch64_ftrace_insn_reloc_tb,
1760 aarch64_ftrace_insn_reloc_adr,
1761 aarch64_ftrace_insn_reloc_ldr_literal,
1762 aarch64_ftrace_insn_reloc_others,
1765 /* Implementation of linux_target_ops method
1766 "install_fast_tracepoint_jump_pad". */
1769 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1771 CORE_ADDR collector,
1774 CORE_ADDR *jump_entry,
1775 CORE_ADDR *trampoline,
1776 ULONGEST *trampoline_size,
1777 unsigned char *jjump_pad_insn,
1778 ULONGEST *jjump_pad_insn_size,
1779 CORE_ADDR *adjusted_insn_addr,
1780 CORE_ADDR *adjusted_insn_addr_end,
1788 CORE_ADDR buildaddr = *jump_entry;
1789 struct aarch64_insn_relocation_data insn_data;
1791 /* We need to save the current state on the stack both to restore it
1792 later and to collect register values when the tracepoint is hit.
1794 The saved registers are pushed in a layout that needs to be in sync
1795 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1796 the supply_fast_tracepoint_registers function will fill in the
1797 register cache from a pointer to saved registers on the stack we build
1800 For simplicity, we set the size of each cell on the stack to 16 bytes.
1801 This way one cell can hold any register type, from system registers
1802 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1803 has to be 16 bytes aligned anyway.
1805 Note that the CPSR register does not exist on AArch64. Instead we
1806 can access system bits describing the process state with the
1807 MRS/MSR instructions, namely the condition flags. We save them as
1808 if they are part of a CPSR register because that's how GDB
1809 interprets these system bits. At the moment, only the condition
1810 flags are saved in CPSR (NZCV).
1812 Stack layout, each cell is 16 bytes (descending):
1814 High *-------- SIMD&FP registers from 31 down to 0. --------*
1820 *---- General purpose registers from 30 down to 0. ----*
1826 *------------- Special purpose registers. -------------*
1829 | CPSR (NZCV) | 5 cells
1832 *------------- collecting_t object --------------------*
1833 | TPIDR_EL0 | struct tracepoint * |
1834 Low *------------------------------------------------------*
1836 After this stack is set up, we issue a call to the collector, passing
1837 it the saved registers at (SP + 16). */
1839 /* Push SIMD&FP registers on the stack:
1841 SUB sp, sp, #(32 * 16)
1843 STP q30, q31, [sp, #(30 * 16)]
1848 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1849 for (i = 30; i >= 0; i -= 2)
1850 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1852 /* Push general puspose registers on the stack. Note that we do not need
1853 to push x31 as it represents the xzr register and not the stack
1854 pointer in a STR instruction.
1856 SUB sp, sp, #(31 * 16)
1858 STR x30, [sp, #(30 * 16)]
1863 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1864 for (i = 30; i >= 0; i -= 1)
1865 p += emit_str (p, aarch64_register (i, 1), sp,
1866 offset_memory_operand (i * 16));
1868 /* Make space for 5 more cells.
1870 SUB sp, sp, #(5 * 16)
1873 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1878 ADD x4, sp, #((32 + 31 + 5) * 16)
1879 STR x4, [sp, #(4 * 16)]
1882 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1883 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1885 /* Save PC (tracepoint address):
1890 STR x3, [sp, #(3 * 16)]
1894 p += emit_mov_addr (p, x3, tpaddr);
1895 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1897 /* Save CPSR (NZCV), FPSR and FPCR:
1903 STR x2, [sp, #(2 * 16)]
1904 STR x1, [sp, #(1 * 16)]
1905 STR x0, [sp, #(0 * 16)]
1908 p += emit_mrs (p, x2, NZCV);
1909 p += emit_mrs (p, x1, FPSR);
1910 p += emit_mrs (p, x0, FPCR);
1911 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
1912 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
1913 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
1915 /* Push the collecting_t object. It consist of the address of the
1916 tracepoint and an ID for the current thread. We get the latter by
1917 reading the tpidr_el0 system register. It corresponds to the
1918 NT_ARM_TLS register accessible with ptrace.
1925 STP x0, x1, [sp, #-16]!
1929 p += emit_mov_addr (p, x0, tpoint);
1930 p += emit_mrs (p, x1, TPIDR_EL0);
1931 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
1935 The shared memory for the lock is at lockaddr. It will hold zero
1936 if no-one is holding the lock, otherwise it contains the address of
1937 the collecting_t object on the stack of the thread which acquired it.
1939 At this stage, the stack pointer points to this thread's collecting_t
1942 We use the following registers:
1943 - x0: Address of the lock.
1944 - x1: Pointer to collecting_t object.
1945 - x2: Scratch register.
1951 ; Trigger an event local to this core. So the following WFE
1952 ; instruction is ignored.
1955 ; Wait for an event. The event is triggered by either the SEVL
1956 ; or STLR instructions (store release).
1959 ; Atomically read at lockaddr. This marks the memory location as
1960 ; exclusive. This instruction also has memory constraints which
1961 ; make sure all previous data reads and writes are done before
1965 ; Try again if another thread holds the lock.
1968 ; We can lock it! Write the address of the collecting_t object.
1969 ; This instruction will fail if the memory location is not marked
1970 ; as exclusive anymore. If it succeeds, it will remove the
1971 ; exclusive mark on the memory location. This way, if another
1972 ; thread executes this instruction before us, we will fail and try
1979 p += emit_mov_addr (p, x0, lockaddr);
1980 p += emit_mov (p, x1, register_operand (sp));
1984 p += emit_ldaxr (p, x2, x0);
1985 p += emit_cb (p, 1, w2, -2 * 4);
1986 p += emit_stxr (p, w2, x1, x0);
1987 p += emit_cb (p, 1, x2, -4 * 4);
1989 /* Call collector (struct tracepoint *, unsigned char *):
1994 ; Saved registers start after the collecting_t object.
1997 ; We use an intra-procedure-call scratch register.
1998 MOV ip0, #(collector)
2001 ; And call back to C!
2006 p += emit_mov_addr (p, x0, tpoint);
2007 p += emit_add (p, x1, sp, immediate_operand (16));
2009 p += emit_mov_addr (p, ip0, collector);
2010 p += emit_blr (p, ip0);
2012 /* Release the lock.
2017 ; This instruction is a normal store with memory ordering
2018 ; constraints. Thanks to this we do not have to put a data
2019 ; barrier instruction to make sure all data read and writes are done
2020 ; before this instruction is executed. Furthermore, this instrucion
2021 ; will trigger an event, letting other threads know they can grab
2026 p += emit_mov_addr (p, x0, lockaddr);
2027 p += emit_stlr (p, xzr, x0);
2029 /* Free collecting_t object:
2034 p += emit_add (p, sp, sp, immediate_operand (16));
2036 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2037 registers from the stack.
2039 LDR x2, [sp, #(2 * 16)]
2040 LDR x1, [sp, #(1 * 16)]
2041 LDR x0, [sp, #(0 * 16)]
2047 ADD sp, sp #(5 * 16)
2050 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2051 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2052 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2053 p += emit_msr (p, NZCV, x2);
2054 p += emit_msr (p, FPSR, x1);
2055 p += emit_msr (p, FPCR, x0);
2057 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2059 /* Pop general purpose registers:
2063 LDR x30, [sp, #(30 * 16)]
2065 ADD sp, sp, #(31 * 16)
2068 for (i = 0; i <= 30; i += 1)
2069 p += emit_ldr (p, aarch64_register (i, 1), sp,
2070 offset_memory_operand (i * 16));
2071 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2073 /* Pop SIMD&FP registers:
2077 LDP q30, q31, [sp, #(30 * 16)]
2079 ADD sp, sp, #(32 * 16)
2082 for (i = 0; i <= 30; i += 2)
2083 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2084 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2086 /* Write the code into the inferior memory. */
2087 append_insns (&buildaddr, p - buf, buf);
2089 /* Now emit the relocated instruction. */
2090 *adjusted_insn_addr = buildaddr;
2091 target_read_uint32 (tpaddr, &insn);
2093 insn_data.base.insn_addr = tpaddr;
2094 insn_data.new_addr = buildaddr;
2095 insn_data.insn_ptr = buf;
2097 aarch64_relocate_instruction (insn, &visitor,
2098 (struct aarch64_insn_data *) &insn_data);
2100 /* We may not have been able to relocate the instruction. */
2101 if (insn_data.insn_ptr == buf)
2104 "E.Could not relocate instruction from %s to %s.",
2105 core_addr_to_string_nz (tpaddr),
2106 core_addr_to_string_nz (buildaddr));
2110 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2111 *adjusted_insn_addr_end = buildaddr;
2113 /* Go back to the start of the buffer. */
2116 /* Emit a branch back from the jump pad. */
2117 offset = (tpaddr + orig_size - buildaddr);
2118 if (!can_encode_int32 (offset, 28))
2121 "E.Jump back from jump pad too far from tracepoint "
2122 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2127 p += emit_b (p, 0, offset);
2128 append_insns (&buildaddr, p - buf, buf);
2130 /* Give the caller a branch instruction into the jump pad. */
2131 offset = (*jump_entry - tpaddr);
2132 if (!can_encode_int32 (offset, 28))
2135 "E.Jump pad too far from tracepoint "
2136 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2141 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2142 *jjump_pad_insn_size = 4;
2144 /* Return the end address of our pad. */
2145 *jump_entry = buildaddr;
2150 /* Helper function writing LEN instructions from START into
2151 current_insn_ptr. */
2154 emit_ops_insns (const uint32_t *start, int len)
2156 CORE_ADDR buildaddr = current_insn_ptr;
2159 debug_printf ("Adding %d instrucions at %s\n",
2160 len, paddress (buildaddr));
2162 append_insns (&buildaddr, len, start);
2163 current_insn_ptr = buildaddr;
2166 /* Pop a register from the stack. */
2169 emit_pop (uint32_t *buf, struct aarch64_register rt)
2171 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2174 /* Push a register on the stack. */
2177 emit_push (uint32_t *buf, struct aarch64_register rt)
2179 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2182 /* Implementation of emit_ops method "emit_prologue". */
2185 aarch64_emit_prologue (void)
2190 /* This function emit a prologue for the following function prototype:
2192 enum eval_result_type f (unsigned char *regs,
2195 The first argument is a buffer of raw registers. The second
2196 argument is the result of
2197 evaluating the expression, which will be set to whatever is on top of
2198 the stack at the end.
2200 The stack set up by the prologue is as such:
2202 High *------------------------------------------------------*
2205 | x1 (ULONGEST *value) |
2206 | x0 (unsigned char *regs) |
2207 Low *------------------------------------------------------*
2209 As we are implementing a stack machine, each opcode can expand the
2210 stack so we never know how far we are from the data saved by this
2211 prologue. In order to be able refer to value and regs later, we save
2212 the current stack pointer in the frame pointer. This way, it is not
2213 clobbered when calling C functions.
2215 Finally, throughtout every operation, we are using register x0 as the
2216 top of the stack, and x1 as a scratch register. */
2218 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2219 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2220 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2222 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2225 emit_ops_insns (buf, p - buf);
2228 /* Implementation of emit_ops method "emit_epilogue". */
2231 aarch64_emit_epilogue (void)
2236 /* Store the result of the expression (x0) in *value. */
2237 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2238 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2239 p += emit_str (p, x0, x1, offset_memory_operand (0));
2241 /* Restore the previous state. */
2242 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2243 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2245 /* Return expr_eval_no_error. */
2246 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2247 p += emit_ret (p, lr);
2249 emit_ops_insns (buf, p - buf);
2252 /* Implementation of emit_ops method "emit_add". */
2255 aarch64_emit_add (void)
2260 p += emit_pop (p, x1);
2261 p += emit_add (p, x0, x0, register_operand (x1));
2263 emit_ops_insns (buf, p - buf);
2266 /* Implementation of emit_ops method "emit_sub". */
2269 aarch64_emit_sub (void)
2274 p += emit_pop (p, x1);
2275 p += emit_sub (p, x0, x0, register_operand (x1));
2277 emit_ops_insns (buf, p - buf);
2280 /* Implementation of emit_ops method "emit_mul". */
2283 aarch64_emit_mul (void)
2288 p += emit_pop (p, x1);
2289 p += emit_mul (p, x0, x1, x0);
2291 emit_ops_insns (buf, p - buf);
2294 /* Implementation of emit_ops method "emit_lsh". */
2297 aarch64_emit_lsh (void)
2302 p += emit_pop (p, x1);
2303 p += emit_lslv (p, x0, x1, x0);
2305 emit_ops_insns (buf, p - buf);
2308 /* Implementation of emit_ops method "emit_rsh_signed". */
2311 aarch64_emit_rsh_signed (void)
2316 p += emit_pop (p, x1);
2317 p += emit_asrv (p, x0, x1, x0);
2319 emit_ops_insns (buf, p - buf);
2322 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2325 aarch64_emit_rsh_unsigned (void)
2330 p += emit_pop (p, x1);
2331 p += emit_lsrv (p, x0, x1, x0);
2333 emit_ops_insns (buf, p - buf);
2336 /* Implementation of emit_ops method "emit_ext". */
2339 aarch64_emit_ext (int arg)
2344 p += emit_sbfx (p, x0, x0, 0, arg);
2346 emit_ops_insns (buf, p - buf);
2349 /* Implementation of emit_ops method "emit_log_not". */
2352 aarch64_emit_log_not (void)
2357 /* If the top of the stack is 0, replace it with 1. Else replace it with
2360 p += emit_cmp (p, x0, immediate_operand (0));
2361 p += emit_cset (p, x0, EQ);
2363 emit_ops_insns (buf, p - buf);
2366 /* Implementation of emit_ops method "emit_bit_and". */
2369 aarch64_emit_bit_and (void)
2374 p += emit_pop (p, x1);
2375 p += emit_and (p, x0, x0, x1);
2377 emit_ops_insns (buf, p - buf);
2380 /* Implementation of emit_ops method "emit_bit_or". */
2383 aarch64_emit_bit_or (void)
2388 p += emit_pop (p, x1);
2389 p += emit_orr (p, x0, x0, x1);
2391 emit_ops_insns (buf, p - buf);
2394 /* Implementation of emit_ops method "emit_bit_xor". */
2397 aarch64_emit_bit_xor (void)
2402 p += emit_pop (p, x1);
2403 p += emit_eor (p, x0, x0, x1);
2405 emit_ops_insns (buf, p - buf);
2408 /* Implementation of emit_ops method "emit_bit_not". */
2411 aarch64_emit_bit_not (void)
2416 p += emit_mvn (p, x0, x0);
2418 emit_ops_insns (buf, p - buf);
2421 /* Implementation of emit_ops method "emit_equal". */
2424 aarch64_emit_equal (void)
2429 p += emit_pop (p, x1);
2430 p += emit_cmp (p, x0, register_operand (x1));
2431 p += emit_cset (p, x0, EQ);
2433 emit_ops_insns (buf, p - buf);
2436 /* Implementation of emit_ops method "emit_less_signed". */
2439 aarch64_emit_less_signed (void)
2444 p += emit_pop (p, x1);
2445 p += emit_cmp (p, x1, register_operand (x0));
2446 p += emit_cset (p, x0, LT);
2448 emit_ops_insns (buf, p - buf);
2451 /* Implementation of emit_ops method "emit_less_unsigned". */
2454 aarch64_emit_less_unsigned (void)
2459 p += emit_pop (p, x1);
2460 p += emit_cmp (p, x1, register_operand (x0));
2461 p += emit_cset (p, x0, LO);
2463 emit_ops_insns (buf, p - buf);
2466 /* Implementation of emit_ops method "emit_ref". */
2469 aarch64_emit_ref (int size)
2477 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2480 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2483 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2486 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2489 /* Unknown size, bail on compilation. */
2494 emit_ops_insns (buf, p - buf);
2497 /* Implementation of emit_ops method "emit_if_goto". */
2500 aarch64_emit_if_goto (int *offset_p, int *size_p)
2505 /* The Z flag is set or cleared here. */
2506 p += emit_cmp (p, x0, immediate_operand (0));
2507 /* This instruction must not change the Z flag. */
2508 p += emit_pop (p, x0);
2509 /* Branch over the next instruction if x0 == 0. */
2510 p += emit_bcond (p, EQ, 8);
2512 /* The NOP instruction will be patched with an unconditional branch. */
2514 *offset_p = (p - buf) * 4;
2519 emit_ops_insns (buf, p - buf);
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
2540 /* Implementation of emit_ops method "write_goto_address". */
2543 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2547 emit_b (&insn, 0, to - from);
2548 append_insns (&from, 1, &insn);
2551 /* Implementation of emit_ops method "emit_const". */
2554 aarch64_emit_const (LONGEST num)
2559 p += emit_mov_addr (p, x0, num);
2561 emit_ops_insns (buf, p - buf);
2564 /* Implementation of emit_ops method "emit_call". */
2567 aarch64_emit_call (CORE_ADDR fn)
2572 p += emit_mov_addr (p, ip0, fn);
2573 p += emit_blr (p, ip0);
2575 emit_ops_insns (buf, p - buf);
2578 /* Implementation of emit_ops method "emit_reg". */
2581 aarch64_emit_reg (int reg)
2586 /* Set x0 to unsigned char *regs. */
2587 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2588 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2589 p += emit_mov (p, x1, immediate_operand (reg));
2591 emit_ops_insns (buf, p - buf);
2593 aarch64_emit_call (get_raw_reg_func_addr ());
2596 /* Implementation of emit_ops method "emit_pop". */
2599 aarch64_emit_pop (void)
2604 p += emit_pop (p, x0);
2606 emit_ops_insns (buf, p - buf);
2609 /* Implementation of emit_ops method "emit_stack_flush". */
2612 aarch64_emit_stack_flush (void)
2617 p += emit_push (p, x0);
2619 emit_ops_insns (buf, p - buf);
2622 /* Implementation of emit_ops method "emit_zero_ext". */
2625 aarch64_emit_zero_ext (int arg)
2630 p += emit_ubfx (p, x0, x0, 0, arg);
2632 emit_ops_insns (buf, p - buf);
2635 /* Implementation of emit_ops method "emit_swap". */
2638 aarch64_emit_swap (void)
2643 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2644 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2645 p += emit_mov (p, x0, register_operand (x1));
2647 emit_ops_insns (buf, p - buf);
2650 /* Implementation of emit_ops method "emit_stack_adjust". */
2653 aarch64_emit_stack_adjust (int n)
2655 /* This is not needed with our design. */
2659 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2661 emit_ops_insns (buf, p - buf);
2664 /* Implementation of emit_ops method "emit_int_call_1". */
2667 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2672 p += emit_mov (p, x0, immediate_operand (arg1));
2674 emit_ops_insns (buf, p - buf);
2676 aarch64_emit_call (fn);
2679 /* Implementation of emit_ops method "emit_void_call_2". */
2682 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2687 /* Push x0 on the stack. */
2688 aarch64_emit_stack_flush ();
2690 /* Setup arguments for the function call:
2693 x1: top of the stack
2698 p += emit_mov (p, x1, register_operand (x0));
2699 p += emit_mov (p, x0, immediate_operand (arg1));
2701 emit_ops_insns (buf, p - buf);
2703 aarch64_emit_call (fn);
2706 aarch64_emit_pop ();
2709 /* Implementation of emit_ops method "emit_eq_goto". */
2712 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2717 p += emit_pop (p, x1);
2718 p += emit_cmp (p, x1, register_operand (x0));
2719 /* Branch over the next instruction if x0 != x1. */
2720 p += emit_bcond (p, NE, 8);
2721 /* The NOP instruction will be patched with an unconditional branch. */
2723 *offset_p = (p - buf) * 4;
2728 emit_ops_insns (buf, p - buf);
2731 /* Implementation of emit_ops method "emit_ne_goto". */
2734 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2739 p += emit_pop (p, x1);
2740 p += emit_cmp (p, x1, register_operand (x0));
2741 /* Branch over the next instruction if x0 == x1. */
2742 p += emit_bcond (p, EQ, 8);
2743 /* The NOP instruction will be patched with an unconditional branch. */
2745 *offset_p = (p - buf) * 4;
2750 emit_ops_insns (buf, p - buf);
2753 /* Implementation of emit_ops method "emit_lt_goto". */
2756 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2761 p += emit_pop (p, x1);
2762 p += emit_cmp (p, x1, register_operand (x0));
2763 /* Branch over the next instruction if x0 >= x1. */
2764 p += emit_bcond (p, GE, 8);
2765 /* The NOP instruction will be patched with an unconditional branch. */
2767 *offset_p = (p - buf) * 4;
2772 emit_ops_insns (buf, p - buf);
2775 /* Implementation of emit_ops method "emit_le_goto". */
2778 aarch64_emit_le_goto (int *offset_p, int *size_p)
2783 p += emit_pop (p, x1);
2784 p += emit_cmp (p, x1, register_operand (x0));
2785 /* Branch over the next instruction if x0 > x1. */
2786 p += emit_bcond (p, GT, 8);
2787 /* The NOP instruction will be patched with an unconditional branch. */
2789 *offset_p = (p - buf) * 4;
2794 emit_ops_insns (buf, p - buf);
2797 /* Implementation of emit_ops method "emit_gt_goto". */
2800 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2805 p += emit_pop (p, x1);
2806 p += emit_cmp (p, x1, register_operand (x0));
2807 /* Branch over the next instruction if x0 <= x1. */
2808 p += emit_bcond (p, LE, 8);
2809 /* The NOP instruction will be patched with an unconditional branch. */
2811 *offset_p = (p - buf) * 4;
2816 emit_ops_insns (buf, p - buf);
2819 /* Implementation of emit_ops method "emit_ge_got". */
2822 aarch64_emit_ge_got (int *offset_p, int *size_p)
2827 p += emit_pop (p, x1);
2828 p += emit_cmp (p, x1, register_operand (x0));
2829 /* Branch over the next instruction if x0 <= x1. */
2830 p += emit_bcond (p, LT, 8);
2831 /* The NOP instruction will be patched with an unconditional branch. */
2833 *offset_p = (p - buf) * 4;
2838 emit_ops_insns (buf, p - buf);
2841 static struct emit_ops aarch64_emit_ops_impl =
2843 aarch64_emit_prologue,
2844 aarch64_emit_epilogue,
2849 aarch64_emit_rsh_signed,
2850 aarch64_emit_rsh_unsigned,
2852 aarch64_emit_log_not,
2853 aarch64_emit_bit_and,
2854 aarch64_emit_bit_or,
2855 aarch64_emit_bit_xor,
2856 aarch64_emit_bit_not,
2858 aarch64_emit_less_signed,
2859 aarch64_emit_less_unsigned,
2861 aarch64_emit_if_goto,
2863 aarch64_write_goto_address,
2868 aarch64_emit_stack_flush,
2869 aarch64_emit_zero_ext,
2871 aarch64_emit_stack_adjust,
2872 aarch64_emit_int_call_1,
2873 aarch64_emit_void_call_2,
2874 aarch64_emit_eq_goto,
2875 aarch64_emit_ne_goto,
2876 aarch64_emit_lt_goto,
2877 aarch64_emit_le_goto,
2878 aarch64_emit_gt_goto,
2879 aarch64_emit_ge_got,
2882 /* Implementation of linux_target_ops method "emit_ops". */
2884 static struct emit_ops *
2885 aarch64_emit_ops (void)
2887 return &aarch64_emit_ops_impl;
2890 /* Implementation of linux_target_ops method
2891 "get_min_fast_tracepoint_insn_len". */
2894 aarch64_get_min_fast_tracepoint_insn_len (void)
2899 /* Implementation of linux_target_ops method "supports_range_stepping". */
2902 aarch64_supports_range_stepping (void)
2907 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2909 static const gdb_byte *
2910 aarch64_sw_breakpoint_from_kind (int kind, int *size)
2912 if (is_64bit_tdesc ())
2914 *size = aarch64_breakpoint_len;
2915 return aarch64_breakpoint;
2918 return arm_sw_breakpoint_from_kind (kind, size);
2921 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
2924 aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
2926 if (is_64bit_tdesc ())
2927 return aarch64_breakpoint_len;
2929 return arm_breakpoint_kind_from_pc (pcptr);
2932 /* Implementation of the linux_target_ops method
2933 "breakpoint_kind_from_current_state". */
2936 aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
2938 if (is_64bit_tdesc ())
2939 return aarch64_breakpoint_len;
2941 return arm_breakpoint_kind_from_current_state (pcptr);
2944 /* Support for hardware single step. */
2947 aarch64_supports_hardware_single_step (void)
2952 struct linux_target_ops the_low_target =
2956 aarch64_cannot_fetch_register,
2957 aarch64_cannot_store_register,
2958 NULL, /* fetch_register */
2961 aarch64_breakpoint_kind_from_pc,
2962 aarch64_sw_breakpoint_from_kind,
2963 NULL, /* get_next_pcs */
2964 0, /* decr_pc_after_break */
2965 aarch64_breakpoint_at,
2966 aarch64_supports_z_point_type,
2967 aarch64_insert_point,
2968 aarch64_remove_point,
2969 aarch64_stopped_by_watchpoint,
2970 aarch64_stopped_data_address,
2971 NULL, /* collect_ptrace_register */
2972 NULL, /* supply_ptrace_register */
2973 aarch64_linux_siginfo_fixup,
2974 aarch64_linux_new_process,
2975 aarch64_linux_new_thread,
2976 aarch64_linux_new_fork,
2977 aarch64_linux_prepare_to_resume,
2978 NULL, /* process_qsupported */
2979 aarch64_supports_tracepoints,
2980 aarch64_get_thread_area,
2981 aarch64_install_fast_tracepoint_jump_pad,
2983 aarch64_get_min_fast_tracepoint_insn_len,
2984 aarch64_supports_range_stepping,
2985 aarch64_breakpoint_kind_from_current_state,
2986 aarch64_supports_hardware_single_step,
2990 initialize_low_arch (void)
2992 init_registers_aarch64 ();
2994 initialize_low_arch_aarch32 ();
2996 initialize_regsets_info (&aarch64_regsets_info);