1 /* Common target dependent code for GDB on ARM systems.
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include <ctype.h> /* XXX for isupper (). */
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
32 #include "reggroups.h"
35 #include "arch-utils.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
41 #include "dwarf2-frame.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
49 #include "gdb/sim-arm.h"
52 #include "coff/internal.h"
55 #include "gdb_assert.h"
58 #include "features/arm-with-m.c"
59 #include "features/arm-with-iwmmxt.c"
60 #include "features/arm-with-vfpv2.c"
61 #include "features/arm-with-vfpv3.c"
62 #include "features/arm-with-neon.c"
66 /* Macros for setting and testing a bit in a minimal symbol that marks
67 it as Thumb function. The MSB of the minimal symbol's "info" field
68 is used for this purpose.
70 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
71 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
73 #define MSYMBOL_SET_SPECIAL(msym) \
74 MSYMBOL_TARGET_FLAG_1 (msym) = 1
76 #define MSYMBOL_IS_SPECIAL(msym) \
77 MSYMBOL_TARGET_FLAG_1 (msym)
79 /* Per-objfile data used for mapping symbols. */
80 static const struct objfile_data *arm_objfile_data_key;
82 struct arm_mapping_symbol
87 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
88 DEF_VEC_O(arm_mapping_symbol_s);
90 struct arm_per_objfile
92 VEC(arm_mapping_symbol_s) **section_maps;
95 /* The list of available "set arm ..." and "show arm ..." commands. */
96 static struct cmd_list_element *setarmcmdlist = NULL;
97 static struct cmd_list_element *showarmcmdlist = NULL;
99 /* The type of floating-point to use. Keep this in sync with enum
100 arm_float_model, and the help string in _initialize_arm_tdep. */
101 static const char *fp_model_strings[] =
111 /* A variable that can be configured by the user. */
112 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
113 static const char *current_fp_model = "auto";
115 /* The ABI to use. Keep this in sync with arm_abi_kind. */
116 static const char *arm_abi_strings[] =
124 /* A variable that can be configured by the user. */
125 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
126 static const char *arm_abi_string = "auto";
128 /* The execution mode to assume. */
129 static const char *arm_mode_strings[] =
137 static const char *arm_fallback_mode_string = "auto";
138 static const char *arm_force_mode_string = "auto";
140 /* Internal override of the execution mode. -1 means no override,
141 0 means override to ARM mode, 1 means override to Thumb mode.
142 The effect is the same as if arm_force_mode has been set by the
143 user (except the internal override has precedence over a user's
144 arm_force_mode override). */
145 static int arm_override_mode = -1;
147 /* Number of different reg name sets (options). */
148 static int num_disassembly_options;
150 /* The standard register names, and all the valid aliases for them. Note
151 that `fp', `sp' and `pc' are not added in this alias list, because they
152 have been added as builtin user registers in
153 std-regs.c:_initialize_frame_reg. */
158 } arm_register_aliases[] = {
159 /* Basic register numbers. */
176 /* Synonyms (argument and variable registers). */
189 /* Other platform-specific names for r9. */
195 /* Names used by GCC (not listed in the ARM EABI). */
197 /* A special name from the older ATPCS. */
201 static const char *const arm_register_names[] =
202 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
203 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
204 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
205 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
206 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
207 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
208 "fps", "cpsr" }; /* 24 25 */
210 /* Valid register name styles. */
211 static const char **valid_disassembly_styles;
213 /* Disassembly style to use. Default to "std" register names. */
214 static const char *disassembly_style;
216 /* This is used to keep the bfd arch_info in sync with the disassembly
218 static void set_disassembly_style_sfunc(char *, int,
219 struct cmd_list_element *);
220 static void set_disassembly_style (void);
222 static void convert_from_extended (const struct floatformat *, const void *,
224 static void convert_to_extended (const struct floatformat *, void *,
227 static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
228 struct regcache *regcache,
229 int regnum, gdb_byte *buf);
230 static void arm_neon_quad_write (struct gdbarch *gdbarch,
231 struct regcache *regcache,
232 int regnum, const gdb_byte *buf);
234 static int thumb_insn_size (unsigned short inst1);
236 struct arm_prologue_cache
238 /* The stack pointer at the time this frame was created; i.e. the
239 caller's stack pointer when this function was called. It is used
240 to identify this frame. */
243 /* The frame base for this frame is just prev_sp - frame size.
244 FRAMESIZE is the distance from the frame pointer to the
245 initial stack pointer. */
249 /* The register used to hold the frame pointer for this frame. */
252 /* Saved register offsets. */
253 struct trad_frame_saved_reg *saved_regs;
256 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
257 CORE_ADDR prologue_start,
258 CORE_ADDR prologue_end,
259 struct arm_prologue_cache *cache);
261 /* Architecture version for displaced stepping. This effects the behaviour of
262 certain instructions, and really should not be hard-wired. */
264 #define DISPLACED_STEPPING_ARCH_VERSION 5
266 /* Addresses for calling Thumb functions have the bit 0 set.
267 Here are some macros to test, set, or clear bit 0 of addresses. */
268 #define IS_THUMB_ADDR(addr) ((addr) & 1)
269 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
270 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
272 /* Set to true if the 32-bit mode is in use. */
276 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
279 arm_psr_thumb_bit (struct gdbarch *gdbarch)
281 if (gdbarch_tdep (gdbarch)->is_m)
287 /* Determine if FRAME is executing in Thumb mode. */
290 arm_frame_is_thumb (struct frame_info *frame)
293 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
295 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
296 directly (from a signal frame or dummy frame) or by interpreting
297 the saved LR (from a prologue or DWARF frame). So consult it and
298 trust the unwinders. */
299 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
301 return (cpsr & t_bit) != 0;
304 /* Callback for VEC_lower_bound. */
307 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
308 const struct arm_mapping_symbol *rhs)
310 return lhs->value < rhs->value;
313 /* Search for the mapping symbol covering MEMADDR. If one is found,
314 return its type. Otherwise, return 0. If START is non-NULL,
315 set *START to the location of the mapping symbol. */
318 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
320 struct obj_section *sec;
322 /* If there are mapping symbols, consult them. */
323 sec = find_pc_section (memaddr);
326 struct arm_per_objfile *data;
327 VEC(arm_mapping_symbol_s) *map;
328 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
332 data = objfile_data (sec->objfile, arm_objfile_data_key);
335 map = data->section_maps[sec->the_bfd_section->index];
336 if (!VEC_empty (arm_mapping_symbol_s, map))
338 struct arm_mapping_symbol *map_sym;
340 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
341 arm_compare_mapping_symbols);
343 /* VEC_lower_bound finds the earliest ordered insertion
344 point. If the following symbol starts at this exact
345 address, we use that; otherwise, the preceding
346 mapping symbol covers this address. */
347 if (idx < VEC_length (arm_mapping_symbol_s, map))
349 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
350 if (map_sym->value == map_key.value)
353 *start = map_sym->value + obj_section_addr (sec);
354 return map_sym->type;
360 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
362 *start = map_sym->value + obj_section_addr (sec);
363 return map_sym->type;
372 /* Determine if the program counter specified in MEMADDR is in a Thumb
373 function. This function should be called for addresses unrelated to
374 any executing frame; otherwise, prefer arm_frame_is_thumb. */
377 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
379 struct obj_section *sec;
380 struct minimal_symbol *sym;
382 struct displaced_step_closure* dsc
383 = get_displaced_step_closure_by_addr(memaddr);
385 /* If checking the mode of displaced instruction in copy area, the mode
386 should be determined by instruction on the original address. */
390 fprintf_unfiltered (gdb_stdlog,
391 "displaced: check mode of %.8lx instead of %.8lx\n",
392 (unsigned long) dsc->insn_addr,
393 (unsigned long) memaddr);
394 memaddr = dsc->insn_addr;
397 /* If bit 0 of the address is set, assume this is a Thumb address. */
398 if (IS_THUMB_ADDR (memaddr))
401 /* Respect internal mode override if active. */
402 if (arm_override_mode != -1)
403 return arm_override_mode;
405 /* If the user wants to override the symbol table, let him. */
406 if (strcmp (arm_force_mode_string, "arm") == 0)
408 if (strcmp (arm_force_mode_string, "thumb") == 0)
411 /* ARM v6-M and v7-M are always in Thumb mode. */
412 if (gdbarch_tdep (gdbarch)->is_m)
415 /* If there are mapping symbols, consult them. */
416 type = arm_find_mapping_symbol (memaddr, NULL);
420 /* Thumb functions have a "special" bit set in minimal symbols. */
421 sym = lookup_minimal_symbol_by_pc (memaddr);
423 return (MSYMBOL_IS_SPECIAL (sym));
425 /* If the user wants to override the fallback mode, let them. */
426 if (strcmp (arm_fallback_mode_string, "arm") == 0)
428 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
431 /* If we couldn't find any symbol, but we're talking to a running
432 target, then trust the current value of $cpsr. This lets
433 "display/i $pc" always show the correct mode (though if there is
434 a symbol table we will not reach here, so it still may not be
435 displayed in the mode it will be executed). */
436 if (target_has_registers)
437 return arm_frame_is_thumb (get_current_frame ());
439 /* Otherwise we're out of luck; we assume ARM. */
443 /* Remove useless bits from addresses in a running program. */
445 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
448 return UNMAKE_THUMB_ADDR (val);
450 return (val & 0x03fffffc);
453 /* When reading symbols, we need to zap the low bit of the address,
454 which may be set to 1 for Thumb functions. */
456 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
461 /* Return 1 if PC is the start of a compiler helper function which
462 can be safely ignored during prologue skipping. IS_THUMB is true
463 if the function is known to be a Thumb function due to the way it
466 skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
468 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
469 struct minimal_symbol *msym;
471 msym = lookup_minimal_symbol_by_pc (pc);
473 && SYMBOL_VALUE_ADDRESS (msym) == pc
474 && SYMBOL_LINKAGE_NAME (msym) != NULL)
476 const char *name = SYMBOL_LINKAGE_NAME (msym);
478 /* The GNU linker's Thumb call stub to foo is named
480 if (strstr (name, "_from_thumb") != NULL)
483 /* On soft-float targets, __truncdfsf2 is called to convert promoted
484 arguments to their argument types in non-prototyped
486 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
488 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
491 /* Internal functions related to thread-local storage. */
492 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
494 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
499 /* If we run against a stripped glibc, we may be unable to identify
500 special functions by name. Check for one important case,
501 __aeabi_read_tp, by comparing the *code* against the default
502 implementation (this is hand-written ARM assembler in glibc). */
505 && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
506 == 0xe3e00a0f /* mov r0, #0xffff0fff */
507 && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
508 == 0xe240f01f) /* sub pc, r0, #31 */
515 /* Support routines for instruction parsing. */
516 #define submask(x) ((1L << ((x) + 1)) - 1)
517 #define bit(obj,st) (((obj) >> (st)) & 1)
518 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
519 #define sbits(obj,st,fn) \
520 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
521 #define BranchDest(addr,instr) \
522 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
524 /* Extract the immediate from instruction movw/movt of encoding T. INSN1 is
525 the first 16-bit of instruction, and INSN2 is the second 16-bit of
527 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
528 ((bits ((insn1), 0, 3) << 12) \
529 | (bits ((insn1), 10, 10) << 11) \
530 | (bits ((insn2), 12, 14) << 8) \
531 | bits ((insn2), 0, 7))
533 /* Extract the immediate from instruction movw/movt of encoding A. INSN is
534 the 32-bit instruction. */
535 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
536 ((bits ((insn), 16, 19) << 12) \
537 | bits ((insn), 0, 11))
539 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
542 thumb_expand_immediate (unsigned int imm)
544 unsigned int count = imm >> 7;
552 return (imm & 0xff) | ((imm & 0xff) << 16);
554 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
556 return (imm & 0xff) | ((imm & 0xff) << 8)
557 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
560 return (0x80 | (imm & 0x7f)) << (32 - count);
563 /* Return 1 if the 16-bit Thumb instruction INST might change
564 control flow, 0 otherwise. */
567 thumb_instruction_changes_pc (unsigned short inst)
569 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
572 if ((inst & 0xf000) == 0xd000) /* conditional branch */
575 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
578 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
581 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
584 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
590 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
591 might change control flow, 0 otherwise. */
594 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
596 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
598 /* Branches and miscellaneous control instructions. */
600 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
605 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
607 /* SUBS PC, LR, #imm8. */
610 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
612 /* Conditional branch. */
619 if ((inst1 & 0xfe50) == 0xe810)
621 /* Load multiple or RFE. */
623 if (bit (inst1, 7) && !bit (inst1, 8))
629 else if (!bit (inst1, 7) && bit (inst1, 8))
635 else if (bit (inst1, 7) && bit (inst1, 8))
640 else if (!bit (inst1, 7) && !bit (inst1, 8))
649 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
651 /* MOV PC or MOVS PC. */
655 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
658 if (bits (inst1, 0, 3) == 15)
664 if ((inst2 & 0x0fc0) == 0x0000)
670 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
676 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
685 /* Analyze a Thumb prologue, looking for a recognizable stack frame
686 and frame pointer. Scan until we encounter a store that could
687 clobber the stack frame unexpectedly, or an unknown instruction.
688 Return the last address which is definitely safe to skip for an
689 initial breakpoint. */
692 thumb_analyze_prologue (struct gdbarch *gdbarch,
693 CORE_ADDR start, CORE_ADDR limit,
694 struct arm_prologue_cache *cache)
696 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
697 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
700 struct pv_area *stack;
701 struct cleanup *back_to;
703 CORE_ADDR unrecognized_pc = 0;
705 for (i = 0; i < 16; i++)
706 regs[i] = pv_register (i, 0);
707 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
708 back_to = make_cleanup_free_pv_area (stack);
710 while (start < limit)
714 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
716 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
721 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
724 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
725 whether to save LR (R14). */
726 mask = (insn & 0xff) | ((insn & 0x100) << 6);
728 /* Calculate offsets of saved R0-R7 and LR. */
729 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
730 if (mask & (1 << regno))
732 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
734 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
737 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
740 offset = (insn & 0x7f) << 2; /* get scaled offset */
741 if (insn & 0x80) /* Check for SUB. */
742 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
745 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
748 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
749 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
751 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
752 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
753 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
755 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
756 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
757 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
759 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
760 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
761 && pv_is_constant (regs[bits (insn, 3, 5)]))
762 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
763 regs[bits (insn, 6, 8)]);
764 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
765 && pv_is_constant (regs[bits (insn, 3, 6)]))
767 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
768 int rm = bits (insn, 3, 6);
769 regs[rd] = pv_add (regs[rd], regs[rm]);
771 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
773 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
774 int src_reg = (insn & 0x78) >> 3;
775 regs[dst_reg] = regs[src_reg];
777 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
779 /* Handle stores to the stack. Normally pushes are used,
780 but with GCC -mtpcs-frame, there may be other stores
781 in the prologue to create the frame. */
782 int regno = (insn >> 8) & 0x7;
785 offset = (insn & 0xff) << 2;
786 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
788 if (pv_area_store_would_trash (stack, addr))
791 pv_area_store (stack, addr, 4, regs[regno]);
793 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
795 int rd = bits (insn, 0, 2);
796 int rn = bits (insn, 3, 5);
799 offset = bits (insn, 6, 10) << 2;
800 addr = pv_add_constant (regs[rn], offset);
802 if (pv_area_store_would_trash (stack, addr))
805 pv_area_store (stack, addr, 4, regs[rd]);
807 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
808 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
809 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
810 /* Ignore stores of argument registers to the stack. */
812 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
813 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
814 /* Ignore block loads from the stack, potentially copying
815 parameters from memory. */
817 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
818 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
819 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
820 /* Similarly ignore single loads from the stack. */
822 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
823 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
824 /* Skip register copies, i.e. saves to another register
825 instead of the stack. */
827 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
828 /* Recognize constant loads; even with small stacks these are necessary
830 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
831 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
833 /* Constant pool loads, for the same reason. */
834 unsigned int constant;
837 loc = start + 4 + bits (insn, 0, 7) * 4;
838 constant = read_memory_unsigned_integer (loc, 4, byte_order);
839 regs[bits (insn, 8, 10)] = pv_constant (constant);
841 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instructions. */
843 unsigned short inst2;
845 inst2 = read_memory_unsigned_integer (start + 2, 2,
846 byte_order_for_code);
848 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
850 /* BL, BLX. Allow some special function calls when
851 skipping the prologue; GCC generates these before
852 storing arguments to the stack. */
854 int j1, j2, imm1, imm2;
856 imm1 = sbits (insn, 0, 10);
857 imm2 = bits (inst2, 0, 10);
858 j1 = bit (inst2, 13);
859 j2 = bit (inst2, 11);
861 offset = ((imm1 << 12) + (imm2 << 1));
862 offset ^= ((!j2) << 22) | ((!j1) << 23);
864 nextpc = start + 4 + offset;
865 /* For BLX make sure to clear the low bits. */
866 if (bit (inst2, 12) == 0)
867 nextpc = nextpc & 0xfffffffc;
869 if (!skip_prologue_function (gdbarch, nextpc,
870 bit (inst2, 12) != 0))
874 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
876 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
878 pv_t addr = regs[bits (insn, 0, 3)];
881 if (pv_area_store_would_trash (stack, addr))
884 /* Calculate offsets of saved registers. */
885 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
886 if (inst2 & (1 << regno))
888 addr = pv_add_constant (addr, -4);
889 pv_area_store (stack, addr, 4, regs[regno]);
893 regs[bits (insn, 0, 3)] = addr;
896 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
898 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
900 int regno1 = bits (inst2, 12, 15);
901 int regno2 = bits (inst2, 8, 11);
902 pv_t addr = regs[bits (insn, 0, 3)];
904 offset = inst2 & 0xff;
906 addr = pv_add_constant (addr, offset);
908 addr = pv_add_constant (addr, -offset);
910 if (pv_area_store_would_trash (stack, addr))
913 pv_area_store (stack, addr, 4, regs[regno1]);
914 pv_area_store (stack, pv_add_constant (addr, 4),
918 regs[bits (insn, 0, 3)] = addr;
921 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
922 && (inst2 & 0x0c00) == 0x0c00
923 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
925 int regno = bits (inst2, 12, 15);
926 pv_t addr = regs[bits (insn, 0, 3)];
928 offset = inst2 & 0xff;
930 addr = pv_add_constant (addr, offset);
932 addr = pv_add_constant (addr, -offset);
934 if (pv_area_store_would_trash (stack, addr))
937 pv_area_store (stack, addr, 4, regs[regno]);
940 regs[bits (insn, 0, 3)] = addr;
943 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
944 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
946 int regno = bits (inst2, 12, 15);
949 offset = inst2 & 0xfff;
950 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
952 if (pv_area_store_would_trash (stack, addr))
955 pv_area_store (stack, addr, 4, regs[regno]);
958 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
959 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
960 /* Ignore stores of argument registers to the stack. */
963 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
964 && (inst2 & 0x0d00) == 0x0c00
965 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
966 /* Ignore stores of argument registers to the stack. */
969 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
971 && (inst2 & 0x8000) == 0x0000
972 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
973 /* Ignore block loads from the stack, potentially copying
974 parameters from memory. */
977 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
979 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
980 /* Similarly ignore dual loads from the stack. */
983 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
984 && (inst2 & 0x0d00) == 0x0c00
985 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
986 /* Similarly ignore single loads from the stack. */
989 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
990 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
991 /* Similarly ignore single loads from the stack. */
994 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
995 && (inst2 & 0x8000) == 0x0000)
997 unsigned int imm = ((bits (insn, 10, 10) << 11)
998 | (bits (inst2, 12, 14) << 8)
999 | bits (inst2, 0, 7));
1001 regs[bits (inst2, 8, 11)]
1002 = pv_add_constant (regs[bits (insn, 0, 3)],
1003 thumb_expand_immediate (imm));
1006 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1007 && (inst2 & 0x8000) == 0x0000)
1009 unsigned int imm = ((bits (insn, 10, 10) << 11)
1010 | (bits (inst2, 12, 14) << 8)
1011 | bits (inst2, 0, 7));
1013 regs[bits (inst2, 8, 11)]
1014 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1017 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1018 && (inst2 & 0x8000) == 0x0000)
1020 unsigned int imm = ((bits (insn, 10, 10) << 11)
1021 | (bits (inst2, 12, 14) << 8)
1022 | bits (inst2, 0, 7));
1024 regs[bits (inst2, 8, 11)]
1025 = pv_add_constant (regs[bits (insn, 0, 3)],
1026 - (CORE_ADDR) thumb_expand_immediate (imm));
1029 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1030 && (inst2 & 0x8000) == 0x0000)
1032 unsigned int imm = ((bits (insn, 10, 10) << 11)
1033 | (bits (inst2, 12, 14) << 8)
1034 | bits (inst2, 0, 7));
1036 regs[bits (inst2, 8, 11)]
1037 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1040 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1042 unsigned int imm = ((bits (insn, 10, 10) << 11)
1043 | (bits (inst2, 12, 14) << 8)
1044 | bits (inst2, 0, 7));
1046 regs[bits (inst2, 8, 11)]
1047 = pv_constant (thumb_expand_immediate (imm));
1050 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1053 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1055 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1058 else if (insn == 0xea5f /* mov.w Rd,Rm */
1059 && (inst2 & 0xf0f0) == 0)
1061 int dst_reg = (inst2 & 0x0f00) >> 8;
1062 int src_reg = inst2 & 0xf;
1063 regs[dst_reg] = regs[src_reg];
1066 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1068 /* Constant pool loads. */
1069 unsigned int constant;
1072 offset = bits (insn, 0, 11);
1074 loc = start + 4 + offset;
1076 loc = start + 4 - offset;
1078 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1079 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1082 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1084 /* Constant pool loads. */
1085 unsigned int constant;
1088 offset = bits (insn, 0, 7) << 2;
1090 loc = start + 4 + offset;
1092 loc = start + 4 - offset;
1094 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1095 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1097 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1098 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1101 else if (thumb2_instruction_changes_pc (insn, inst2))
1103 /* Don't scan past anything that might change control flow. */
1108 /* The optimizer might shove anything into the prologue,
1109 so we just skip what we don't recognize. */
1110 unrecognized_pc = start;
1115 else if (thumb_instruction_changes_pc (insn))
1117 /* Don't scan past anything that might change control flow. */
1122 /* The optimizer might shove anything into the prologue,
1123 so we just skip what we don't recognize. */
1124 unrecognized_pc = start;
1131 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1132 paddress (gdbarch, start));
1134 if (unrecognized_pc == 0)
1135 unrecognized_pc = start;
1139 do_cleanups (back_to);
1140 return unrecognized_pc;
1143 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1145 /* Frame pointer is fp. Frame size is constant. */
1146 cache->framereg = ARM_FP_REGNUM;
1147 cache->framesize = -regs[ARM_FP_REGNUM].k;
1149 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1151 /* Frame pointer is r7. Frame size is constant. */
1152 cache->framereg = THUMB_FP_REGNUM;
1153 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1155 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1157 /* Try the stack pointer... this is a bit desperate. */
1158 cache->framereg = ARM_SP_REGNUM;
1159 cache->framesize = -regs[ARM_SP_REGNUM].k;
1163 /* We're just out of luck. We don't know where the frame is. */
1164 cache->framereg = -1;
1165 cache->framesize = 0;
1168 for (i = 0; i < 16; i++)
1169 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1170 cache->saved_regs[i].addr = offset;
1172 do_cleanups (back_to);
1173 return unrecognized_pc;
1177 /* Try to analyze the instructions starting from PC, which load symbol
1178 __stack_chk_guard. Return the address of instruction after loading this
1179 symbol, set the dest register number to *BASEREG, and set the size of
1180 instructions for loading symbol in OFFSET. Return 0 if instructions are
1184 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1185 unsigned int *destreg, int *offset)
1187 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1188 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1189 unsigned int low, high, address;
1194 unsigned short insn1
1195 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1197 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1199 *destreg = bits (insn1, 8, 10);
1201 address = bits (insn1, 0, 7);
1203 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1205 unsigned short insn2
1206 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1208 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1211 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1213 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1215 /* movt Rd, #const */
1216 if ((insn1 & 0xfbc0) == 0xf2c0)
1218 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1219 *destreg = bits (insn2, 8, 11);
1221 address = (high << 16 | low);
1228 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1230 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1232 address = bits (insn, 0, 11);
1233 *destreg = bits (insn, 12, 15);
1236 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1238 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1241 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1243 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1245 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1246 *destreg = bits (insn, 12, 15);
1248 address = (high << 16 | low);
1256 /* Try to skip a sequence of instructions used for stack protector. If PC
1257 points to the first instruction of this sequence, return the address of
1258 first instruction after this sequence, otherwise, return original PC.
1260 On arm, this sequence of instructions is composed of mainly three steps,
1261 Step 1: load symbol __stack_chk_guard,
1262 Step 2: load from address of __stack_chk_guard,
1263 Step 3: store it to somewhere else.
1265 Usually, instructions on step 2 and step 3 are the same on various ARM
1266 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1267 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1268 instructions in step 1 vary from different ARM architectures. On ARMv7,
1271 movw Rn, #:lower16:__stack_chk_guard
1272 movt Rn, #:upper16:__stack_chk_guard
1279 .word __stack_chk_guard
1281 Since ldr/str is a very popular instruction, we can't use them as
1282 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1283 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
1284 stripped, as the 'fingerprint' of a stack protector cdoe sequence. */
1287 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1289 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1290 unsigned int address, basereg;
1291 struct minimal_symbol *stack_chk_guard;
1293 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1296 /* Try to parse the instructions in Step 1. */
1297 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1302 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1303 /* If name of symbol doesn't start with '__stack_chk_guard', this
1304 instruction sequence is not for stack protector. If symbol is
1305 removed, we conservatively think this sequence is for stack protector. */
1307 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
1308 strlen ("__stack_chk_guard")) != 0)
1313 unsigned int destreg;
1315 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1317 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1318 if ((insn & 0xf800) != 0x6800)
1320 if (bits (insn, 3, 5) != basereg)
1322 destreg = bits (insn, 0, 2);
1324 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1325 byte_order_for_code);
1326 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1327 if ((insn & 0xf800) != 0x6000)
1329 if (destreg != bits (insn, 0, 2))
1334 unsigned int destreg;
1336 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1338 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1339 if ((insn & 0x0e500000) != 0x04100000)
1341 if (bits (insn, 16, 19) != basereg)
1343 destreg = bits (insn, 12, 15);
1344 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1345 insn = read_memory_unsigned_integer (pc + offset + 4,
1346 4, byte_order_for_code);
1347 if ((insn & 0x0e500000) != 0x04000000)
1349 if (bits (insn, 12, 15) != destreg)
1352 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1355 return pc + offset + 4;
1357 return pc + offset + 8;
1360 /* Advance the PC across any function entry prologue instructions to
1361 reach some "real" code.
1363 The APCS (ARM Procedure Call Standard) defines the following
1367 [stmfd sp!, {a1,a2,a3,a4}]
1368 stmfd sp!, {...,fp,ip,lr,pc}
1369 [stfe f7, [sp, #-12]!]
1370 [stfe f6, [sp, #-12]!]
1371 [stfe f5, [sp, #-12]!]
1372 [stfe f4, [sp, #-12]!]
1373 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1376 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1378 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1381 CORE_ADDR func_addr, limit_pc;
1382 struct symtab_and_line sal;
1384 /* See if we can determine the end of the prologue via the symbol table.
1385 If so, then return either PC, or the PC after the prologue, whichever
1387 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1389 CORE_ADDR post_prologue_pc
1390 = skip_prologue_using_sal (gdbarch, func_addr);
1391 struct symtab *s = find_pc_symtab (func_addr);
1393 if (post_prologue_pc)
1395 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1398 /* GCC always emits a line note before the prologue and another
1399 one after, even if the two are at the same address or on the
1400 same line. Take advantage of this so that we do not need to
1401 know every instruction that might appear in the prologue. We
1402 will have producer information for most binaries; if it is
1403 missing (e.g. for -gstabs), assuming the GNU tools. */
1404 if (post_prologue_pc
1406 || s->producer == NULL
1407 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1408 return post_prologue_pc;
1410 if (post_prologue_pc != 0)
1412 CORE_ADDR analyzed_limit;
1414 /* For non-GCC compilers, make sure the entire line is an
1415 acceptable prologue; GDB will round this function's
1416 return value up to the end of the following line so we
1417 can not skip just part of a line (and we do not want to).
1419 RealView does not treat the prologue specially, but does
1420 associate prologue code with the opening brace; so this
1421 lets us skip the first line if we think it is the opening
1423 if (arm_pc_is_thumb (gdbarch, func_addr))
1424 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1425 post_prologue_pc, NULL);
1427 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1428 post_prologue_pc, NULL);
1430 if (analyzed_limit != post_prologue_pc)
1433 return post_prologue_pc;
1437 /* Can't determine prologue from the symbol table, need to examine
1440 /* Find an upper limit on the function prologue using the debug
1441 information. If the debug information could not be used to provide
1442 that bound, then use an arbitrary large number as the upper bound. */
1443 /* Like arm_scan_prologue, stop no later than pc + 64. */
1444 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1446 limit_pc = pc + 64; /* Magic. */
1449 /* Check if this is Thumb code. */
1450 if (arm_pc_is_thumb (gdbarch, pc))
1451 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1453 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1455 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1457 /* "mov ip, sp" is no longer a required part of the prologue. */
1458 if (inst == 0xe1a0c00d) /* mov ip, sp */
1461 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1464 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1467 /* Some prologues begin with "str lr, [sp, #-4]!". */
1468 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1471 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1474 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1477 /* Any insns after this point may float into the code, if it makes
1478 for better instruction scheduling, so we skip them only if we
1479 find them, but still consider the function to be frame-ful. */
1481 /* We may have either one sfmfd instruction here, or several stfe
1482 insns, depending on the version of floating point code we
1484 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1487 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1490 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1493 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1496 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1497 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1498 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1501 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1502 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1503 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1506 /* Un-recognized instruction; stop scanning. */
1510 return skip_pc; /* End of prologue. */
1514 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1515 This function decodes a Thumb function prologue to determine:
1516 1) the size of the stack frame
1517 2) which registers are saved on it
1518 3) the offsets of saved regs
1519 4) the offset from the stack pointer to the frame pointer
1521 A typical Thumb function prologue would create this stack frame
1522 (offsets relative to FP)
1523 old SP -> 24 stack parameters
1526 R7 -> 0 local variables (16 bytes)
1527 SP -> -12 additional stack space (12 bytes)
1528 The frame size would thus be 36 bytes, and the frame offset would be
1529 12 bytes. The frame register is R7.
1531 The comments for thumb_skip_prolog() describe the algorithm we use
1532 to detect the end of the prolog. */
1536 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1537 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1539 CORE_ADDR prologue_start;
1540 CORE_ADDR prologue_end;
1541 CORE_ADDR current_pc;
1543 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1546 /* See comment in arm_scan_prologue for an explanation of
1548 if (prologue_end > prologue_start + 64)
1550 prologue_end = prologue_start + 64;
1554 /* We're in the boondocks: we have no idea where the start of the
1558 prologue_end = min (prologue_end, prev_pc);
1560 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1563 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1566 arm_instruction_changes_pc (uint32_t this_instr)
1568 if (bits (this_instr, 28, 31) == INST_NV)
1569 /* Unconditional instructions. */
1570 switch (bits (this_instr, 24, 27))
1574 /* Branch with Link and change to Thumb. */
1579 /* Coprocessor register transfer. */
1580 if (bits (this_instr, 12, 15) == 15)
1581 error (_("Invalid update to pc in instruction"));
1587 switch (bits (this_instr, 25, 27))
1590 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1592 /* Multiplies and extra load/stores. */
1593 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1594 /* Neither multiplies nor extension load/stores are allowed
1598 /* Otherwise, miscellaneous instructions. */
1600 /* BX <reg>, BXJ <reg>, BLX <reg> */
1601 if (bits (this_instr, 4, 27) == 0x12fff1
1602 || bits (this_instr, 4, 27) == 0x12fff2
1603 || bits (this_instr, 4, 27) == 0x12fff3)
1606 /* Other miscellaneous instructions are unpredictable if they
1610 /* Data processing instruction. Fall through. */
1613 if (bits (this_instr, 12, 15) == 15)
1620 /* Media instructions and architecturally undefined instructions. */
1621 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1625 if (bit (this_instr, 20) == 0)
1629 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1635 /* Load/store multiple. */
1636 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1642 /* Branch and branch with link. */
1647 /* Coprocessor transfers or SWIs can not affect PC. */
1651 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1655 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1656 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1657 fill it in. Return the first address not recognized as a prologue
1660 We recognize all the instructions typically found in ARM prologues,
1661 plus harmless instructions which can be skipped (either for analysis
1662 purposes, or a more restrictive set that can be skipped when finding
1663 the end of the prologue). */
1666 arm_analyze_prologue (struct gdbarch *gdbarch,
1667 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1668 struct arm_prologue_cache *cache)
1670 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1671 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1673 CORE_ADDR offset, current_pc;
1674 pv_t regs[ARM_FPS_REGNUM];
1675 struct pv_area *stack;
1676 struct cleanup *back_to;
1677 int framereg, framesize;
1678 CORE_ADDR unrecognized_pc = 0;
1680 /* Search the prologue looking for instructions that set up the
1681 frame pointer, adjust the stack pointer, and save registers.
1683 Be careful, however, and if it doesn't look like a prologue,
1684 don't try to scan it. If, for instance, a frameless function
1685 begins with stmfd sp!, then we will tell ourselves there is
1686 a frame, which will confuse stack traceback, as well as "finish"
1687 and other operations that rely on a knowledge of the stack
1690 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1691 regs[regno] = pv_register (regno, 0);
1692 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1693 back_to = make_cleanup_free_pv_area (stack);
1695 for (current_pc = prologue_start;
1696 current_pc < prologue_end;
1700 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1702 if (insn == 0xe1a0c00d) /* mov ip, sp */
1704 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1707 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1708 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1710 unsigned imm = insn & 0xff; /* immediate value */
1711 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1712 int rd = bits (insn, 12, 15);
1713 imm = (imm >> rot) | (imm << (32 - rot));
1714 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1717 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1718 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1720 unsigned imm = insn & 0xff; /* immediate value */
1721 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1722 int rd = bits (insn, 12, 15);
1723 imm = (imm >> rot) | (imm << (32 - rot));
1724 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1727 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1730 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1732 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1733 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1734 regs[bits (insn, 12, 15)]);
1737 else if ((insn & 0xffff0000) == 0xe92d0000)
1738 /* stmfd sp!, {..., fp, ip, lr, pc}
1740 stmfd sp!, {a1, a2, a3, a4} */
1742 int mask = insn & 0xffff;
1744 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1747 /* Calculate offsets of saved registers. */
1748 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1749 if (mask & (1 << regno))
1752 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1753 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1756 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1757 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1758 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1760 /* No need to add this to saved_regs -- it's just an arg reg. */
1763 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1764 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1765 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1767 /* No need to add this to saved_regs -- it's just an arg reg. */
1770 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1772 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1774 /* No need to add this to saved_regs -- it's just arg regs. */
1777 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1779 unsigned imm = insn & 0xff; /* immediate value */
1780 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1781 imm = (imm >> rot) | (imm << (32 - rot));
1782 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1784 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1786 unsigned imm = insn & 0xff; /* immediate value */
1787 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1788 imm = (imm >> rot) | (imm << (32 - rot));
1789 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1791 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1793 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1795 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1798 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1799 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1800 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1802 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1804 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1806 int n_saved_fp_regs;
1807 unsigned int fp_start_reg, fp_bound_reg;
1809 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1812 if ((insn & 0x800) == 0x800) /* N0 is set */
1814 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1815 n_saved_fp_regs = 3;
1817 n_saved_fp_regs = 1;
1821 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1822 n_saved_fp_regs = 2;
1824 n_saved_fp_regs = 4;
1827 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1828 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1829 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1831 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1832 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1833 regs[fp_start_reg++]);
1836 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1838 /* Allow some special function calls when skipping the
1839 prologue; GCC generates these before storing arguments to
1841 CORE_ADDR dest = BranchDest (current_pc, insn);
1843 if (skip_prologue_function (gdbarch, dest, 0))
1848 else if ((insn & 0xf0000000) != 0xe0000000)
1849 break; /* Condition not true, exit early. */
1850 else if (arm_instruction_changes_pc (insn))
1851 /* Don't scan past anything that might change control flow. */
1853 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1854 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1855 /* Ignore block loads from the stack, potentially copying
1856 parameters from memory. */
1858 else if ((insn & 0xfc500000) == 0xe4100000
1859 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1860 /* Similarly ignore single loads from the stack. */
1862 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1863 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1864 register instead of the stack. */
1868 /* The optimizer might shove anything into the prologue,
1869 so we just skip what we don't recognize. */
1870 unrecognized_pc = current_pc;
1875 if (unrecognized_pc == 0)
1876 unrecognized_pc = current_pc;
1878 /* The frame size is just the distance from the frame register
1879 to the original stack pointer. */
1880 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1882 /* Frame pointer is fp. */
1883 framereg = ARM_FP_REGNUM;
1884 framesize = -regs[ARM_FP_REGNUM].k;
1886 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1888 /* Try the stack pointer... this is a bit desperate. */
1889 framereg = ARM_SP_REGNUM;
1890 framesize = -regs[ARM_SP_REGNUM].k;
1894 /* We're just out of luck. We don't know where the frame is. */
1901 cache->framereg = framereg;
1902 cache->framesize = framesize;
1904 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1905 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1906 cache->saved_regs[regno].addr = offset;
1910 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1911 paddress (gdbarch, unrecognized_pc));
1913 do_cleanups (back_to);
1914 return unrecognized_pc;
1918 arm_scan_prologue (struct frame_info *this_frame,
1919 struct arm_prologue_cache *cache)
1921 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1922 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1924 CORE_ADDR prologue_start, prologue_end, current_pc;
1925 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1926 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1927 pv_t regs[ARM_FPS_REGNUM];
1928 struct pv_area *stack;
1929 struct cleanup *back_to;
1932 /* Assume there is no frame until proven otherwise. */
1933 cache->framereg = ARM_SP_REGNUM;
1934 cache->framesize = 0;
1936 /* Check for Thumb prologue. */
1937 if (arm_frame_is_thumb (this_frame))
1939 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1943 /* Find the function prologue. If we can't find the function in
1944 the symbol table, peek in the stack frame to find the PC. */
1945 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1948 /* One way to find the end of the prologue (which works well
1949 for unoptimized code) is to do the following:
1951 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1954 prologue_end = prev_pc;
1955 else if (sal.end < prologue_end)
1956 prologue_end = sal.end;
1958 This mechanism is very accurate so long as the optimizer
1959 doesn't move any instructions from the function body into the
1960 prologue. If this happens, sal.end will be the last
1961 instruction in the first hunk of prologue code just before
1962 the first instruction that the scheduler has moved from
1963 the body to the prologue.
1965 In order to make sure that we scan all of the prologue
1966 instructions, we use a slightly less accurate mechanism which
1967 may scan more than necessary. To help compensate for this
1968 lack of accuracy, the prologue scanning loop below contains
1969 several clauses which'll cause the loop to terminate early if
1970 an implausible prologue instruction is encountered.
1976 is a suitable endpoint since it accounts for the largest
1977 possible prologue plus up to five instructions inserted by
1980 if (prologue_end > prologue_start + 64)
1982 prologue_end = prologue_start + 64; /* See above. */
1987 /* We have no symbol information. Our only option is to assume this
1988 function has a standard stack frame and the normal frame register.
1989 Then, we can find the value of our frame pointer on entrance to
1990 the callee (or at the present moment if this is the innermost frame).
1991 The value stored there should be the address of the stmfd + 8. */
1992 CORE_ADDR frame_loc;
1993 LONGEST return_value;
1995 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1996 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
2000 prologue_start = gdbarch_addr_bits_remove
2001 (gdbarch, return_value) - 8;
2002 prologue_end = prologue_start + 64; /* See above. */
2006 if (prev_pc < prologue_end)
2007 prologue_end = prev_pc;
2009 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2012 static struct arm_prologue_cache *
2013 arm_make_prologue_cache (struct frame_info *this_frame)
2016 struct arm_prologue_cache *cache;
2017 CORE_ADDR unwound_fp;
2019 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2020 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2022 arm_scan_prologue (this_frame, cache);
2024 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2025 if (unwound_fp == 0)
2028 cache->prev_sp = unwound_fp + cache->framesize;
2030 /* Calculate actual addresses of saved registers using offsets
2031 determined by arm_scan_prologue. */
2032 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2033 if (trad_frame_addr_p (cache->saved_regs, reg))
2034 cache->saved_regs[reg].addr += cache->prev_sp;
2039 /* Our frame ID for a normal frame is the current function's starting PC
2040 and the caller's SP when we were called. */
2043 arm_prologue_this_id (struct frame_info *this_frame,
2045 struct frame_id *this_id)
2047 struct arm_prologue_cache *cache;
2051 if (*this_cache == NULL)
2052 *this_cache = arm_make_prologue_cache (this_frame);
2053 cache = *this_cache;
2055 /* This is meant to halt the backtrace at "_start". */
2056 pc = get_frame_pc (this_frame);
2057 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2060 /* If we've hit a wall, stop. */
2061 if (cache->prev_sp == 0)
2064 /* Use function start address as part of the frame ID. If we cannot
2065 identify the start address (due to missing symbol information),
2066 fall back to just using the current PC. */
2067 func = get_frame_func (this_frame);
2071 id = frame_id_build (cache->prev_sp, func);
2075 static struct value *
2076 arm_prologue_prev_register (struct frame_info *this_frame,
2080 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2081 struct arm_prologue_cache *cache;
2083 if (*this_cache == NULL)
2084 *this_cache = arm_make_prologue_cache (this_frame);
2085 cache = *this_cache;
2087 /* If we are asked to unwind the PC, then we need to return the LR
2088 instead. The prologue may save PC, but it will point into this
2089 frame's prologue, not the next frame's resume location. Also
2090 strip the saved T bit. A valid LR may have the low bit set, but
2091 a valid PC never does. */
2092 if (prev_regnum == ARM_PC_REGNUM)
2096 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2097 return frame_unwind_got_constant (this_frame, prev_regnum,
2098 arm_addr_bits_remove (gdbarch, lr));
2101 /* SP is generally not saved to the stack, but this frame is
2102 identified by the next frame's stack pointer at the time of the call.
2103 The value was already reconstructed into PREV_SP. */
2104 if (prev_regnum == ARM_SP_REGNUM)
2105 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2107 /* The CPSR may have been changed by the call instruction and by the
2108 called function. The only bit we can reconstruct is the T bit,
2109 by checking the low bit of LR as of the call. This is a reliable
2110 indicator of Thumb-ness except for some ARM v4T pre-interworking
2111 Thumb code, which could get away with a clear low bit as long as
2112 the called function did not use bx. Guess that all other
2113 bits are unchanged; the condition flags are presumably lost,
2114 but the processor status is likely valid. */
2115 if (prev_regnum == ARM_PS_REGNUM)
2118 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2120 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2121 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2122 if (IS_THUMB_ADDR (lr))
2126 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2129 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2133 struct frame_unwind arm_prologue_unwind = {
2135 default_frame_unwind_stop_reason,
2136 arm_prologue_this_id,
2137 arm_prologue_prev_register,
2139 default_frame_sniffer
2142 /* Maintain a list of ARM exception table entries per objfile, similar to the
2143 list of mapping symbols. We only cache entries for standard ARM-defined
2144 personality routines; the cache will contain only the frame unwinding
2145 instructions associated with the entry (not the descriptors). */
2147 static const struct objfile_data *arm_exidx_data_key;
2149 struct arm_exidx_entry
2154 typedef struct arm_exidx_entry arm_exidx_entry_s;
2155 DEF_VEC_O(arm_exidx_entry_s);
2157 struct arm_exidx_data
2159 VEC(arm_exidx_entry_s) **section_maps;
2163 arm_exidx_data_free (struct objfile *objfile, void *arg)
2165 struct arm_exidx_data *data = arg;
2168 for (i = 0; i < objfile->obfd->section_count; i++)
2169 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2173 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2174 const struct arm_exidx_entry *rhs)
2176 return lhs->addr < rhs->addr;
2179 static struct obj_section *
2180 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2182 struct obj_section *osect;
2184 ALL_OBJFILE_OSECTIONS (objfile, osect)
2185 if (bfd_get_section_flags (objfile->obfd,
2186 osect->the_bfd_section) & SEC_ALLOC)
2188 bfd_vma start, size;
2189 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2190 size = bfd_get_section_size (osect->the_bfd_section);
2192 if (start <= vma && vma < start + size)
2199 /* Parse contents of exception table and exception index sections
2200 of OBJFILE, and fill in the exception table entry cache.
2202 For each entry that refers to a standard ARM-defined personality
2203 routine, extract the frame unwinding instructions (from either
2204 the index or the table section). The unwinding instructions
2206 - extracting them from the rest of the table data
2207 - converting to host endianness
2208 - appending the implicit 0xb0 ("Finish") code
2210 The extracted and normalized instructions are stored for later
2211 retrieval by the arm_find_exidx_entry routine. */
2214 arm_exidx_new_objfile (struct objfile *objfile)
2216 struct cleanup *cleanups;
2217 struct arm_exidx_data *data;
2218 asection *exidx, *extab;
2219 bfd_vma exidx_vma = 0, extab_vma = 0;
2220 bfd_size_type exidx_size = 0, extab_size = 0;
2221 gdb_byte *exidx_data = NULL, *extab_data = NULL;
2224 /* If we've already touched this file, do nothing. */
2225 if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
2227 cleanups = make_cleanup (null_cleanup, NULL);
2229 /* Read contents of exception table and index. */
2230 exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
2233 exidx_vma = bfd_section_vma (objfile->obfd, exidx);
2234 exidx_size = bfd_get_section_size (exidx);
2235 exidx_data = xmalloc (exidx_size);
2236 make_cleanup (xfree, exidx_data);
2238 if (!bfd_get_section_contents (objfile->obfd, exidx,
2239 exidx_data, 0, exidx_size))
2241 do_cleanups (cleanups);
2246 extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
2249 extab_vma = bfd_section_vma (objfile->obfd, extab);
2250 extab_size = bfd_get_section_size (extab);
2251 extab_data = xmalloc (extab_size);
2252 make_cleanup (xfree, extab_data);
2254 if (!bfd_get_section_contents (objfile->obfd, extab,
2255 extab_data, 0, extab_size))
2257 do_cleanups (cleanups);
2262 /* Allocate exception table data structure. */
2263 data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
2264 set_objfile_data (objfile, arm_exidx_data_key, data);
2265 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
2266 objfile->obfd->section_count,
2267 VEC(arm_exidx_entry_s) *);
2269 /* Fill in exception table. */
2270 for (i = 0; i < exidx_size / 8; i++)
2272 struct arm_exidx_entry new_exidx_entry;
2273 bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
2274 bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
2275 bfd_vma addr = 0, word = 0;
2276 int n_bytes = 0, n_words = 0;
2277 struct obj_section *sec;
2278 gdb_byte *entry = NULL;
2280 /* Extract address of start of function. */
2281 idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2282 idx += exidx_vma + i * 8;
2284 /* Find section containing function and compute section offset. */
2285 sec = arm_obj_section_from_vma (objfile, idx);
2288 idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);
2290 /* Determine address of exception table entry. */
2293 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2295 else if ((val & 0xff000000) == 0x80000000)
2297 /* Exception table entry embedded in .ARM.exidx
2298 -- must be short form. */
2302 else if (!(val & 0x80000000))
2304 /* Exception table entry in .ARM.extab. */
2305 addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2306 addr += exidx_vma + i * 8 + 4;
2308 if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
2310 word = bfd_h_get_32 (objfile->obfd,
2311 extab_data + addr - extab_vma);
2314 if ((word & 0xff000000) == 0x80000000)
2319 else if ((word & 0xff000000) == 0x81000000
2320 || (word & 0xff000000) == 0x82000000)
2324 n_words = ((word >> 16) & 0xff);
2326 else if (!(word & 0x80000000))
2329 struct obj_section *pers_sec;
2330 int gnu_personality = 0;
2332 /* Custom personality routine. */
2333 pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2334 pers = UNMAKE_THUMB_ADDR (pers + addr - 4);
2336 /* Check whether we've got one of the variants of the
2337 GNU personality routines. */
2338 pers_sec = arm_obj_section_from_vma (objfile, pers);
2341 static const char *personality[] =
2343 "__gcc_personality_v0",
2344 "__gxx_personality_v0",
2345 "__gcj_personality_v0",
2346 "__gnu_objc_personality_v0",
2350 CORE_ADDR pc = pers + obj_section_offset (pers_sec);
2353 for (k = 0; personality[k]; k++)
2354 if (lookup_minimal_symbol_by_pc_name
2355 (pc, personality[k], objfile))
2357 gnu_personality = 1;
2362 /* If so, the next word contains a word count in the high
2363 byte, followed by the same unwind instructions as the
2364 pre-defined forms. */
2366 && addr + 4 <= extab_vma + extab_size)
2368 word = bfd_h_get_32 (objfile->obfd,
2369 extab_data + addr - extab_vma);
2372 n_words = ((word >> 24) & 0xff);
2378 /* Sanity check address. */
2380 if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
2381 n_words = n_bytes = 0;
2383 /* The unwind instructions reside in WORD (only the N_BYTES least
2384 significant bytes are valid), followed by N_WORDS words in the
2385 extab section starting at ADDR. */
2386 if (n_bytes || n_words)
2388 gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
2389 n_bytes + n_words * 4 + 1);
2392 *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);
2396 word = bfd_h_get_32 (objfile->obfd,
2397 extab_data + addr - extab_vma);
2400 *p++ = (gdb_byte) ((word >> 24) & 0xff);
2401 *p++ = (gdb_byte) ((word >> 16) & 0xff);
2402 *p++ = (gdb_byte) ((word >> 8) & 0xff);
2403 *p++ = (gdb_byte) (word & 0xff);
2406 /* Implied "Finish" to terminate the list. */
2410 /* Push entry onto vector. They are guaranteed to always
2411 appear in order of increasing addresses. */
2412 new_exidx_entry.addr = idx;
2413 new_exidx_entry.entry = entry;
2414 VEC_safe_push (arm_exidx_entry_s,
2415 data->section_maps[sec->the_bfd_section->index],
2419 do_cleanups (cleanups);
2422 /* Search for the exception table entry covering MEMADDR. If one is found,
2423 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2424 set *START to the start of the region covered by this entry. */
2427 arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
2429 struct obj_section *sec;
2431 sec = find_pc_section (memaddr);
2434 struct arm_exidx_data *data;
2435 VEC(arm_exidx_entry_s) *map;
2436 struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
2439 data = objfile_data (sec->objfile, arm_exidx_data_key);
2442 map = data->section_maps[sec->the_bfd_section->index];
2443 if (!VEC_empty (arm_exidx_entry_s, map))
2445 struct arm_exidx_entry *map_sym;
2447 idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
2448 arm_compare_exidx_entries);
2450 /* VEC_lower_bound finds the earliest ordered insertion
2451 point. If the following symbol starts at this exact
2452 address, we use that; otherwise, the preceding
2453 exception table entry covers this address. */
2454 if (idx < VEC_length (arm_exidx_entry_s, map))
2456 map_sym = VEC_index (arm_exidx_entry_s, map, idx);
2457 if (map_sym->addr == map_key.addr)
2460 *start = map_sym->addr + obj_section_addr (sec);
2461 return map_sym->entry;
2467 map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
2469 *start = map_sym->addr + obj_section_addr (sec);
2470 return map_sym->entry;
2479 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2480 instruction list from the ARM exception table entry ENTRY, allocate and
2481 return a prologue cache structure describing how to unwind this frame.
2483 Return NULL if the unwinding instruction list contains a "spare",
2484 "reserved" or "refuse to unwind" instruction as defined in section
2485 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2486 for the ARM Architecture" document. */
2488 static struct arm_prologue_cache *
2489 arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
2494 struct arm_prologue_cache *cache;
2495 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2496 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2502 /* Whenever we reload SP, we actually have to retrieve its
2503 actual value in the current frame. */
2506 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2508 int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2509 vsp = get_frame_register_unsigned (this_frame, reg);
2513 CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
2514 vsp = get_frame_memory_unsigned (this_frame, addr, 4);
2520 /* Decode next unwind instruction. */
2523 if ((insn & 0xc0) == 0)
2525 int offset = insn & 0x3f;
2526 vsp += (offset << 2) + 4;
2528 else if ((insn & 0xc0) == 0x40)
2530 int offset = insn & 0x3f;
2531 vsp -= (offset << 2) + 4;
2533 else if ((insn & 0xf0) == 0x80)
2535 int mask = ((insn & 0xf) << 8) | *entry++;
2538 /* The special case of an all-zero mask identifies
2539 "Refuse to unwind". We return NULL to fall back
2540 to the prologue analyzer. */
2544 /* Pop registers r4..r15 under mask. */
2545 for (i = 0; i < 12; i++)
2546 if (mask & (1 << i))
2548 cache->saved_regs[4 + i].addr = vsp;
2552 /* Special-case popping SP -- we need to reload vsp. */
2553 if (mask & (1 << (ARM_SP_REGNUM - 4)))
2556 else if ((insn & 0xf0) == 0x90)
2558 int reg = insn & 0xf;
2560 /* Reserved cases. */
2561 if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
2564 /* Set SP from another register and mark VSP for reload. */
2565 cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
2568 else if ((insn & 0xf0) == 0xa0)
2570 int count = insn & 0x7;
2571 int pop_lr = (insn & 0x8) != 0;
2574 /* Pop r4..r[4+count]. */
2575 for (i = 0; i <= count; i++)
2577 cache->saved_regs[4 + i].addr = vsp;
2581 /* If indicated by flag, pop LR as well. */
2584 cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
2588 else if (insn == 0xb0)
2590 /* We could only have updated PC by popping into it; if so, it
2591 will show up as address. Otherwise, copy LR into PC. */
2592 if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
2593 cache->saved_regs[ARM_PC_REGNUM]
2594 = cache->saved_regs[ARM_LR_REGNUM];
2599 else if (insn == 0xb1)
2601 int mask = *entry++;
2604 /* All-zero mask and mask >= 16 is "spare". */
2605 if (mask == 0 || mask >= 16)
2608 /* Pop r0..r3 under mask. */
2609 for (i = 0; i < 4; i++)
2610 if (mask & (1 << i))
2612 cache->saved_regs[i].addr = vsp;
2616 else if (insn == 0xb2)
2618 ULONGEST offset = 0;
2623 offset |= (*entry & 0x7f) << shift;
2626 while (*entry++ & 0x80);
2628 vsp += 0x204 + (offset << 2);
2630 else if (insn == 0xb3)
2632 int start = *entry >> 4;
2633 int count = (*entry++) & 0xf;
2636 /* Only registers D0..D15 are valid here. */
2637 if (start + count >= 16)
2640 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2641 for (i = 0; i <= count; i++)
2643 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2647 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2650 else if ((insn & 0xf8) == 0xb8)
2652 int count = insn & 0x7;
2655 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2656 for (i = 0; i <= count; i++)
2658 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2662 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2665 else if (insn == 0xc6)
2667 int start = *entry >> 4;
2668 int count = (*entry++) & 0xf;
2671 /* Only registers WR0..WR15 are valid. */
2672 if (start + count >= 16)
2675 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2676 for (i = 0; i <= count; i++)
2678 cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
2682 else if (insn == 0xc7)
2684 int mask = *entry++;
2687 /* All-zero mask and mask >= 16 is "spare". */
2688 if (mask == 0 || mask >= 16)
2691 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2692 for (i = 0; i < 4; i++)
2693 if (mask & (1 << i))
2695 cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
2699 else if ((insn & 0xf8) == 0xc0)
2701 int count = insn & 0x7;
2704 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2705 for (i = 0; i <= count; i++)
2707 cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
2711 else if (insn == 0xc8)
2713 int start = *entry >> 4;
2714 int count = (*entry++) & 0xf;
2717 /* Only registers D0..D31 are valid. */
2718 if (start + count >= 16)
2721 /* Pop VFP double-precision registers
2722 D[16+start]..D[16+start+count]. */
2723 for (i = 0; i <= count; i++)
2725 cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
2729 else if (insn == 0xc9)
2731 int start = *entry >> 4;
2732 int count = (*entry++) & 0xf;
2735 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2736 for (i = 0; i <= count; i++)
2738 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2742 else if ((insn & 0xf8) == 0xd0)
2744 int count = insn & 0x7;
2747 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2748 for (i = 0; i <= count; i++)
2750 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2756 /* Everything else is "spare". */
2761 /* If we restore SP from a register, assume this was the frame register.
2762 Otherwise just fall back to SP as frame register. */
2763 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2764 cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2766 cache->framereg = ARM_SP_REGNUM;
2768 /* Determine offset to previous frame. */
2770 = vsp - get_frame_register_unsigned (this_frame, cache->framereg);
2772 /* We already got the previous SP. */
2773 cache->prev_sp = vsp;
2778 /* Unwinding via ARM exception table entries. Note that the sniffer
2779 already computes a filled-in prologue cache, which is then used
2780 with the same arm_prologue_this_id and arm_prologue_prev_register
2781 routines also used for prologue-parsing based unwinding. */
2784 arm_exidx_unwind_sniffer (const struct frame_unwind *self,
2785 struct frame_info *this_frame,
2786 void **this_prologue_cache)
2788 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2789 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2790 CORE_ADDR addr_in_block, exidx_region, func_start;
2791 struct arm_prologue_cache *cache;
2794 /* See if we have an ARM exception table entry covering this address. */
2795 addr_in_block = get_frame_address_in_block (this_frame);
2796 entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
2800 /* The ARM exception table does not describe unwind information
2801 for arbitrary PC values, but is guaranteed to be correct only
2802 at call sites. We have to decide here whether we want to use
2803 ARM exception table information for this frame, or fall back
2804 to using prologue parsing. (Note that if we have DWARF CFI,
2805 this sniffer isn't even called -- CFI is always preferred.)
2807 Before we make this decision, however, we check whether we
2808 actually have *symbol* information for the current frame.
2809 If not, prologue parsing would not work anyway, so we might
2810 as well use the exception table and hope for the best. */
2811 if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
2815 /* If the next frame is "normal", we are at a call site in this
2816 frame, so exception information is guaranteed to be valid. */
2817 if (get_next_frame (this_frame)
2818 && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
2821 /* We also assume exception information is valid if we're currently
2822 blocked in a system call. The system library is supposed to
2823 ensure this, so that e.g. pthread cancellation works. */
2824 if (arm_frame_is_thumb (this_frame))
2828 if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
2829 byte_order_for_code, &insn)
2830 && (insn & 0xff00) == 0xdf00 /* svc */)
2837 if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
2838 byte_order_for_code, &insn)
2839 && (insn & 0x0f000000) == 0x0f000000 /* svc */)
2843 /* Bail out if we don't know that exception information is valid. */
2847 /* The ARM exception index does not mark the *end* of the region
2848 covered by the entry, and some functions will not have any entry.
2849 To correctly recognize the end of the covered region, the linker
2850 should have inserted dummy records with a CANTUNWIND marker.
2852 Unfortunately, current versions of GNU ld do not reliably do
2853 this, and thus we may have found an incorrect entry above.
2854 As a (temporary) sanity check, we only use the entry if it
2855 lies *within* the bounds of the function. Note that this check
2856 might reject perfectly valid entries that just happen to cover
2857 multiple functions; therefore this check ought to be removed
2858 once the linker is fixed. */
2859 if (func_start > exidx_region)
2863 /* Decode the list of unwinding instructions into a prologue cache.
2864 Note that this may fail due to e.g. a "refuse to unwind" code. */
2865 cache = arm_exidx_fill_cache (this_frame, entry);
2869 *this_prologue_cache = cache;
2873 struct frame_unwind arm_exidx_unwind = {
2875 default_frame_unwind_stop_reason,
2876 arm_prologue_this_id,
2877 arm_prologue_prev_register,
2879 arm_exidx_unwind_sniffer
2882 static struct arm_prologue_cache *
2883 arm_make_stub_cache (struct frame_info *this_frame)
2885 struct arm_prologue_cache *cache;
2887 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2888 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2890 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2895 /* Our frame ID for a stub frame is the current SP and LR. */
2898 arm_stub_this_id (struct frame_info *this_frame,
2900 struct frame_id *this_id)
2902 struct arm_prologue_cache *cache;
2904 if (*this_cache == NULL)
2905 *this_cache = arm_make_stub_cache (this_frame);
2906 cache = *this_cache;
2908 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2912 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2913 struct frame_info *this_frame,
2914 void **this_prologue_cache)
2916 CORE_ADDR addr_in_block;
2919 addr_in_block = get_frame_address_in_block (this_frame);
2920 if (in_plt_section (addr_in_block, NULL)
2921 /* We also use the stub winder if the target memory is unreadable
2922 to avoid having the prologue unwinder trying to read it. */
2923 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2929 struct frame_unwind arm_stub_unwind = {
2931 default_frame_unwind_stop_reason,
2933 arm_prologue_prev_register,
2935 arm_stub_unwind_sniffer
2939 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2941 struct arm_prologue_cache *cache;
2943 if (*this_cache == NULL)
2944 *this_cache = arm_make_prologue_cache (this_frame);
2945 cache = *this_cache;
2947 return cache->prev_sp - cache->framesize;
2950 struct frame_base arm_normal_base = {
2951 &arm_prologue_unwind,
2952 arm_normal_frame_base,
2953 arm_normal_frame_base,
2954 arm_normal_frame_base
2957 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2958 dummy frame. The frame ID's base needs to match the TOS value
2959 saved by save_dummy_frame_tos() and returned from
2960 arm_push_dummy_call, and the PC needs to match the dummy frame's
2963 static struct frame_id
2964 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2966 return frame_id_build (get_frame_register_unsigned (this_frame,
2968 get_frame_pc (this_frame));
2971 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2972 be used to construct the previous frame's ID, after looking up the
2973 containing function). */
2976 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2979 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2980 return arm_addr_bits_remove (gdbarch, pc);
2984 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2986 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2989 static struct value *
2990 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2993 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2995 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
3000 /* The PC is normally copied from the return column, which
3001 describes saves of LR. However, that version may have an
3002 extra bit set to indicate Thumb state. The bit is not
3004 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3005 return frame_unwind_got_constant (this_frame, regnum,
3006 arm_addr_bits_remove (gdbarch, lr));
3009 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3010 cpsr = get_frame_register_unsigned (this_frame, regnum);
3011 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3012 if (IS_THUMB_ADDR (lr))
3016 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3019 internal_error (__FILE__, __LINE__,
3020 _("Unexpected register %d"), regnum);
3025 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3026 struct dwarf2_frame_state_reg *reg,
3027 struct frame_info *this_frame)
3033 reg->how = DWARF2_FRAME_REG_FN;
3034 reg->loc.fn = arm_dwarf2_prev_register;
3037 reg->how = DWARF2_FRAME_REG_CFA;
3042 /* Return true if we are in the function's epilogue, i.e. after the
3043 instruction that destroyed the function's stack frame. */
3046 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3048 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3049 unsigned int insn, insn2;
3050 int found_return = 0, found_stack_adjust = 0;
3051 CORE_ADDR func_start, func_end;
3055 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3058 /* The epilogue is a sequence of instructions along the following lines:
3060 - add stack frame size to SP or FP
3061 - [if frame pointer used] restore SP from FP
3062 - restore registers from SP [may include PC]
3063 - a return-type instruction [if PC wasn't already restored]
3065 In a first pass, we scan forward from the current PC and verify the
3066 instructions we find as compatible with this sequence, ending in a
3069 However, this is not sufficient to distinguish indirect function calls
3070 within a function from indirect tail calls in the epilogue in some cases.
3071 Therefore, if we didn't already find any SP-changing instruction during
3072 forward scan, we add a backward scanning heuristic to ensure we actually
3073 are in the epilogue. */
3076 while (scan_pc < func_end && !found_return)
3078 if (target_read_memory (scan_pc, buf, 2))
3082 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3084 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
3086 else if (insn == 0x46f7) /* mov pc, lr */
3088 else if (insn == 0x46bd) /* mov sp, r7 */
3089 found_stack_adjust = 1;
3090 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3091 found_stack_adjust = 1;
3092 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
3094 found_stack_adjust = 1;
3095 if (insn & 0x0100) /* <registers> include PC. */
3098 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instruction */
3100 if (target_read_memory (scan_pc, buf, 2))
3104 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
3106 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3108 found_stack_adjust = 1;
3109 if (insn2 & 0x8000) /* <registers> include PC. */
3112 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3113 && (insn2 & 0x0fff) == 0x0b04)
3115 found_stack_adjust = 1;
3116 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
3119 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3120 && (insn2 & 0x0e00) == 0x0a00)
3121 found_stack_adjust = 1;
3132 /* Since any instruction in the epilogue sequence, with the possible
3133 exception of return itself, updates the stack pointer, we need to
3134 scan backwards for at most one instruction. Try either a 16-bit or
3135 a 32-bit instruction. This is just a heuristic, so we do not worry
3136 too much about false positives. */
3138 if (!found_stack_adjust)
3140 if (pc - 4 < func_start)
3142 if (target_read_memory (pc - 4, buf, 4))
3145 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3146 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
3148 if (insn2 == 0x46bd) /* mov sp, r7 */
3149 found_stack_adjust = 1;
3150 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3151 found_stack_adjust = 1;
3152 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
3153 found_stack_adjust = 1;
3154 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3155 found_stack_adjust = 1;
3156 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3157 && (insn2 & 0x0fff) == 0x0b04)
3158 found_stack_adjust = 1;
3159 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3160 && (insn2 & 0x0e00) == 0x0a00)
3161 found_stack_adjust = 1;
3164 return found_stack_adjust;
3167 /* Return true if we are in the function's epilogue, i.e. after the
3168 instruction that destroyed the function's stack frame. */
3171 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3173 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3175 int found_return, found_stack_adjust;
3176 CORE_ADDR func_start, func_end;
3178 if (arm_pc_is_thumb (gdbarch, pc))
3179 return thumb_in_function_epilogue_p (gdbarch, pc);
3181 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3184 /* We are in the epilogue if the previous instruction was a stack
3185 adjustment and the next instruction is a possible return (bx, mov
3186 pc, or pop). We could have to scan backwards to find the stack
3187 adjustment, or forwards to find the return, but this is a decent
3188 approximation. First scan forwards. */
3191 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3192 if (bits (insn, 28, 31) != INST_NV)
3194 if ((insn & 0x0ffffff0) == 0x012fff10)
3197 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3200 else if ((insn & 0x0fff0000) == 0x08bd0000
3201 && (insn & 0x0000c000) != 0)
3202 /* POP (LDMIA), including PC or LR. */
3209 /* Scan backwards. This is just a heuristic, so do not worry about
3210 false positives from mode changes. */
3212 if (pc < func_start + 4)
3215 found_stack_adjust = 0;
3216 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3217 if (bits (insn, 28, 31) != INST_NV)
3219 if ((insn & 0x0df0f000) == 0x0080d000)
3220 /* ADD SP (register or immediate). */
3221 found_stack_adjust = 1;
3222 else if ((insn & 0x0df0f000) == 0x0040d000)
3223 /* SUB SP (register or immediate). */
3224 found_stack_adjust = 1;
3225 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3227 found_stack_adjust = 1;
3228 else if ((insn & 0x0fff0000) == 0x08bd0000)
3230 found_stack_adjust = 1;
3233 if (found_stack_adjust)
3240 /* When arguments must be pushed onto the stack, they go on in reverse
3241 order. The code below implements a FILO (stack) to do this. */
3246 struct stack_item *prev;
3250 static struct stack_item *
3251 push_stack_item (struct stack_item *prev, const void *contents, int len)
3253 struct stack_item *si;
3254 si = xmalloc (sizeof (struct stack_item));
3255 si->data = xmalloc (len);
3258 memcpy (si->data, contents, len);
3262 static struct stack_item *
3263 pop_stack_item (struct stack_item *si)
3265 struct stack_item *dead = si;
3273 /* Return the alignment (in bytes) of the given type. */
3276 arm_type_align (struct type *t)
3282 t = check_typedef (t);
3283 switch (TYPE_CODE (t))
3286 /* Should never happen. */
3287 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3291 case TYPE_CODE_ENUM:
3295 case TYPE_CODE_RANGE:
3296 case TYPE_CODE_BITSTRING:
3298 case TYPE_CODE_CHAR:
3299 case TYPE_CODE_BOOL:
3300 return TYPE_LENGTH (t);
3302 case TYPE_CODE_ARRAY:
3303 case TYPE_CODE_COMPLEX:
3304 /* TODO: What about vector types? */
3305 return arm_type_align (TYPE_TARGET_TYPE (t));
3307 case TYPE_CODE_STRUCT:
3308 case TYPE_CODE_UNION:
3310 for (n = 0; n < TYPE_NFIELDS (t); n++)
3312 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3320 /* Possible base types for a candidate for passing and returning in
3323 enum arm_vfp_cprc_base_type
3332 /* The length of one element of base type B. */
3335 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3339 case VFP_CPRC_SINGLE:
3341 case VFP_CPRC_DOUBLE:
3343 case VFP_CPRC_VEC64:
3345 case VFP_CPRC_VEC128:
3348 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3353 /* The character ('s', 'd' or 'q') for the type of VFP register used
3354 for passing base type B. */
3357 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3361 case VFP_CPRC_SINGLE:
3363 case VFP_CPRC_DOUBLE:
3365 case VFP_CPRC_VEC64:
3367 case VFP_CPRC_VEC128:
3370 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3375 /* Determine whether T may be part of a candidate for passing and
3376 returning in VFP registers, ignoring the limit on the total number
3377 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3378 classification of the first valid component found; if it is not
3379 VFP_CPRC_UNKNOWN, all components must have the same classification
3380 as *BASE_TYPE. If it is found that T contains a type not permitted
3381 for passing and returning in VFP registers, a type differently
3382 classified from *BASE_TYPE, or two types differently classified
3383 from each other, return -1, otherwise return the total number of
3384 base-type elements found (possibly 0 in an empty structure or
3385 array). Vectors and complex types are not currently supported,
3386 matching the generic AAPCS support. */
3389 arm_vfp_cprc_sub_candidate (struct type *t,
3390 enum arm_vfp_cprc_base_type *base_type)
3392 t = check_typedef (t);
3393 switch (TYPE_CODE (t))
3396 switch (TYPE_LENGTH (t))
3399 if (*base_type == VFP_CPRC_UNKNOWN)
3400 *base_type = VFP_CPRC_SINGLE;
3401 else if (*base_type != VFP_CPRC_SINGLE)
3406 if (*base_type == VFP_CPRC_UNKNOWN)
3407 *base_type = VFP_CPRC_DOUBLE;
3408 else if (*base_type != VFP_CPRC_DOUBLE)
3417 case TYPE_CODE_ARRAY:
3421 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
3424 if (TYPE_LENGTH (t) == 0)
3426 gdb_assert (count == 0);
3429 else if (count == 0)
3431 unitlen = arm_vfp_cprc_unit_length (*base_type);
3432 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
3433 return TYPE_LENGTH (t) / unitlen;
3437 case TYPE_CODE_STRUCT:
3442 for (i = 0; i < TYPE_NFIELDS (t); i++)
3444 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3446 if (sub_count == -1)
3450 if (TYPE_LENGTH (t) == 0)
3452 gdb_assert (count == 0);
3455 else if (count == 0)
3457 unitlen = arm_vfp_cprc_unit_length (*base_type);
3458 if (TYPE_LENGTH (t) != unitlen * count)
3463 case TYPE_CODE_UNION:
3468 for (i = 0; i < TYPE_NFIELDS (t); i++)
3470 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3472 if (sub_count == -1)
3474 count = (count > sub_count ? count : sub_count);
3476 if (TYPE_LENGTH (t) == 0)
3478 gdb_assert (count == 0);
3481 else if (count == 0)
3483 unitlen = arm_vfp_cprc_unit_length (*base_type);
3484 if (TYPE_LENGTH (t) != unitlen * count)
3496 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3497 if passed to or returned from a non-variadic function with the VFP
3498 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3499 *BASE_TYPE to the base type for T and *COUNT to the number of
3500 elements of that base type before returning. */
3503 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3506 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3507 int c = arm_vfp_cprc_sub_candidate (t, &b);
3508 if (c <= 0 || c > 4)
3515 /* Return 1 if the VFP ABI should be used for passing arguments to and
3516 returning values from a function of type FUNC_TYPE, 0
3520 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3522 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3523 /* Variadic functions always use the base ABI. Assume that functions
3524 without debug info are not variadic. */
3525 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3527 /* The VFP ABI is only supported as a variant of AAPCS. */
3528 if (tdep->arm_abi != ARM_ABI_AAPCS)
3530 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3533 /* We currently only support passing parameters in integer registers, which
3534 conforms with GCC's default model, and VFP argument passing following
3535 the VFP variant of AAPCS. Several other variants exist and
3536 we should probably support some of them based on the selected ABI. */
3539 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3540 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3541 struct value **args, CORE_ADDR sp, int struct_return,
3542 CORE_ADDR struct_addr)
3544 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3548 struct stack_item *si = NULL;
3551 unsigned vfp_regs_free = (1 << 16) - 1;
3553 /* Determine the type of this function and whether the VFP ABI
3555 ftype = check_typedef (value_type (function));
3556 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3557 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3558 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3560 /* Set the return address. For the ARM, the return breakpoint is
3561 always at BP_ADDR. */
3562 if (arm_pc_is_thumb (gdbarch, bp_addr))
3564 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3566 /* Walk through the list of args and determine how large a temporary
3567 stack is required. Need to take care here as structs may be
3568 passed on the stack, and we have to push them. */
3571 argreg = ARM_A1_REGNUM;
3574 /* The struct_return pointer occupies the first parameter
3575 passing register. */
3579 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3580 gdbarch_register_name (gdbarch, argreg),
3581 paddress (gdbarch, struct_addr));
3582 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3586 for (argnum = 0; argnum < nargs; argnum++)
3589 struct type *arg_type;
3590 struct type *target_type;
3591 enum type_code typecode;
3592 const bfd_byte *val;
3594 enum arm_vfp_cprc_base_type vfp_base_type;
3596 int may_use_core_reg = 1;
3598 arg_type = check_typedef (value_type (args[argnum]));
3599 len = TYPE_LENGTH (arg_type);
3600 target_type = TYPE_TARGET_TYPE (arg_type);
3601 typecode = TYPE_CODE (arg_type);
3602 val = value_contents (args[argnum]);
3604 align = arm_type_align (arg_type);
3605 /* Round alignment up to a whole number of words. */
3606 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3607 /* Different ABIs have different maximum alignments. */
3608 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3610 /* The APCS ABI only requires word alignment. */
3611 align = INT_REGISTER_SIZE;
3615 /* The AAPCS requires at most doubleword alignment. */
3616 if (align > INT_REGISTER_SIZE * 2)
3617 align = INT_REGISTER_SIZE * 2;
3621 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3629 /* Because this is a CPRC it cannot go in a core register or
3630 cause a core register to be skipped for alignment.
3631 Either it goes in VFP registers and the rest of this loop
3632 iteration is skipped for this argument, or it goes on the
3633 stack (and the stack alignment code is correct for this
3635 may_use_core_reg = 0;
3637 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3638 shift = unit_length / 4;
3639 mask = (1 << (shift * vfp_base_count)) - 1;
3640 for (regno = 0; regno < 16; regno += shift)
3641 if (((vfp_regs_free >> regno) & mask) == mask)
3650 vfp_regs_free &= ~(mask << regno);
3651 reg_scaled = regno / shift;
3652 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3653 for (i = 0; i < vfp_base_count; i++)
3657 if (reg_char == 'q')
3658 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3659 val + i * unit_length);
3662 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
3663 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3665 regcache_cooked_write (regcache, regnum,
3666 val + i * unit_length);
3673 /* This CPRC could not go in VFP registers, so all VFP
3674 registers are now marked as used. */
3679 /* Push stack padding for dowubleword alignment. */
3680 if (nstack & (align - 1))
3682 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3683 nstack += INT_REGISTER_SIZE;
3686 /* Doubleword aligned quantities must go in even register pairs. */
3687 if (may_use_core_reg
3688 && argreg <= ARM_LAST_ARG_REGNUM
3689 && align > INT_REGISTER_SIZE
3693 /* If the argument is a pointer to a function, and it is a
3694 Thumb function, create a LOCAL copy of the value and set
3695 the THUMB bit in it. */
3696 if (TYPE_CODE_PTR == typecode
3697 && target_type != NULL
3698 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3700 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3701 if (arm_pc_is_thumb (gdbarch, regval))
3703 bfd_byte *copy = alloca (len);
3704 store_unsigned_integer (copy, len, byte_order,
3705 MAKE_THUMB_ADDR (regval));
3710 /* Copy the argument to general registers or the stack in
3711 register-sized pieces. Large arguments are split between
3712 registers and stack. */
3715 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3717 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3719 /* The argument is being passed in a general purpose
3722 = extract_unsigned_integer (val, partial_len, byte_order);
3723 if (byte_order == BFD_ENDIAN_BIG)
3724 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3726 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3728 gdbarch_register_name
3730 phex (regval, INT_REGISTER_SIZE));
3731 regcache_cooked_write_unsigned (regcache, argreg, regval);
3736 /* Push the arguments onto the stack. */
3738 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3740 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3741 nstack += INT_REGISTER_SIZE;
3748 /* If we have an odd number of words to push, then decrement the stack
3749 by one word now, so first stack argument will be dword aligned. */
3756 write_memory (sp, si->data, si->len);
3757 si = pop_stack_item (si);
3760 /* Finally, update teh SP register. */
3761 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3767 /* Always align the frame to an 8-byte boundary. This is required on
3768 some platforms and harmless on the rest. */
3771 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3773 /* Align the stack to eight bytes. */
3774 return sp & ~ (CORE_ADDR) 7;
3778 print_fpu_flags (int flags)
3780 if (flags & (1 << 0))
3781 fputs ("IVO ", stdout);
3782 if (flags & (1 << 1))
3783 fputs ("DVZ ", stdout);
3784 if (flags & (1 << 2))
3785 fputs ("OFL ", stdout);
3786 if (flags & (1 << 3))
3787 fputs ("UFL ", stdout);
3788 if (flags & (1 << 4))
3789 fputs ("INX ", stdout);
3793 /* Print interesting information about the floating point processor
3794 (if present) or emulator. */
3796 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3797 struct frame_info *frame, const char *args)
3799 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3802 type = (status >> 24) & 127;
3803 if (status & (1 << 31))
3804 printf (_("Hardware FPU type %d\n"), type);
3806 printf (_("Software FPU type %d\n"), type);
3807 /* i18n: [floating point unit] mask */
3808 fputs (_("mask: "), stdout);
3809 print_fpu_flags (status >> 16);
3810 /* i18n: [floating point unit] flags */
3811 fputs (_("flags: "), stdout);
3812 print_fpu_flags (status);
3815 /* Construct the ARM extended floating point type. */
3816 static struct type *
3817 arm_ext_type (struct gdbarch *gdbarch)
3819 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3821 if (!tdep->arm_ext_type)
3823 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3824 floatformats_arm_ext);
3826 return tdep->arm_ext_type;
3829 static struct type *
3830 arm_neon_double_type (struct gdbarch *gdbarch)
3832 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3834 if (tdep->neon_double_type == NULL)
3836 struct type *t, *elem;
3838 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3840 elem = builtin_type (gdbarch)->builtin_uint8;
3841 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3842 elem = builtin_type (gdbarch)->builtin_uint16;
3843 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3844 elem = builtin_type (gdbarch)->builtin_uint32;
3845 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3846 elem = builtin_type (gdbarch)->builtin_uint64;
3847 append_composite_type_field (t, "u64", elem);
3848 elem = builtin_type (gdbarch)->builtin_float;
3849 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3850 elem = builtin_type (gdbarch)->builtin_double;
3851 append_composite_type_field (t, "f64", elem);
3853 TYPE_VECTOR (t) = 1;
3854 TYPE_NAME (t) = "neon_d";
3855 tdep->neon_double_type = t;
3858 return tdep->neon_double_type;
3861 /* FIXME: The vector types are not correctly ordered on big-endian
3862 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3863 bits of d0 - regardless of what unit size is being held in d0. So
3864 the offset of the first uint8 in d0 is 7, but the offset of the
3865 first float is 4. This code works as-is for little-endian
3868 static struct type *
3869 arm_neon_quad_type (struct gdbarch *gdbarch)
3871 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3873 if (tdep->neon_quad_type == NULL)
3875 struct type *t, *elem;
3877 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3879 elem = builtin_type (gdbarch)->builtin_uint8;
3880 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3881 elem = builtin_type (gdbarch)->builtin_uint16;
3882 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3883 elem = builtin_type (gdbarch)->builtin_uint32;
3884 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3885 elem = builtin_type (gdbarch)->builtin_uint64;
3886 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3887 elem = builtin_type (gdbarch)->builtin_float;
3888 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3889 elem = builtin_type (gdbarch)->builtin_double;
3890 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3892 TYPE_VECTOR (t) = 1;
3893 TYPE_NAME (t) = "neon_q";
3894 tdep->neon_quad_type = t;
3897 return tdep->neon_quad_type;
3900 /* Return the GDB type object for the "standard" data type of data in
3903 static struct type *
3904 arm_register_type (struct gdbarch *gdbarch, int regnum)
3906 int num_regs = gdbarch_num_regs (gdbarch);
3908 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
3909 && regnum >= num_regs && regnum < num_regs + 32)
3910 return builtin_type (gdbarch)->builtin_float;
3912 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
3913 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
3914 return arm_neon_quad_type (gdbarch);
3916 /* If the target description has register information, we are only
3917 in this function so that we can override the types of
3918 double-precision registers for NEON. */
3919 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
3921 struct type *t = tdesc_register_type (gdbarch, regnum);
3923 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
3924 && TYPE_CODE (t) == TYPE_CODE_FLT
3925 && gdbarch_tdep (gdbarch)->have_neon)
3926 return arm_neon_double_type (gdbarch);
3931 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
3933 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
3934 return builtin_type (gdbarch)->builtin_void;
3936 return arm_ext_type (gdbarch);
3938 else if (regnum == ARM_SP_REGNUM)
3939 return builtin_type (gdbarch)->builtin_data_ptr;
3940 else if (regnum == ARM_PC_REGNUM)
3941 return builtin_type (gdbarch)->builtin_func_ptr;
3942 else if (regnum >= ARRAY_SIZE (arm_register_names))
3943 /* These registers are only supported on targets which supply
3944 an XML description. */
3945 return builtin_type (gdbarch)->builtin_int0;
3947 return builtin_type (gdbarch)->builtin_uint32;
3950 /* Map a DWARF register REGNUM onto the appropriate GDB register
3954 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3956 /* Core integer regs. */
3957 if (reg >= 0 && reg <= 15)
3960 /* Legacy FPA encoding. These were once used in a way which
3961 overlapped with VFP register numbering, so their use is
3962 discouraged, but GDB doesn't support the ARM toolchain
3963 which used them for VFP. */
3964 if (reg >= 16 && reg <= 23)
3965 return ARM_F0_REGNUM + reg - 16;
3967 /* New assignments for the FPA registers. */
3968 if (reg >= 96 && reg <= 103)
3969 return ARM_F0_REGNUM + reg - 96;
3971 /* WMMX register assignments. */
3972 if (reg >= 104 && reg <= 111)
3973 return ARM_WCGR0_REGNUM + reg - 104;
3975 if (reg >= 112 && reg <= 127)
3976 return ARM_WR0_REGNUM + reg - 112;
3978 if (reg >= 192 && reg <= 199)
3979 return ARM_WC0_REGNUM + reg - 192;
3981 /* VFP v2 registers. A double precision value is actually
3982 in d1 rather than s2, but the ABI only defines numbering
3983 for the single precision registers. This will "just work"
3984 in GDB for little endian targets (we'll read eight bytes,
3985 starting in s0 and then progressing to s1), but will be
3986 reversed on big endian targets with VFP. This won't
3987 be a problem for the new Neon quad registers; you're supposed
3988 to use DW_OP_piece for those. */
3989 if (reg >= 64 && reg <= 95)
3993 sprintf (name_buf, "s%d", reg - 64);
3994 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3998 /* VFP v3 / Neon registers. This range is also used for VFP v2
3999 registers, except that it now describes d0 instead of s0. */
4000 if (reg >= 256 && reg <= 287)
4004 sprintf (name_buf, "d%d", reg - 256);
4005 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4012 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4014 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4017 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4019 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4020 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4022 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4023 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4025 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4026 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4028 if (reg < NUM_GREGS)
4029 return SIM_ARM_R0_REGNUM + reg;
4032 if (reg < NUM_FREGS)
4033 return SIM_ARM_FP0_REGNUM + reg;
4036 if (reg < NUM_SREGS)
4037 return SIM_ARM_FPS_REGNUM + reg;
4040 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4043 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4044 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4045 It is thought that this is is the floating-point register format on
4046 little-endian systems. */
4049 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4050 void *dbl, int endianess)
4054 if (endianess == BFD_ENDIAN_BIG)
4055 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4057 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4059 floatformat_from_doublest (fmt, &d, dbl);
4063 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4068 floatformat_to_doublest (fmt, ptr, &d);
4069 if (endianess == BFD_ENDIAN_BIG)
4070 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4072 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4077 condition_true (unsigned long cond, unsigned long status_reg)
4079 if (cond == INST_AL || cond == INST_NV)
4085 return ((status_reg & FLAG_Z) != 0);
4087 return ((status_reg & FLAG_Z) == 0);
4089 return ((status_reg & FLAG_C) != 0);
4091 return ((status_reg & FLAG_C) == 0);
4093 return ((status_reg & FLAG_N) != 0);
4095 return ((status_reg & FLAG_N) == 0);
4097 return ((status_reg & FLAG_V) != 0);
4099 return ((status_reg & FLAG_V) == 0);
4101 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4103 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4105 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4107 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4109 return (((status_reg & FLAG_Z) == 0)
4110 && (((status_reg & FLAG_N) == 0)
4111 == ((status_reg & FLAG_V) == 0)));
4113 return (((status_reg & FLAG_Z) != 0)
4114 || (((status_reg & FLAG_N) == 0)
4115 != ((status_reg & FLAG_V) == 0)));
4120 static unsigned long
4121 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
4122 unsigned long pc_val, unsigned long status_reg)
4124 unsigned long res, shift;
4125 int rm = bits (inst, 0, 3);
4126 unsigned long shifttype = bits (inst, 5, 6);
4130 int rs = bits (inst, 8, 11);
4131 shift = (rs == 15 ? pc_val + 8
4132 : get_frame_register_unsigned (frame, rs)) & 0xFF;
4135 shift = bits (inst, 7, 11);
4137 res = (rm == ARM_PC_REGNUM
4138 ? (pc_val + (bit (inst, 4) ? 12 : 8))
4139 : get_frame_register_unsigned (frame, rm));
4144 res = shift >= 32 ? 0 : res << shift;
4148 res = shift >= 32 ? 0 : res >> shift;
4154 res = ((res & 0x80000000L)
4155 ? ~((~res) >> shift) : res >> shift);
4158 case 3: /* ROR/RRX */
4161 res = (res >> 1) | (carry ? 0x80000000L : 0);
4163 res = (res >> shift) | (res << (32 - shift));
4167 return res & 0xffffffff;
4170 /* Return number of 1-bits in VAL. */
4173 bitcount (unsigned long val)
4176 for (nbits = 0; val != 0; nbits++)
4177 val &= val - 1; /* Delete rightmost 1-bit in val. */
4181 /* Return the size in bytes of the complete Thumb instruction whose
4182 first halfword is INST1. */
4185 thumb_insn_size (unsigned short inst1)
4187 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4194 thumb_advance_itstate (unsigned int itstate)
4196 /* Preserve IT[7:5], the first three bits of the condition. Shift
4197 the upcoming condition flags left by one bit. */
4198 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
4200 /* If we have finished the IT block, clear the state. */
4201 if ((itstate & 0x0f) == 0)
4207 /* Find the next PC after the current instruction executes. In some
4208 cases we can not statically determine the answer (see the IT state
4209 handling in this function); in that case, a breakpoint may be
4210 inserted in addition to the returned PC, which will be used to set
4211 another breakpoint by our caller. */
4214 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
4216 struct gdbarch *gdbarch = get_frame_arch (frame);
4217 struct address_space *aspace = get_frame_address_space (frame);
4218 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4219 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4220 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
4221 unsigned short inst1;
4222 CORE_ADDR nextpc = pc + 2; /* Default is next instruction. */
4223 unsigned long offset;
4224 ULONGEST status, itstate;
4226 nextpc = MAKE_THUMB_ADDR (nextpc);
4227 pc_val = MAKE_THUMB_ADDR (pc_val);
4229 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
4231 /* Thumb-2 conditional execution support. There are eight bits in
4232 the CPSR which describe conditional execution state. Once
4233 reconstructed (they're in a funny order), the low five bits
4234 describe the low bit of the condition for each instruction and
4235 how many instructions remain. The high three bits describe the
4236 base condition. One of the low four bits will be set if an IT
4237 block is active. These bits read as zero on earlier
4239 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4240 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
4242 /* If-Then handling. On GNU/Linux, where this routine is used, we
4243 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4244 can disable execution of the undefined instruction. So we might
4245 miss the breakpoint if we set it on a skipped conditional
4246 instruction. Because conditional instructions can change the
4247 flags, affecting the execution of further instructions, we may
4248 need to set two breakpoints. */
4250 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
4252 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4254 /* An IT instruction. Because this instruction does not
4255 modify the flags, we can accurately predict the next
4256 executed instruction. */
4257 itstate = inst1 & 0x00ff;
4258 pc += thumb_insn_size (inst1);
4260 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4262 inst1 = read_memory_unsigned_integer (pc, 2,
4263 byte_order_for_code);
4264 pc += thumb_insn_size (inst1);
4265 itstate = thumb_advance_itstate (itstate);
4268 return MAKE_THUMB_ADDR (pc);
4270 else if (itstate != 0)
4272 /* We are in a conditional block. Check the condition. */
4273 if (! condition_true (itstate >> 4, status))
4275 /* Advance to the next executed instruction. */
4276 pc += thumb_insn_size (inst1);
4277 itstate = thumb_advance_itstate (itstate);
4279 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4281 inst1 = read_memory_unsigned_integer (pc, 2,
4282 byte_order_for_code);
4283 pc += thumb_insn_size (inst1);
4284 itstate = thumb_advance_itstate (itstate);
4287 return MAKE_THUMB_ADDR (pc);
4289 else if ((itstate & 0x0f) == 0x08)
4291 /* This is the last instruction of the conditional
4292 block, and it is executed. We can handle it normally
4293 because the following instruction is not conditional,
4294 and we must handle it normally because it is
4295 permitted to branch. Fall through. */
4301 /* There are conditional instructions after this one.
4302 If this instruction modifies the flags, then we can
4303 not predict what the next executed instruction will
4304 be. Fortunately, this instruction is architecturally
4305 forbidden to branch; we know it will fall through.
4306 Start by skipping past it. */
4307 pc += thumb_insn_size (inst1);
4308 itstate = thumb_advance_itstate (itstate);
4310 /* Set a breakpoint on the following instruction. */
4311 gdb_assert ((itstate & 0x0f) != 0);
4312 arm_insert_single_step_breakpoint (gdbarch, aspace,
4313 MAKE_THUMB_ADDR (pc));
4314 cond_negated = (itstate >> 4) & 1;
4316 /* Skip all following instructions with the same
4317 condition. If there is a later instruction in the IT
4318 block with the opposite condition, set the other
4319 breakpoint there. If not, then set a breakpoint on
4320 the instruction after the IT block. */
4323 inst1 = read_memory_unsigned_integer (pc, 2,
4324 byte_order_for_code);
4325 pc += thumb_insn_size (inst1);
4326 itstate = thumb_advance_itstate (itstate);
4328 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
4330 return MAKE_THUMB_ADDR (pc);
4334 else if (itstate & 0x0f)
4336 /* We are in a conditional block. Check the condition. */
4337 int cond = itstate >> 4;
4339 if (! condition_true (cond, status))
4340 /* Advance to the next instruction. All the 32-bit
4341 instructions share a common prefix. */
4342 return MAKE_THUMB_ADDR (pc + thumb_insn_size (inst1));
4344 /* Otherwise, handle the instruction normally. */
4347 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
4351 /* Fetch the saved PC from the stack. It's stored above
4352 all of the other registers. */
4353 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
4354 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
4355 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
4357 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
4359 unsigned long cond = bits (inst1, 8, 11);
4360 if (cond == 0x0f) /* 0x0f = SWI */
4362 struct gdbarch_tdep *tdep;
4363 tdep = gdbarch_tdep (gdbarch);
4365 if (tdep->syscall_next_pc != NULL)
4366 nextpc = tdep->syscall_next_pc (frame);
4369 else if (cond != 0x0f && condition_true (cond, status))
4370 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
4372 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
4374 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
4376 else if (thumb_insn_size (inst1) == 4) /* 32-bit instruction */
4378 unsigned short inst2;
4379 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
4381 /* Default to the next instruction. */
4383 nextpc = MAKE_THUMB_ADDR (nextpc);
4385 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
4387 /* Branches and miscellaneous control instructions. */
4389 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
4392 int j1, j2, imm1, imm2;
4394 imm1 = sbits (inst1, 0, 10);
4395 imm2 = bits (inst2, 0, 10);
4396 j1 = bit (inst2, 13);
4397 j2 = bit (inst2, 11);
4399 offset = ((imm1 << 12) + (imm2 << 1));
4400 offset ^= ((!j2) << 22) | ((!j1) << 23);
4402 nextpc = pc_val + offset;
4403 /* For BLX make sure to clear the low bits. */
4404 if (bit (inst2, 12) == 0)
4405 nextpc = nextpc & 0xfffffffc;
4407 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
4409 /* SUBS PC, LR, #imm8. */
4410 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
4411 nextpc -= inst2 & 0x00ff;
4413 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
4415 /* Conditional branch. */
4416 if (condition_true (bits (inst1, 6, 9), status))
4418 int sign, j1, j2, imm1, imm2;
4420 sign = sbits (inst1, 10, 10);
4421 imm1 = bits (inst1, 0, 5);
4422 imm2 = bits (inst2, 0, 10);
4423 j1 = bit (inst2, 13);
4424 j2 = bit (inst2, 11);
4426 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
4427 offset += (imm1 << 12) + (imm2 << 1);
4429 nextpc = pc_val + offset;
4433 else if ((inst1 & 0xfe50) == 0xe810)
4435 /* Load multiple or RFE. */
4436 int rn, offset, load_pc = 1;
4438 rn = bits (inst1, 0, 3);
4439 if (bit (inst1, 7) && !bit (inst1, 8))
4442 if (!bit (inst2, 15))
4444 offset = bitcount (inst2) * 4 - 4;
4446 else if (!bit (inst1, 7) && bit (inst1, 8))
4449 if (!bit (inst2, 15))
4453 else if (bit (inst1, 7) && bit (inst1, 8))
4458 else if (!bit (inst1, 7) && !bit (inst1, 8))
4468 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
4469 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
4472 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
4474 /* MOV PC or MOVS PC. */
4475 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4476 nextpc = MAKE_THUMB_ADDR (nextpc);
4478 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
4482 int rn, load_pc = 1;
4484 rn = bits (inst1, 0, 3);
4485 base = get_frame_register_unsigned (frame, rn);
4486 if (rn == ARM_PC_REGNUM)
4488 base = (base + 4) & ~(CORE_ADDR) 0x3;
4490 base += bits (inst2, 0, 11);
4492 base -= bits (inst2, 0, 11);
4494 else if (bit (inst1, 7))
4495 base += bits (inst2, 0, 11);
4496 else if (bit (inst2, 11))
4498 if (bit (inst2, 10))
4501 base += bits (inst2, 0, 7);
4503 base -= bits (inst2, 0, 7);
4506 else if ((inst2 & 0x0fc0) == 0x0000)
4508 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
4509 base += get_frame_register_unsigned (frame, rm) << shift;
4516 nextpc = get_frame_memory_unsigned (frame, base, 4);
4518 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
4521 CORE_ADDR tbl_reg, table, offset, length;
4523 tbl_reg = bits (inst1, 0, 3);
4524 if (tbl_reg == 0x0f)
4525 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4527 table = get_frame_register_unsigned (frame, tbl_reg);
4529 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4530 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
4531 nextpc = pc_val + length;
4533 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
4536 CORE_ADDR tbl_reg, table, offset, length;
4538 tbl_reg = bits (inst1, 0, 3);
4539 if (tbl_reg == 0x0f)
4540 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4542 table = get_frame_register_unsigned (frame, tbl_reg);
4544 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4545 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
4546 nextpc = pc_val + length;
4549 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
4551 if (bits (inst1, 3, 6) == 0x0f)
4554 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4556 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
4558 if (bits (inst1, 3, 6) == 0x0f)
4561 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4563 nextpc = MAKE_THUMB_ADDR (nextpc);
4565 else if ((inst1 & 0xf500) == 0xb100)
4568 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
4569 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
4571 if (bit (inst1, 11) && reg != 0)
4572 nextpc = pc_val + imm;
4573 else if (!bit (inst1, 11) && reg == 0)
4574 nextpc = pc_val + imm;
4579 /* Get the raw next address. PC is the current program counter, in
4580 FRAME, which is assumed to be executing in ARM mode.
4582 The value returned has the execution state of the next instruction
4583 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4584 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4588 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
4590 struct gdbarch *gdbarch = get_frame_arch (frame);
4591 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4592 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4593 unsigned long pc_val;
4594 unsigned long this_instr;
4595 unsigned long status;
4598 pc_val = (unsigned long) pc;
4599 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
4601 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4602 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
4604 if (bits (this_instr, 28, 31) == INST_NV)
4605 switch (bits (this_instr, 24, 27))
4610 /* Branch with Link and change to Thumb. */
4611 nextpc = BranchDest (pc, this_instr);
4612 nextpc |= bit (this_instr, 24) << 1;
4613 nextpc = MAKE_THUMB_ADDR (nextpc);
4619 /* Coprocessor register transfer. */
4620 if (bits (this_instr, 12, 15) == 15)
4621 error (_("Invalid update to pc in instruction"));
4624 else if (condition_true (bits (this_instr, 28, 31), status))
4626 switch (bits (this_instr, 24, 27))
4629 case 0x1: /* data processing */
4633 unsigned long operand1, operand2, result = 0;
4637 if (bits (this_instr, 12, 15) != 15)
4640 if (bits (this_instr, 22, 25) == 0
4641 && bits (this_instr, 4, 7) == 9) /* multiply */
4642 error (_("Invalid update to pc in instruction"));
4644 /* BX <reg>, BLX <reg> */
4645 if (bits (this_instr, 4, 27) == 0x12fff1
4646 || bits (this_instr, 4, 27) == 0x12fff3)
4648 rn = bits (this_instr, 0, 3);
4649 nextpc = ((rn == ARM_PC_REGNUM)
4651 : get_frame_register_unsigned (frame, rn));
4656 /* Multiply into PC. */
4657 c = (status & FLAG_C) ? 1 : 0;
4658 rn = bits (this_instr, 16, 19);
4659 operand1 = ((rn == ARM_PC_REGNUM)
4661 : get_frame_register_unsigned (frame, rn));
4663 if (bit (this_instr, 25))
4665 unsigned long immval = bits (this_instr, 0, 7);
4666 unsigned long rotate = 2 * bits (this_instr, 8, 11);
4667 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
4670 else /* operand 2 is a shifted register. */
4671 operand2 = shifted_reg_val (frame, this_instr, c,
4674 switch (bits (this_instr, 21, 24))
4677 result = operand1 & operand2;
4681 result = operand1 ^ operand2;
4685 result = operand1 - operand2;
4689 result = operand2 - operand1;
4693 result = operand1 + operand2;
4697 result = operand1 + operand2 + c;
4701 result = operand1 - operand2 + c;
4705 result = operand2 - operand1 + c;
4711 case 0xb: /* tst, teq, cmp, cmn */
4712 result = (unsigned long) nextpc;
4716 result = operand1 | operand2;
4720 /* Always step into a function. */
4725 result = operand1 & ~operand2;
4733 /* In 26-bit APCS the bottom two bits of the result are
4734 ignored, and we always end up in ARM state. */
4736 nextpc = arm_addr_bits_remove (gdbarch, result);
4744 case 0x5: /* data transfer */
4747 if (bit (this_instr, 20))
4750 if (bits (this_instr, 12, 15) == 15)
4756 if (bit (this_instr, 22))
4757 error (_("Invalid update to pc in instruction"));
4759 /* byte write to PC */
4760 rn = bits (this_instr, 16, 19);
4761 base = ((rn == ARM_PC_REGNUM)
4763 : get_frame_register_unsigned (frame, rn));
4765 if (bit (this_instr, 24))
4768 int c = (status & FLAG_C) ? 1 : 0;
4769 unsigned long offset =
4770 (bit (this_instr, 25)
4771 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
4772 : bits (this_instr, 0, 11));
4774 if (bit (this_instr, 23))
4780 (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR) base,
4787 case 0x9: /* block transfer */
4788 if (bit (this_instr, 20))
4791 if (bit (this_instr, 15))
4795 unsigned long rn_val
4796 = get_frame_register_unsigned (frame,
4797 bits (this_instr, 16, 19));
4799 if (bit (this_instr, 23))
4802 unsigned long reglist = bits (this_instr, 0, 14);
4803 offset = bitcount (reglist) * 4;
4804 if (bit (this_instr, 24)) /* pre */
4807 else if (bit (this_instr, 24))
4811 (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR)
4818 case 0xb: /* branch & link */
4819 case 0xa: /* branch */
4821 nextpc = BranchDest (pc, this_instr);
4827 case 0xe: /* coproc ops */
4831 struct gdbarch_tdep *tdep;
4832 tdep = gdbarch_tdep (gdbarch);
4834 if (tdep->syscall_next_pc != NULL)
4835 nextpc = tdep->syscall_next_pc (frame);
4841 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
4849 /* Determine next PC after current instruction executes. Will call either
4850 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
4851 loop is detected. */
4854 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4858 if (arm_frame_is_thumb (frame))
4860 nextpc = thumb_get_next_pc_raw (frame, pc);
4861 if (nextpc == MAKE_THUMB_ADDR (pc))
4862 error (_("Infinite loop detected"));
4866 nextpc = arm_get_next_pc_raw (frame, pc);
4868 error (_("Infinite loop detected"));
4874 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
4875 of the appropriate mode (as encoded in the PC value), even if this
4876 differs from what would be expected according to the symbol tables. */
4879 arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
4880 struct address_space *aspace,
4883 struct cleanup *old_chain
4884 = make_cleanup_restore_integer (&arm_override_mode);
4886 arm_override_mode = IS_THUMB_ADDR (pc);
4887 pc = gdbarch_addr_bits_remove (gdbarch, pc);
4889 insert_single_step_breakpoint (gdbarch, aspace, pc);
4891 do_cleanups (old_chain);
4894 /* single_step() is called just before we want to resume the inferior,
4895 if we want to single-step it but there is no hardware or kernel
4896 single-step support. We find the target of the coming instruction
4897 and breakpoint it. */
4900 arm_software_single_step (struct frame_info *frame)
4902 struct gdbarch *gdbarch = get_frame_arch (frame);
4903 struct address_space *aspace = get_frame_address_space (frame);
4904 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4906 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4911 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4912 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4913 NULL if an error occurs. BUF is freed. */
4916 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4917 int old_len, int new_len)
4919 gdb_byte *new_buf, *middle;
4920 int bytes_to_read = new_len - old_len;
4922 new_buf = xmalloc (new_len);
4923 memcpy (new_buf + bytes_to_read, buf, old_len);
4925 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
4933 /* An IT block is at most the 2-byte IT instruction followed by
4934 four 4-byte instructions. The furthest back we must search to
4935 find an IT block that affects the current instruction is thus
4936 2 + 3 * 4 == 14 bytes. */
4937 #define MAX_IT_BLOCK_PREFIX 14
4939 /* Use a quick scan if there are more than this many bytes of
4941 #define IT_SCAN_THRESHOLD 32
4943 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4944 A breakpoint in an IT block may not be hit, depending on the
4947 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4951 CORE_ADDR boundary, func_start;
4952 int buf_len, buf2_len;
4953 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4954 int i, any, last_it, last_it_count;
4956 /* If we are using BKPT breakpoints, none of this is necessary. */
4957 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4960 /* ARM mode does not have this problem. */
4961 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4964 /* We are setting a breakpoint in Thumb code that could potentially
4965 contain an IT block. The first step is to find how much Thumb
4966 code there is; we do not need to read outside of known Thumb
4968 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4970 /* Thumb-2 code must have mapping symbols to have a chance. */
4973 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4975 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4976 && func_start > boundary)
4977 boundary = func_start;
4979 /* Search for a candidate IT instruction. We have to do some fancy
4980 footwork to distinguish a real IT instruction from the second
4981 half of a 32-bit instruction, but there is no need for that if
4982 there's no candidate. */
4983 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4985 /* No room for an IT instruction. */
4988 buf = xmalloc (buf_len);
4989 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4992 for (i = 0; i < buf_len; i += 2)
4994 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4995 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5007 /* OK, the code bytes before this instruction contain at least one
5008 halfword which resembles an IT instruction. We know that it's
5009 Thumb code, but there are still two possibilities. Either the
5010 halfword really is an IT instruction, or it is the second half of
5011 a 32-bit Thumb instruction. The only way we can tell is to
5012 scan forwards from a known instruction boundary. */
5013 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5017 /* There's a lot of code before this instruction. Start with an
5018 optimistic search; it's easy to recognize halfwords that can
5019 not be the start of a 32-bit instruction, and use that to
5020 lock on to the instruction boundaries. */
5021 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5024 buf_len = IT_SCAN_THRESHOLD;
5027 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5029 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5030 if (thumb_insn_size (inst1) == 2)
5037 /* At this point, if DEFINITE, BUF[I] is the first place we
5038 are sure that we know the instruction boundaries, and it is far
5039 enough from BPADDR that we could not miss an IT instruction
5040 affecting BPADDR. If ! DEFINITE, give up - start from a
5044 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5048 buf_len = bpaddr - boundary;
5054 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5057 buf_len = bpaddr - boundary;
5061 /* Scan forwards. Find the last IT instruction before BPADDR. */
5066 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5068 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5073 else if (inst1 & 0x0002)
5075 else if (inst1 & 0x0004)
5080 i += thumb_insn_size (inst1);
5086 /* There wasn't really an IT instruction after all. */
5089 if (last_it_count < 1)
5090 /* It was too far away. */
5093 /* This really is a trouble spot. Move the breakpoint to the IT
5095 return bpaddr - buf_len + last_it;
5098 /* ARM displaced stepping support.
5100 Generally ARM displaced stepping works as follows:
5102 1. When an instruction is to be single-stepped, it is first decoded by
5103 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5104 Depending on the type of instruction, it is then copied to a scratch
5105 location, possibly in a modified form. The copy_* set of functions
5106 performs such modification, as necessary. A breakpoint is placed after
5107 the modified instruction in the scratch space to return control to GDB.
5108 Note in particular that instructions which modify the PC will no longer
5109 do so after modification.
5111 2. The instruction is single-stepped, by setting the PC to the scratch
5112 location address, and resuming. Control returns to GDB when the
5115 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5116 function used for the current instruction. This function's job is to
5117 put the CPU/memory state back to what it would have been if the
5118 instruction had been executed unmodified in its original location. */
5120 /* NOP instruction (mov r0, r0). */
5121 #define ARM_NOP 0xe1a00000
5122 #define THUMB_NOP 0x4600
5124 /* Helper for register reads for displaced stepping. In particular, this
5125 returns the PC as it would be seen by the instruction at its original
5129 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5133 CORE_ADDR from = dsc->insn_addr;
5135 if (regno == ARM_PC_REGNUM)
5137 /* Compute pipeline offset:
5138 - When executing an ARM instruction, PC reads as the address of the
5139 current instruction plus 8.
5140 - When executing a Thumb instruction, PC reads as the address of the
5141 current instruction plus 4. */
5148 if (debug_displaced)
5149 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5150 (unsigned long) from);
5151 return (ULONGEST) from;
5155 regcache_cooked_read_unsigned (regs, regno, &ret);
5156 if (debug_displaced)
5157 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5158 regno, (unsigned long) ret);
5164 displaced_in_arm_mode (struct regcache *regs)
5167 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5169 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5171 return (ps & t_bit) == 0;
5174 /* Write to the PC as from a branch instruction. */
5177 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5181 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5182 architecture versions < 6. */
5183 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5184 val & ~(ULONGEST) 0x3);
5186 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5187 val & ~(ULONGEST) 0x1);
5190 /* Write to the PC as from a branch-exchange instruction. */
5193 bx_write_pc (struct regcache *regs, ULONGEST val)
5196 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5198 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5202 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5203 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5205 else if ((val & 2) == 0)
5207 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5208 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5212 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5213 mode, align dest to 4 bytes). */
5214 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5215 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5216 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5220 /* Write to the PC as if from a load instruction. */
5223 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5226 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5227 bx_write_pc (regs, val);
5229 branch_write_pc (regs, dsc, val);
5232 /* Write to the PC as if from an ALU instruction. */
5235 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5238 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5239 bx_write_pc (regs, val);
5241 branch_write_pc (regs, dsc, val);
5244 /* Helper for writing to registers for displaced stepping. Writing to the PC
5245 has a varying effects depending on the instruction which does the write:
5246 this is controlled by the WRITE_PC argument. */
5249 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5250 int regno, ULONGEST val, enum pc_write_style write_pc)
5252 if (regno == ARM_PC_REGNUM)
5254 if (debug_displaced)
5255 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5256 (unsigned long) val);
5259 case BRANCH_WRITE_PC:
5260 branch_write_pc (regs, dsc, val);
5264 bx_write_pc (regs, val);
5268 load_write_pc (regs, dsc, val);
5272 alu_write_pc (regs, dsc, val);
5275 case CANNOT_WRITE_PC:
5276 warning (_("Instruction wrote to PC in an unexpected way when "
5277 "single-stepping"));
5281 internal_error (__FILE__, __LINE__,
5282 _("Invalid argument to displaced_write_reg"));
5285 dsc->wrote_to_pc = 1;
5289 if (debug_displaced)
5290 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5291 regno, (unsigned long) val);
5292 regcache_cooked_write_unsigned (regs, regno, val);
5296 /* This function is used to concisely determine if an instruction INSN
5297 references PC. Register fields of interest in INSN should have the
5298 corresponding fields of BITMASK set to 0b1111. The function
5299 returns return 1 if any of these fields in INSN reference the PC
5300 (also 0b1111, r15), else it returns 0. */
5303 insn_references_pc (uint32_t insn, uint32_t bitmask)
5305 uint32_t lowbit = 1;
5307 while (bitmask != 0)
5311 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
5317 mask = lowbit * 0xf;
5319 if ((insn & mask) == mask)
5328 /* The simplest copy function. Many instructions have the same effect no
5329 matter what address they are executed at: in those cases, use this. */
5332 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5333 const char *iname, struct displaced_step_closure *dsc)
5335 if (debug_displaced)
5336 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5337 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5340 dsc->modinsn[0] = insn;
5346 thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
5347 uint16_t insn2, const char *iname,
5348 struct displaced_step_closure *dsc)
5350 if (debug_displaced)
5351 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
5352 "opcode/class '%s' unmodified\n", insn1, insn2,
5355 dsc->modinsn[0] = insn1;
5356 dsc->modinsn[1] = insn2;
5362 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
5365 thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
5367 struct displaced_step_closure *dsc)
5369 if (debug_displaced)
5370 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
5371 "opcode/class '%s' unmodified\n", insn,
5374 dsc->modinsn[0] = insn;
5379 /* Preload instructions with immediate offset. */
5382 cleanup_preload (struct gdbarch *gdbarch,
5383 struct regcache *regs, struct displaced_step_closure *dsc)
5385 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5386 if (!dsc->u.preload.immed)
5387 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5391 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
5392 struct displaced_step_closure *dsc, unsigned int rn)
5395 /* Preload instructions:
5397 {pli/pld} [rn, #+/-imm]
5399 {pli/pld} [r0, #+/-imm]. */
5401 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5402 rn_val = displaced_read_reg (regs, dsc, rn);
5403 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5404 dsc->u.preload.immed = 1;
5406 dsc->cleanup = &cleanup_preload;
5410 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5411 struct displaced_step_closure *dsc)
5413 unsigned int rn = bits (insn, 16, 19);
5415 if (!insn_references_pc (insn, 0x000f0000ul))
5416 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
5418 if (debug_displaced)
5419 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5420 (unsigned long) insn);
5422 dsc->modinsn[0] = insn & 0xfff0ffff;
5424 install_preload (gdbarch, regs, dsc, rn);
5430 thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
5431 struct regcache *regs, struct displaced_step_closure *dsc)
5433 unsigned int rn = bits (insn1, 0, 3);
5434 unsigned int u_bit = bit (insn1, 7);
5435 int imm12 = bits (insn2, 0, 11);
5438 if (rn != ARM_PC_REGNUM)
5439 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
5441 /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
5442 PLD (literal) Encoding T1. */
5443 if (debug_displaced)
5444 fprintf_unfiltered (gdb_stdlog,
5445 "displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
5446 (unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
5452 /* Rewrite instruction {pli/pld} PC imm12 into:
5453 Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
5457 Cleanup: r0 <- tmp[0], r1 <- tmp[1]. */
5459 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5460 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5462 pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
5464 displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
5465 displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
5466 dsc->u.preload.immed = 0;
5468 /* {pli/pld} [r0, r1] */
5469 dsc->modinsn[0] = insn1 & 0xfff0;
5470 dsc->modinsn[1] = 0xf001;
5473 dsc->cleanup = &cleanup_preload;
5477 /* Preload instructions with register offset. */
5480 install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
5481 struct displaced_step_closure *dsc, unsigned int rn,
5484 ULONGEST rn_val, rm_val;
5486 /* Preload register-offset instructions:
5488 {pli/pld} [rn, rm {, shift}]
5490 {pli/pld} [r0, r1 {, shift}]. */
5492 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5493 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5494 rn_val = displaced_read_reg (regs, dsc, rn);
5495 rm_val = displaced_read_reg (regs, dsc, rm);
5496 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5497 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5498 dsc->u.preload.immed = 0;
5500 dsc->cleanup = &cleanup_preload;
5504 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5505 struct regcache *regs,
5506 struct displaced_step_closure *dsc)
5508 unsigned int rn = bits (insn, 16, 19);
5509 unsigned int rm = bits (insn, 0, 3);
5512 if (!insn_references_pc (insn, 0x000f000ful))
5513 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
5515 if (debug_displaced)
5516 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5517 (unsigned long) insn);
5519 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5521 install_preload_reg (gdbarch, regs, dsc, rn, rm);
5525 /* Copy/cleanup coprocessor load and store instructions. */
5528 cleanup_copro_load_store (struct gdbarch *gdbarch,
5529 struct regcache *regs,
5530 struct displaced_step_closure *dsc)
5532 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5534 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5536 if (dsc->u.ldst.writeback)
5537 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5541 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
5542 struct displaced_step_closure *dsc,
5543 int writeback, unsigned int rn)
5547 /* Coprocessor load/store instructions:
5549 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5551 {stc/stc2} [r0, #+/-imm].
5553 ldc/ldc2 are handled identically. */
5555 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5556 rn_val = displaced_read_reg (regs, dsc, rn);
5557 /* PC should be 4-byte aligned. */
5558 rn_val = rn_val & 0xfffffffc;
5559 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5561 dsc->u.ldst.writeback = writeback;
5562 dsc->u.ldst.rn = rn;
5564 dsc->cleanup = &cleanup_copro_load_store;
5568 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5569 struct regcache *regs,
5570 struct displaced_step_closure *dsc)
5572 unsigned int rn = bits (insn, 16, 19);
5574 if (!insn_references_pc (insn, 0x000f0000ul))
5575 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5577 if (debug_displaced)
5578 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5579 "load/store insn %.8lx\n", (unsigned long) insn);
5581 dsc->modinsn[0] = insn & 0xfff0ffff;
5583 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
5589 thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
5590 uint16_t insn2, struct regcache *regs,
5591 struct displaced_step_closure *dsc)
5593 unsigned int rn = bits (insn1, 0, 3);
5595 if (rn != ARM_PC_REGNUM)
5596 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
5597 "copro load/store", dsc);
5599 if (debug_displaced)
5600 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5601 "load/store insn %.4x%.4x\n", insn1, insn2);
5603 dsc->modinsn[0] = insn1 & 0xfff0;
5604 dsc->modinsn[1] = insn2;
5607 /* This function is called for copying instruction LDC/LDC2/VLDR, which
5608 doesn't support writeback, so pass 0. */
5609 install_copro_load_store (gdbarch, regs, dsc, 0, rn);
5614 /* Clean up branch instructions (actually perform the branch, by setting
5618 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5619 struct displaced_step_closure *dsc)
5621 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
5622 int branch_taken = condition_true (dsc->u.branch.cond, status);
5623 enum pc_write_style write_pc = dsc->u.branch.exchange
5624 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5629 if (dsc->u.branch.link)
5631 /* The value of LR should be the next insn of current one. In order
5632 not to confuse logic hanlding later insn `bx lr', if current insn mode
5633 is Thumb, the bit 0 of LR value should be set to 1. */
5634 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
5637 next_insn_addr |= 0x1;
5639 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
5643 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
5646 /* Copy B/BL/BLX instructions with immediate destinations. */
5649 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
5650 struct displaced_step_closure *dsc,
5651 unsigned int cond, int exchange, int link, long offset)
5653 /* Implement "BL<cond> <label>" as:
5655 Preparation: cond <- instruction condition
5656 Insn: mov r0, r0 (nop)
5657 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5659 B<cond> similar, but don't set r14 in cleanup. */
5661 dsc->u.branch.cond = cond;
5662 dsc->u.branch.link = link;
5663 dsc->u.branch.exchange = exchange;
5665 dsc->u.branch.dest = dsc->insn_addr;
5666 if (link && exchange)
5667 /* For BLX, offset is computed from the Align (PC, 4). */
5668 dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
5671 dsc->u.branch.dest += 4 + offset;
5673 dsc->u.branch.dest += 8 + offset;
5675 dsc->cleanup = &cleanup_branch;
5678 arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
5679 struct regcache *regs, struct displaced_step_closure *dsc)
5681 unsigned int cond = bits (insn, 28, 31);
5682 int exchange = (cond == 0xf);
5683 int link = exchange || bit (insn, 24);
5686 if (debug_displaced)
5687 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
5688 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
5689 (unsigned long) insn);
5691 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5692 then arrange the switch into Thumb mode. */
5693 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
5695 offset = bits (insn, 0, 23) << 2;
5697 if (bit (offset, 25))
5698 offset = offset | ~0x3ffffff;
5700 dsc->modinsn[0] = ARM_NOP;
5702 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
5707 thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
5708 uint16_t insn2, struct regcache *regs,
5709 struct displaced_step_closure *dsc)
5711 int link = bit (insn2, 14);
5712 int exchange = link && !bit (insn2, 12);
5715 int j1 = bit (insn2, 13);
5716 int j2 = bit (insn2, 11);
5717 int s = sbits (insn1, 10, 10);
5718 int i1 = !(j1 ^ bit (insn1, 10));
5719 int i2 = !(j2 ^ bit (insn1, 10));
5721 if (!link && !exchange) /* B */
5723 offset = (bits (insn2, 0, 10) << 1);
5724 if (bit (insn2, 12)) /* Encoding T4 */
5726 offset |= (bits (insn1, 0, 9) << 12)
5732 else /* Encoding T3 */
5734 offset |= (bits (insn1, 0, 5) << 12)
5738 cond = bits (insn1, 6, 9);
5743 offset = (bits (insn1, 0, 9) << 12);
5744 offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
5745 offset |= exchange ?
5746 (bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
5749 if (debug_displaced)
5750 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
5751 "%.4x %.4x with offset %.8lx\n",
5752 link ? (exchange) ? "blx" : "bl" : "b",
5753 insn1, insn2, offset);
5755 dsc->modinsn[0] = THUMB_NOP;
5757 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
5761 /* Copy B Thumb instructions. */
5763 thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
5764 struct displaced_step_closure *dsc)
5766 unsigned int cond = 0;
5768 unsigned short bit_12_15 = bits (insn, 12, 15);
5769 CORE_ADDR from = dsc->insn_addr;
5771 if (bit_12_15 == 0xd)
5773 /* offset = SignExtend (imm8:0, 32) */
5774 offset = sbits ((insn << 1), 0, 8);
5775 cond = bits (insn, 8, 11);
5777 else if (bit_12_15 == 0xe) /* Encoding T2 */
5779 offset = sbits ((insn << 1), 0, 11);
5783 if (debug_displaced)
5784 fprintf_unfiltered (gdb_stdlog,
5785 "displaced: copying b immediate insn %.4x "
5786 "with offset %d\n", insn, offset);
5788 dsc->u.branch.cond = cond;
5789 dsc->u.branch.link = 0;
5790 dsc->u.branch.exchange = 0;
5791 dsc->u.branch.dest = from + 4 + offset;
5793 dsc->modinsn[0] = THUMB_NOP;
5795 dsc->cleanup = &cleanup_branch;
5800 /* Copy BX/BLX with register-specified destinations. */
5803 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
5804 struct displaced_step_closure *dsc, int link,
5805 unsigned int cond, unsigned int rm)
5807 /* Implement {BX,BLX}<cond> <reg>" as:
5809 Preparation: cond <- instruction condition
5810 Insn: mov r0, r0 (nop)
5811 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5813 Don't set r14 in cleanup for BX. */
5815 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
5817 dsc->u.branch.cond = cond;
5818 dsc->u.branch.link = link;
5820 dsc->u.branch.exchange = 1;
5822 dsc->cleanup = &cleanup_branch;
5826 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5827 struct regcache *regs, struct displaced_step_closure *dsc)
5829 unsigned int cond = bits (insn, 28, 31);
5832 int link = bit (insn, 5);
5833 unsigned int rm = bits (insn, 0, 3);
5835 if (debug_displaced)
5836 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
5837 (unsigned long) insn);
5839 dsc->modinsn[0] = ARM_NOP;
5841 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
5846 thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
5847 struct regcache *regs,
5848 struct displaced_step_closure *dsc)
5850 int link = bit (insn, 7);
5851 unsigned int rm = bits (insn, 3, 6);
5853 if (debug_displaced)
5854 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
5855 (unsigned short) insn);
5857 dsc->modinsn[0] = THUMB_NOP;
5859 install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
5865 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5868 cleanup_alu_imm (struct gdbarch *gdbarch,
5869 struct regcache *regs, struct displaced_step_closure *dsc)
5871 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5872 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5873 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5874 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5878 arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5879 struct displaced_step_closure *dsc)
5881 unsigned int rn = bits (insn, 16, 19);
5882 unsigned int rd = bits (insn, 12, 15);
5883 unsigned int op = bits (insn, 21, 24);
5884 int is_mov = (op == 0xd);
5885 ULONGEST rd_val, rn_val;
5887 if (!insn_references_pc (insn, 0x000ff000ul))
5888 return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
5890 if (debug_displaced)
5891 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
5892 "%.8lx\n", is_mov ? "move" : "ALU",
5893 (unsigned long) insn);
5895 /* Instruction is of form:
5897 <op><cond> rd, [rn,] #imm
5901 Preparation: tmp1, tmp2 <- r0, r1;
5903 Insn: <op><cond> r0, r1, #imm
5904 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5907 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5908 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5909 rn_val = displaced_read_reg (regs, dsc, rn);
5910 rd_val = displaced_read_reg (regs, dsc, rd);
5911 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5912 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5916 dsc->modinsn[0] = insn & 0xfff00fff;
5918 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
5920 dsc->cleanup = &cleanup_alu_imm;
5926 thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
5927 uint16_t insn2, struct regcache *regs,
5928 struct displaced_step_closure *dsc)
5930 unsigned int op = bits (insn1, 5, 8);
5931 unsigned int rn, rm, rd;
5932 ULONGEST rd_val, rn_val;
5934 rn = bits (insn1, 0, 3); /* Rn */
5935 rm = bits (insn2, 0, 3); /* Rm */
5936 rd = bits (insn2, 8, 11); /* Rd */
5938 /* This routine is only called for instruction MOV. */
5939 gdb_assert (op == 0x2 && rn == 0xf);
5941 if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
5942 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);
5944 if (debug_displaced)
5945 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
5946 "ALU", insn1, insn2);
5948 /* Instruction is of form:
5950 <op><cond> rd, [rn,] #imm
5954 Preparation: tmp1, tmp2 <- r0, r1;
5956 Insn: <op><cond> r0, r1, #imm
5957 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5960 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5961 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5962 rn_val = displaced_read_reg (regs, dsc, rn);
5963 rd_val = displaced_read_reg (regs, dsc, rd);
5964 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5965 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5968 dsc->modinsn[0] = insn1;
5969 dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
5972 dsc->cleanup = &cleanup_alu_imm;
5977 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5980 cleanup_alu_reg (struct gdbarch *gdbarch,
5981 struct regcache *regs, struct displaced_step_closure *dsc)
5986 rd_val = displaced_read_reg (regs, dsc, 0);
5988 for (i = 0; i < 3; i++)
5989 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5991 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5995 install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
5996 struct displaced_step_closure *dsc,
5997 unsigned int rd, unsigned int rn, unsigned int rm)
5999 ULONGEST rd_val, rn_val, rm_val;
6001 /* Instruction is of form:
6003 <op><cond> rd, [rn,] rm [, <shift>]
6007 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
6008 r0, r1, r2 <- rd, rn, rm
6009 Insn: <op><cond> r0, r1, r2 [, <shift>]
6010 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
6013 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6014 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6015 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6016 rd_val = displaced_read_reg (regs, dsc, rd);
6017 rn_val = displaced_read_reg (regs, dsc, rn);
6018 rm_val = displaced_read_reg (regs, dsc, rm);
6019 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6020 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6021 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
6024 dsc->cleanup = &cleanup_alu_reg;
6028 arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6029 struct displaced_step_closure *dsc)
6031 unsigned int op = bits (insn, 21, 24);
6032 int is_mov = (op == 0xd);
6034 if (!insn_references_pc (insn, 0x000ff00ful))
6035 return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
6037 if (debug_displaced)
6038 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
6039 is_mov ? "move" : "ALU", (unsigned long) insn);
6042 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
6044 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
6046 install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
6052 thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
6053 struct regcache *regs,
6054 struct displaced_step_closure *dsc)
6056 unsigned rn, rm, rd;
6058 rd = bits (insn, 3, 6);
6059 rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
6062 if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6063 return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
6065 if (debug_displaced)
6066 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
6067 "ALU", (unsigned short) insn);
6069 dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
6071 install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
6076 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
6079 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
6080 struct regcache *regs,
6081 struct displaced_step_closure *dsc)
6083 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6086 for (i = 0; i < 4; i++)
6087 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6089 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6093 install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
6094 struct displaced_step_closure *dsc,
6095 unsigned int rd, unsigned int rn, unsigned int rm,
6099 ULONGEST rd_val, rn_val, rm_val, rs_val;
6101 /* Instruction is of form:
6103 <op><cond> rd, [rn,] rm, <shift> rs
6107 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
6108 r0, r1, r2, r3 <- rd, rn, rm, rs
6109 Insn: <op><cond> r0, r1, r2, <shift> r3
6111 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
6115 for (i = 0; i < 4; i++)
6116 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6118 rd_val = displaced_read_reg (regs, dsc, rd);
6119 rn_val = displaced_read_reg (regs, dsc, rn);
6120 rm_val = displaced_read_reg (regs, dsc, rm);
6121 rs_val = displaced_read_reg (regs, dsc, rs);
6122 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6123 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6124 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
6125 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
6127 dsc->cleanup = &cleanup_alu_shifted_reg;
6131 arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
6132 struct regcache *regs,
6133 struct displaced_step_closure *dsc)
6135 unsigned int op = bits (insn, 21, 24);
6136 int is_mov = (op == 0xd);
6137 unsigned int rd, rn, rm, rs;
6139 if (!insn_references_pc (insn, 0x000fff0ful))
6140 return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
6142 if (debug_displaced)
6143 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
6144 "%.8lx\n", is_mov ? "move" : "ALU",
6145 (unsigned long) insn);
6147 rn = bits (insn, 16, 19);
6148 rm = bits (insn, 0, 3);
6149 rs = bits (insn, 8, 11);
6150 rd = bits (insn, 12, 15);
6153 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
6155 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
6157 install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
6162 /* Clean up load instructions. */
6165 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
6166 struct displaced_step_closure *dsc)
6168 ULONGEST rt_val, rt_val2 = 0, rn_val;
6170 rt_val = displaced_read_reg (regs, dsc, 0);
6171 if (dsc->u.ldst.xfersize == 8)
6172 rt_val2 = displaced_read_reg (regs, dsc, 1);
6173 rn_val = displaced_read_reg (regs, dsc, 2);
6175 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6176 if (dsc->u.ldst.xfersize > 4)
6177 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6178 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
6179 if (!dsc->u.ldst.immed)
6180 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
6182 /* Handle register writeback. */
6183 if (dsc->u.ldst.writeback)
6184 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
6185 /* Put result in right place. */
6186 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
6187 if (dsc->u.ldst.xfersize == 8)
6188 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
6191 /* Clean up store instructions. */
6194 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
6195 struct displaced_step_closure *dsc)
6197 ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);
6199 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6200 if (dsc->u.ldst.xfersize > 4)
6201 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6202 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
6203 if (!dsc->u.ldst.immed)
6204 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
6205 if (!dsc->u.ldst.restore_r4)
6206 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
6209 if (dsc->u.ldst.writeback)
6210 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
6213 /* Copy "extra" load/store instructions. These are halfword/doubleword
6214 transfers, which have a different encoding to byte/word transfers. */
6217 arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
6218 struct regcache *regs, struct displaced_step_closure *dsc)
6220 unsigned int op1 = bits (insn, 20, 24);
6221 unsigned int op2 = bits (insn, 5, 6);
6222 unsigned int rt = bits (insn, 12, 15);
6223 unsigned int rn = bits (insn, 16, 19);
6224 unsigned int rm = bits (insn, 0, 3);
6225 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
6226 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
6227 int immed = (op1 & 0x4) != 0;
6229 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
6231 if (!insn_references_pc (insn, 0x000ff00ful))
6232 return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
6234 if (debug_displaced)
6235 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
6236 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
6237 (unsigned long) insn);
6239 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
6242 internal_error (__FILE__, __LINE__,
6243 _("copy_extra_ld_st: instruction decode error"));
6245 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6246 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6247 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6249 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6251 rt_val = displaced_read_reg (regs, dsc, rt);
6252 if (bytesize[opcode] == 8)
6253 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
6254 rn_val = displaced_read_reg (regs, dsc, rn);
6256 rm_val = displaced_read_reg (regs, dsc, rm);
6258 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
6259 if (bytesize[opcode] == 8)
6260 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
6261 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
6263 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
6266 dsc->u.ldst.xfersize = bytesize[opcode];
6267 dsc->u.ldst.rn = rn;
6268 dsc->u.ldst.immed = immed;
6269 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
6270 dsc->u.ldst.restore_r4 = 0;
6273 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
6275 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
6276 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
6278 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
6280 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
6281 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
6283 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
6288 /* Copy byte/half word/word loads and stores. */
6291 install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
6292 struct displaced_step_closure *dsc, int load,
6293 int immed, int writeback, int size, int usermode,
6294 int rt, int rm, int rn)
6296 ULONGEST rt_val, rn_val, rm_val = 0;
6298 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6299 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6301 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6303 dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);
6305 rt_val = displaced_read_reg (regs, dsc, rt);
6306 rn_val = displaced_read_reg (regs, dsc, rn);
6308 rm_val = displaced_read_reg (regs, dsc, rm);
6310 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
6311 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
6313 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
6315 dsc->u.ldst.xfersize = size;
6316 dsc->u.ldst.rn = rn;
6317 dsc->u.ldst.immed = immed;
6318 dsc->u.ldst.writeback = writeback;
6320 /* To write PC we can do:
6322 Before this sequence of instructions:
6323 r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
6324 r2 is the Rn value got from dispalced_read_reg.
6326 Insn1: push {pc} Write address of STR instruction + offset on stack
6327 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
6328 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
6329 = addr(Insn1) + offset - addr(Insn3) - 8
6331 Insn4: add r4, r4, #8 r4 = offset - 8
6332 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
6334 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
6336 Otherwise we don't know what value to write for PC, since the offset is
6337 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
6338 of this can be found in Section "Saving from r15" in
6339 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
6341 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6346 thumb2_copy_load_literal (struct gdbarch *gdbarch, uint16_t insn1,
6347 uint16_t insn2, struct regcache *regs,
6348 struct displaced_step_closure *dsc, int size)
6350 unsigned int u_bit = bit (insn1, 7);
6351 unsigned int rt = bits (insn2, 12, 15);
6352 int imm12 = bits (insn2, 0, 11);
6355 if (debug_displaced)
6356 fprintf_unfiltered (gdb_stdlog,
6357 "displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
6358 (unsigned int) dsc->insn_addr, rt, u_bit ? '+' : '-',
6364 /* Rewrite instruction LDR Rt imm12 into:
6366 Prepare: tmp[0] <- r0, tmp[1] <- r2, tmp[2] <- r3, r2 <- pc, r3 <- imm12
6370 Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[1], r3 <- tmp[2]. */
6373 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6374 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6375 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6377 pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
6379 pc_val = pc_val & 0xfffffffc;
6381 displaced_write_reg (regs, dsc, 2, pc_val, CANNOT_WRITE_PC);
6382 displaced_write_reg (regs, dsc, 3, imm12, CANNOT_WRITE_PC);
6386 dsc->u.ldst.xfersize = size;
6387 dsc->u.ldst.immed = 0;
6388 dsc->u.ldst.writeback = 0;
6389 dsc->u.ldst.restore_r4 = 0;
6391 /* LDR R0, R2, R3 */
6392 dsc->modinsn[0] = 0xf852;
6393 dsc->modinsn[1] = 0x3;
6396 dsc->cleanup = &cleanup_load;
6402 thumb2_copy_load_reg_imm (struct gdbarch *gdbarch, uint16_t insn1,
6403 uint16_t insn2, struct regcache *regs,
6404 struct displaced_step_closure *dsc,
6405 int writeback, int immed)
6407 unsigned int rt = bits (insn2, 12, 15);
6408 unsigned int rn = bits (insn1, 0, 3);
6409 unsigned int rm = bits (insn2, 0, 3); /* Only valid if !immed. */
6410 /* In LDR (register), there is also a register Rm, which is not allowed to
6411 be PC, so we don't have to check it. */
6413 if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6414 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load",
6417 if (debug_displaced)
6418 fprintf_unfiltered (gdb_stdlog,
6419 "displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
6420 rt, rn, insn1, insn2);
6422 install_load_store (gdbarch, regs, dsc, 1, immed, writeback, 4,
6425 dsc->u.ldst.restore_r4 = 0;
6428 /* ldr[b]<cond> rt, [rn, #imm], etc.
6430 ldr[b]<cond> r0, [r2, #imm]. */
6432 dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
6433 dsc->modinsn[1] = insn2 & 0x0fff;
6436 /* ldr[b]<cond> rt, [rn, rm], etc.
6438 ldr[b]<cond> r0, [r2, r3]. */
6440 dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
6441 dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
6451 arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
6452 struct regcache *regs,
6453 struct displaced_step_closure *dsc,
6454 int load, int size, int usermode)
6456 int immed = !bit (insn, 25);
6457 int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
6458 unsigned int rt = bits (insn, 12, 15);
6459 unsigned int rn = bits (insn, 16, 19);
6460 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
6462 if (!insn_references_pc (insn, 0x000ff00ful))
6463 return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);
6465 if (debug_displaced)
6466 fprintf_unfiltered (gdb_stdlog,
6467 "displaced: copying %s%s r%d [r%d] insn %.8lx\n",
6468 load ? (size == 1 ? "ldrb" : "ldr")
6469 : (size == 1 ? "strb" : "str"), usermode ? "t" : "",
6471 (unsigned long) insn);
6473 install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
6474 usermode, rt, rm, rn);
6476 if (load || rt != ARM_PC_REGNUM)
6478 dsc->u.ldst.restore_r4 = 0;
6481 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
6483 {ldr,str}[b]<cond> r0, [r2, #imm]. */
6484 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
6486 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
6488 {ldr,str}[b]<cond> r0, [r2, r3]. */
6489 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
6493 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
6494 dsc->u.ldst.restore_r4 = 1;
6495 dsc->modinsn[0] = 0xe92d8000; /* push {pc} */
6496 dsc->modinsn[1] = 0xe8bd0010; /* pop {r4} */
6497 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
6498 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
6499 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
6503 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
6505 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
6510 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6515 /* Cleanup LDM instructions with fully-populated register list. This is an
6516 unfortunate corner case: it's impossible to implement correctly by modifying
6517 the instruction. The issue is as follows: we have an instruction,
6521 which we must rewrite to avoid loading PC. A possible solution would be to
6522 do the load in two halves, something like (with suitable cleanup
6526 ldm[id][ab] r8!, {r0-r7}
6528 ldm[id][ab] r8, {r7-r14}
6531 but at present there's no suitable place for <temp>, since the scratch space
6532 is overwritten before the cleanup routine is called. For now, we simply
6533 emulate the instruction. */
6536 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
6537 struct displaced_step_closure *dsc)
6539 int inc = dsc->u.block.increment;
6540 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
6541 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
6542 uint32_t regmask = dsc->u.block.regmask;
6543 int regno = inc ? 0 : 15;
6544 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
6545 int exception_return = dsc->u.block.load && dsc->u.block.user
6546 && (regmask & 0x8000) != 0;
6547 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6548 int do_transfer = condition_true (dsc->u.block.cond, status);
6549 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6554 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6555 sensible we can do here. Complain loudly. */
6556 if (exception_return)
6557 error (_("Cannot single-step exception return"));
6559 /* We don't handle any stores here for now. */
6560 gdb_assert (dsc->u.block.load != 0);
6562 if (debug_displaced)
6563 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
6564 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
6565 dsc->u.block.increment ? "inc" : "dec",
6566 dsc->u.block.before ? "before" : "after");
6573 while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
6576 while (regno >= 0 && (regmask & (1 << regno)) == 0)
6579 xfer_addr += bump_before;
6581 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
6582 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
6584 xfer_addr += bump_after;
6586 regmask &= ~(1 << regno);
6589 if (dsc->u.block.writeback)
6590 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
6594 /* Clean up an STM which included the PC in the register list. */
6597 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
6598 struct displaced_step_closure *dsc)
6600 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6601 int store_executed = condition_true (dsc->u.block.cond, status);
6602 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
6603 CORE_ADDR stm_insn_addr;
6606 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6608 /* If condition code fails, there's nothing else to do. */
6609 if (!store_executed)
6612 if (dsc->u.block.increment)
6614 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
6616 if (dsc->u.block.before)
6621 pc_stored_at = dsc->u.block.xfer_addr;
6623 if (dsc->u.block.before)
6627 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
6628 stm_insn_addr = dsc->scratch_base;
6629 offset = pc_val - stm_insn_addr;
6631 if (debug_displaced)
6632 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
6633 "STM instruction\n", offset);
6635 /* Rewrite the stored PC to the proper value for the non-displaced original
6637 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
6638 dsc->insn_addr + offset);
6641 /* Clean up an LDM which includes the PC in the register list. We clumped all
6642 the registers in the transferred list into a contiguous range r0...rX (to
6643 avoid loading PC directly and losing control of the debugged program), so we
6644 must undo that here. */
6647 cleanup_block_load_pc (struct gdbarch *gdbarch,
6648 struct regcache *regs,
6649 struct displaced_step_closure *dsc)
6651 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6652 int load_executed = condition_true (dsc->u.block.cond, status), i;
6653 unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
6654 unsigned int regs_loaded = bitcount (mask);
6655 unsigned int num_to_shuffle = regs_loaded, clobbered;
6657 /* The method employed here will fail if the register list is fully populated
6658 (we need to avoid loading PC directly). */
6659 gdb_assert (num_to_shuffle < 16);
6664 clobbered = (1 << num_to_shuffle) - 1;
6666 while (num_to_shuffle > 0)
6668 if ((mask & (1 << write_reg)) != 0)
6670 unsigned int read_reg = num_to_shuffle - 1;
6672 if (read_reg != write_reg)
6674 ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
6675 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
6676 if (debug_displaced)
6677 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
6678 "loaded register r%d to r%d\n"), read_reg,
6681 else if (debug_displaced)
6682 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
6683 "r%d already in the right place\n"),
6686 clobbered &= ~(1 << write_reg);
6694 /* Restore any registers we scribbled over. */
6695 for (write_reg = 0; clobbered != 0; write_reg++)
6697 if ((clobbered & (1 << write_reg)) != 0)
6699 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
6701 if (debug_displaced)
6702 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
6703 "clobbered register r%d\n"), write_reg);
6704 clobbered &= ~(1 << write_reg);
6708 /* Perform register writeback manually. */
6709 if (dsc->u.block.writeback)
6711 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
6713 if (dsc->u.block.increment)
6714 new_rn_val += regs_loaded * 4;
6716 new_rn_val -= regs_loaded * 4;
6718 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
6723 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6724 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6727 arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
6728 struct regcache *regs,
6729 struct displaced_step_closure *dsc)
6731 int load = bit (insn, 20);
6732 int user = bit (insn, 22);
6733 int increment = bit (insn, 23);
6734 int before = bit (insn, 24);
6735 int writeback = bit (insn, 21);
6736 int rn = bits (insn, 16, 19);
6738 /* Block transfers which don't mention PC can be run directly
6740 if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
6741 return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
6743 if (rn == ARM_PC_REGNUM)
6745 warning (_("displaced: Unpredictable LDM or STM with "
6746 "base register r15"));
6747 return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
6750 if (debug_displaced)
6751 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6752 "%.8lx\n", (unsigned long) insn);
6754 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
6755 dsc->u.block.rn = rn;
6757 dsc->u.block.load = load;
6758 dsc->u.block.user = user;
6759 dsc->u.block.increment = increment;
6760 dsc->u.block.before = before;
6761 dsc->u.block.writeback = writeback;
6762 dsc->u.block.cond = bits (insn, 28, 31);
6764 dsc->u.block.regmask = insn & 0xffff;
6768 if ((insn & 0xffff) == 0xffff)
6770 /* LDM with a fully-populated register list. This case is
6771 particularly tricky. Implement for now by fully emulating the
6772 instruction (which might not behave perfectly in all cases, but
6773 these instructions should be rare enough for that not to matter
6775 dsc->modinsn[0] = ARM_NOP;
6777 dsc->cleanup = &cleanup_block_load_all;
6781 /* LDM of a list of registers which includes PC. Implement by
6782 rewriting the list of registers to be transferred into a
6783 contiguous chunk r0...rX before doing the transfer, then shuffling
6784 registers into the correct places in the cleanup routine. */
6785 unsigned int regmask = insn & 0xffff;
6786 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6787 unsigned int to = 0, from = 0, i, new_rn;
6789 for (i = 0; i < num_in_list; i++)
6790 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6792 /* Writeback makes things complicated. We need to avoid clobbering
6793 the base register with one of the registers in our modified
6794 register list, but just using a different register can't work in
6797 ldm r14!, {r0-r13,pc}
6799 which would need to be rewritten as:
6803 but that can't work, because there's no free register for N.
6805 Solve this by turning off the writeback bit, and emulating
6806 writeback manually in the cleanup routine. */
6811 new_regmask = (1 << num_in_list) - 1;
6813 if (debug_displaced)
6814 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6815 "{..., pc}: original reg list %.4x, modified "
6816 "list %.4x\n"), rn, writeback ? "!" : "",
6817 (int) insn & 0xffff, new_regmask);
6819 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
6821 dsc->cleanup = &cleanup_block_load_pc;
6826 /* STM of a list of registers which includes PC. Run the instruction
6827 as-is, but out of line: this will store the wrong value for the PC,
6828 so we must manually fix up the memory in the cleanup routine.
6829 Doing things this way has the advantage that we can auto-detect
6830 the offset of the PC write (which is architecture-dependent) in
6831 the cleanup routine. */
6832 dsc->modinsn[0] = insn;
6834 dsc->cleanup = &cleanup_block_store_pc;
6841 thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
6842 struct regcache *regs,
6843 struct displaced_step_closure *dsc)
6845 int rn = bits (insn1, 0, 3);
6846 int load = bit (insn1, 4);
6847 int writeback = bit (insn1, 5);
6849 /* Block transfers which don't mention PC can be run directly
6851 if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
6852 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
6854 if (rn == ARM_PC_REGNUM)
6856 warning (_("displaced: Unpredictable LDM or STM with "
6857 "base register r15"));
6858 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
6859 "unpredictable ldm/stm", dsc);
6862 if (debug_displaced)
6863 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6864 "%.4x%.4x\n", insn1, insn2);
6866 /* Clear bit 13, since it should be always zero. */
6867 dsc->u.block.regmask = (insn2 & 0xdfff);
6868 dsc->u.block.rn = rn;
6870 dsc->u.block.load = load;
6871 dsc->u.block.user = 0;
6872 dsc->u.block.increment = bit (insn1, 7);
6873 dsc->u.block.before = bit (insn1, 8);
6874 dsc->u.block.writeback = writeback;
6875 dsc->u.block.cond = INST_AL;
6876 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
6880 if (dsc->u.block.regmask == 0xffff)
6882 /* This branch is impossible to happen. */
6887 unsigned int regmask = dsc->u.block.regmask;
6888 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6889 unsigned int to = 0, from = 0, i, new_rn;
6891 for (i = 0; i < num_in_list; i++)
6892 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6897 new_regmask = (1 << num_in_list) - 1;
6899 if (debug_displaced)
6900 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6901 "{..., pc}: original reg list %.4x, modified "
6902 "list %.4x\n"), rn, writeback ? "!" : "",
6903 (int) dsc->u.block.regmask, new_regmask);
6905 dsc->modinsn[0] = insn1;
6906 dsc->modinsn[1] = (new_regmask & 0xffff);
6909 dsc->cleanup = &cleanup_block_load_pc;
6914 dsc->modinsn[0] = insn1;
6915 dsc->modinsn[1] = insn2;
6917 dsc->cleanup = &cleanup_block_store_pc;
6922 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6923 for Linux, where some SVC instructions must be treated specially. */
6926 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6927 struct displaced_step_closure *dsc)
6929 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
6931 if (debug_displaced)
6932 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
6933 "%.8lx\n", (unsigned long) resume_addr);
6935 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6939 /* Common copy routine for svc instruciton. */
6942 install_svc (struct gdbarch *gdbarch, struct regcache *regs,
6943 struct displaced_step_closure *dsc)
6945 /* Preparation: none.
6946 Insn: unmodified svc.
6947 Cleanup: pc <- insn_addr + insn_size. */
6949 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6951 dsc->wrote_to_pc = 1;
6953 /* Allow OS-specific code to override SVC handling. */
6954 if (dsc->u.svc.copy_svc_os)
6955 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
6958 dsc->cleanup = &cleanup_svc;
6964 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
6965 struct regcache *regs, struct displaced_step_closure *dsc)
6968 if (debug_displaced)
6969 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
6970 (unsigned long) insn);
6972 dsc->modinsn[0] = insn;
6974 return install_svc (gdbarch, regs, dsc);
6978 thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
6979 struct regcache *regs, struct displaced_step_closure *dsc)
6982 if (debug_displaced)
6983 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
6986 dsc->modinsn[0] = insn;
6988 return install_svc (gdbarch, regs, dsc);
6991 /* Copy undefined instructions. */
6994 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6995 struct displaced_step_closure *dsc)
6997 if (debug_displaced)
6998 fprintf_unfiltered (gdb_stdlog,
6999 "displaced: copying undefined insn %.8lx\n",
7000 (unsigned long) insn);
7002 dsc->modinsn[0] = insn;
7008 thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7009 struct displaced_step_closure *dsc)
7012 if (debug_displaced)
7013 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
7014 "%.4x %.4x\n", (unsigned short) insn1,
7015 (unsigned short) insn2);
7017 dsc->modinsn[0] = insn1;
7018 dsc->modinsn[1] = insn2;
7024 /* Copy unpredictable instructions. */
7027 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
7028 struct displaced_step_closure *dsc)
7030 if (debug_displaced)
7031 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
7032 "%.8lx\n", (unsigned long) insn);
7034 dsc->modinsn[0] = insn;
7039 /* The decode_* functions are instruction decoding helpers. They mostly follow
7040 the presentation in the ARM ARM. */
7043 arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
7044 struct regcache *regs,
7045 struct displaced_step_closure *dsc)
7047 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
7048 unsigned int rn = bits (insn, 16, 19);
7050 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
7051 return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
7052 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
7053 return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
7054 else if ((op1 & 0x60) == 0x20)
7055 return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
7056 else if ((op1 & 0x71) == 0x40)
7057 return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
7059 else if ((op1 & 0x77) == 0x41)
7060 return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
7061 else if ((op1 & 0x77) == 0x45)
7062 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pli. */
7063 else if ((op1 & 0x77) == 0x51)
7066 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
7068 return arm_copy_unpred (gdbarch, insn, dsc);
7070 else if ((op1 & 0x77) == 0x55)
7071 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
7072 else if (op1 == 0x57)
7075 case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
7076 case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
7077 case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
7078 case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
7079 default: return arm_copy_unpred (gdbarch, insn, dsc);
7081 else if ((op1 & 0x63) == 0x43)
7082 return arm_copy_unpred (gdbarch, insn, dsc);
7083 else if ((op2 & 0x1) == 0x0)
7084 switch (op1 & ~0x80)
7087 return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
7089 return arm_copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
7090 case 0x71: case 0x75:
7092 return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
7093 case 0x63: case 0x67: case 0x73: case 0x77:
7094 return arm_copy_unpred (gdbarch, insn, dsc);
7096 return arm_copy_undef (gdbarch, insn, dsc);
7099 return arm_copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
7103 arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
7104 struct regcache *regs,
7105 struct displaced_step_closure *dsc)
7107 if (bit (insn, 27) == 0)
7108 return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
7109 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
7110 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
7113 return arm_copy_unmodified (gdbarch, insn, "srs", dsc);
7116 return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);
7118 case 0x4: case 0x5: case 0x6: case 0x7:
7119 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
7122 switch ((insn & 0xe00000) >> 21)
7124 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
7126 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7129 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
7132 return arm_copy_undef (gdbarch, insn, dsc);
7137 int rn_f = (bits (insn, 16, 19) == 0xf);
7138 switch ((insn & 0xe00000) >> 21)
7141 /* ldc/ldc2 imm (undefined for rn == pc). */
7142 return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
7143 : arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7146 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
7148 case 0x4: case 0x5: case 0x6: case 0x7:
7149 /* ldc/ldc2 lit (undefined for rn != pc). */
7150 return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
7151 : arm_copy_undef (gdbarch, insn, dsc);
7154 return arm_copy_undef (gdbarch, insn, dsc);
7159 return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
7162 if (bits (insn, 16, 19) == 0xf)
7164 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7166 return arm_copy_undef (gdbarch, insn, dsc);
7170 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
7172 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7176 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
7178 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7181 return arm_copy_undef (gdbarch, insn, dsc);
7185 /* Decode miscellaneous instructions in dp/misc encoding space. */
7188 arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
7189 struct regcache *regs,
7190 struct displaced_step_closure *dsc)
7192 unsigned int op2 = bits (insn, 4, 6);
7193 unsigned int op = bits (insn, 21, 22);
7194 unsigned int op1 = bits (insn, 16, 19);
7199 return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
7202 if (op == 0x1) /* bx. */
7203 return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
7205 return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
7207 return arm_copy_undef (gdbarch, insn, dsc);
7211 /* Not really supported. */
7212 return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
7214 return arm_copy_undef (gdbarch, insn, dsc);
7218 return arm_copy_bx_blx_reg (gdbarch, insn,
7219 regs, dsc); /* blx register. */
7221 return arm_copy_undef (gdbarch, insn, dsc);
7224 return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
7228 return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
7230 /* Not really supported. */
7231 return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
7234 return arm_copy_undef (gdbarch, insn, dsc);
7239 arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
7240 struct regcache *regs,
7241 struct displaced_step_closure *dsc)
7244 switch (bits (insn, 20, 24))
7247 return arm_copy_unmodified (gdbarch, insn, "movw", dsc);
7250 return arm_copy_unmodified (gdbarch, insn, "movt", dsc);
7252 case 0x12: case 0x16:
7253 return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);
7256 return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
7260 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
7262 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
7263 return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
7264 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
7265 return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
7266 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
7267 return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
7268 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
7269 return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
7270 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
7271 return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
7272 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
7273 return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
7274 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
7275 /* 2nd arg means "unpriveleged". */
7276 return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
7280 /* Should be unreachable. */
7285 arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
7286 struct regcache *regs,
7287 struct displaced_step_closure *dsc)
7289 int a = bit (insn, 25), b = bit (insn, 4);
7290 uint32_t op1 = bits (insn, 20, 24);
7291 int rn_f = bits (insn, 16, 19) == 0xf;
7293 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
7294 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
7295 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
7296 else if ((!a && (op1 & 0x17) == 0x02)
7297 || (a && (op1 & 0x17) == 0x02 && !b))
7298 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
7299 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
7300 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
7301 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
7302 else if ((!a && (op1 & 0x17) == 0x03)
7303 || (a && (op1 & 0x17) == 0x03 && !b))
7304 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
7305 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
7306 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
7307 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
7308 else if ((!a && (op1 & 0x17) == 0x06)
7309 || (a && (op1 & 0x17) == 0x06 && !b))
7310 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
7311 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
7312 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
7313 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
7314 else if ((!a && (op1 & 0x17) == 0x07)
7315 || (a && (op1 & 0x17) == 0x07 && !b))
7316 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
7318 /* Should be unreachable. */
7323 arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
7324 struct displaced_step_closure *dsc)
7326 switch (bits (insn, 20, 24))
7328 case 0x00: case 0x01: case 0x02: case 0x03:
7329 return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
7331 case 0x04: case 0x05: case 0x06: case 0x07:
7332 return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
7334 case 0x08: case 0x09: case 0x0a: case 0x0b:
7335 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
7336 return arm_copy_unmodified (gdbarch, insn,
7337 "decode/pack/unpack/saturate/reverse", dsc);
7340 if (bits (insn, 5, 7) == 0) /* op2. */
7342 if (bits (insn, 12, 15) == 0xf)
7343 return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
7345 return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
7348 return arm_copy_undef (gdbarch, insn, dsc);
7350 case 0x1a: case 0x1b:
7351 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
7352 return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
7354 return arm_copy_undef (gdbarch, insn, dsc);
7356 case 0x1c: case 0x1d:
7357 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
7359 if (bits (insn, 0, 3) == 0xf)
7360 return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
7362 return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
7365 return arm_copy_undef (gdbarch, insn, dsc);
7367 case 0x1e: case 0x1f:
7368 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
7369 return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
7371 return arm_copy_undef (gdbarch, insn, dsc);
7374 /* Should be unreachable. */
7379 arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
7380 struct regcache *regs,
7381 struct displaced_step_closure *dsc)
7384 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
7386 return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
7390 arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
7391 struct regcache *regs,
7392 struct displaced_step_closure *dsc)
7394 unsigned int opcode = bits (insn, 20, 24);
7398 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
7399 return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
7401 case 0x08: case 0x0a: case 0x0c: case 0x0e:
7402 case 0x12: case 0x16:
7403 return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
7405 case 0x09: case 0x0b: case 0x0d: case 0x0f:
7406 case 0x13: case 0x17:
7407 return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
7409 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
7410 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
7411 /* Note: no writeback for these instructions. Bit 25 will always be
7412 zero though (via caller), so the following works OK. */
7413 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7416 /* Should be unreachable. */
7420 /* Decode shifted register instructions. */
7423 thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
7424 uint16_t insn2, struct regcache *regs,
7425 struct displaced_step_closure *dsc)
7427 /* PC is only allowed to be used in instruction MOV. */
7429 unsigned int op = bits (insn1, 5, 8);
7430 unsigned int rn = bits (insn1, 0, 3);
7432 if (op == 0x2 && rn == 0xf) /* MOV */
7433 return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
7435 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7436 "dp (shift reg)", dsc);
7440 /* Decode extension register load/store. Exactly the same as
7441 arm_decode_ext_reg_ld_st. */
7444 thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
7445 uint16_t insn2, struct regcache *regs,
7446 struct displaced_step_closure *dsc)
7448 unsigned int opcode = bits (insn1, 4, 8);
7452 case 0x04: case 0x05:
7453 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7454 "vfp/neon vmov", dsc);
7456 case 0x08: case 0x0c: /* 01x00 */
7457 case 0x0a: case 0x0e: /* 01x10 */
7458 case 0x12: case 0x16: /* 10x10 */
7459 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7460 "vfp/neon vstm/vpush", dsc);
7462 case 0x09: case 0x0d: /* 01x01 */
7463 case 0x0b: case 0x0f: /* 01x11 */
7464 case 0x13: case 0x17: /* 10x11 */
7465 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7466 "vfp/neon vldm/vpop", dsc);
7468 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
7469 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7471 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
7472 return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
7475 /* Should be unreachable. */
7480 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
7481 struct regcache *regs, struct displaced_step_closure *dsc)
7483 unsigned int op1 = bits (insn, 20, 25);
7484 int op = bit (insn, 4);
7485 unsigned int coproc = bits (insn, 8, 11);
7486 unsigned int rn = bits (insn, 16, 19);
7488 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
7489 return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
7490 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
7491 && (coproc & 0xe) != 0xa)
7493 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7494 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
7495 && (coproc & 0xe) != 0xa)
7496 /* ldc/ldc2 imm/lit. */
7497 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7498 else if ((op1 & 0x3e) == 0x00)
7499 return arm_copy_undef (gdbarch, insn, dsc);
7500 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
7501 return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
7502 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
7503 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
7504 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
7505 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
7506 else if ((op1 & 0x30) == 0x20 && !op)
7508 if ((coproc & 0xe) == 0xa)
7509 return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
7511 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7513 else if ((op1 & 0x30) == 0x20 && op)
7514 return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
7515 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
7516 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
7517 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
7518 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
7519 else if ((op1 & 0x30) == 0x30)
7520 return arm_copy_svc (gdbarch, insn, regs, dsc);
7522 return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
7526 thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
7527 uint16_t insn2, struct regcache *regs,
7528 struct displaced_step_closure *dsc)
7530 unsigned int coproc = bits (insn2, 8, 11);
7531 unsigned int op1 = bits (insn1, 4, 9);
7532 unsigned int bit_5_8 = bits (insn1, 5, 8);
7533 unsigned int bit_9 = bit (insn1, 9);
7534 unsigned int bit_4 = bit (insn1, 4);
7535 unsigned int rn = bits (insn1, 0, 3);
7540 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7541 "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
7543 else if (bit_5_8 == 0) /* UNDEFINED. */
7544 return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
7547 /*coproc is 101x. SIMD/VFP, ext registers load/store. */
7548 if ((coproc & 0xe) == 0xa)
7549 return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
7551 else /* coproc is not 101x. */
7553 if (bit_4 == 0) /* STC/STC2. */
7554 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7556 else /* LDC/LDC2 {literal, immeidate}. */
7557 return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
7563 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);
7569 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
7570 struct displaced_step_closure *dsc, int rd)
7576 Preparation: Rd <- PC
7582 int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7583 displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
7587 thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
7588 struct displaced_step_closure *dsc,
7589 int rd, unsigned int imm)
7592 /* Encoding T2: ADDS Rd, #imm */
7593 dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
7595 install_pc_relative (gdbarch, regs, dsc, rd);
7601 thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
7602 struct regcache *regs,
7603 struct displaced_step_closure *dsc)
7605 unsigned int rd = bits (insn, 8, 10);
7606 unsigned int imm8 = bits (insn, 0, 7);
7608 if (debug_displaced)
7609 fprintf_unfiltered (gdb_stdlog,
7610 "displaced: copying thumb adr r%d, #%d insn %.4x\n",
7613 return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
7617 thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
7618 uint16_t insn2, struct regcache *regs,
7619 struct displaced_step_closure *dsc)
7621 unsigned int rd = bits (insn2, 8, 11);
7622 /* Since immediate has the same encoding in ADR ADD and SUB, so we simply
7623 extract raw immediate encoding rather than computing immediate. When
7624 generating ADD or SUB instruction, we can simply perform OR operation to
7625 set immediate into ADD. */
7626 unsigned int imm_3_8 = insn2 & 0x70ff;
7627 unsigned int imm_i = insn1 & 0x0400; /* Clear all bits except bit 10. */
7629 if (debug_displaced)
7630 fprintf_unfiltered (gdb_stdlog,
7631 "displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
7632 rd, imm_i, imm_3_8, insn1, insn2);
7634 if (bit (insn1, 7)) /* Encoding T2 */
7636 /* Encoding T3: SUB Rd, Rd, #imm */
7637 dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
7638 dsc->modinsn[1] = ((rd << 8) | imm_3_8);
7640 else /* Encoding T3 */
7642 /* Encoding T3: ADD Rd, Rd, #imm */
7643 dsc->modinsn[0] = (0xf100 | rd | imm_i);
7644 dsc->modinsn[1] = ((rd << 8) | imm_3_8);
7648 install_pc_relative (gdbarch, regs, dsc, rd);
7654 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
7655 struct regcache *regs,
7656 struct displaced_step_closure *dsc)
7658 unsigned int rt = bits (insn1, 8, 10);
7660 int imm8 = (bits (insn1, 0, 7) << 2);
7661 CORE_ADDR from = dsc->insn_addr;
7667 Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
7669 Insn: LDR R0, [R2, R3];
7670 Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */
7672 if (debug_displaced)
7673 fprintf_unfiltered (gdb_stdlog,
7674 "displaced: copying thumb ldr r%d [pc #%d]\n"
7677 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
7678 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
7679 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
7680 pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7681 /* The assembler calculates the required value of the offset from the
7682 Align(PC,4) value of this instruction to the label. */
7683 pc = pc & 0xfffffffc;
7685 displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
7686 displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
7689 dsc->u.ldst.xfersize = 4;
7691 dsc->u.ldst.immed = 0;
7692 dsc->u.ldst.writeback = 0;
7693 dsc->u.ldst.restore_r4 = 0;
7695 dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/
7697 dsc->cleanup = &cleanup_load;
7702 /* Copy Thumb cbnz/cbz insruction. */
7705 thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
7706 struct regcache *regs,
7707 struct displaced_step_closure *dsc)
7709 int non_zero = bit (insn1, 11);
7710 unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
7711 CORE_ADDR from = dsc->insn_addr;
7712 int rn = bits (insn1, 0, 2);
7713 int rn_val = displaced_read_reg (regs, dsc, rn);
7715 dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
7716 /* CBNZ and CBZ do not affect the condition flags. If condition is true,
7717 set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
7718 condition is false, let it be, cleanup_branch will do nothing. */
7719 if (dsc->u.branch.cond)
7721 dsc->u.branch.cond = INST_AL;
7722 dsc->u.branch.dest = from + 4 + imm5;
7725 dsc->u.branch.dest = from + 2;
7727 dsc->u.branch.link = 0;
7728 dsc->u.branch.exchange = 0;
7730 if (debug_displaced)
7731 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
7732 " insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
7733 rn, rn_val, insn1, dsc->u.branch.dest);
7735 dsc->modinsn[0] = THUMB_NOP;
7737 dsc->cleanup = &cleanup_branch;
7741 /* Copy Table Branch Byte/Halfword */
7743 thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
7744 uint16_t insn2, struct regcache *regs,
7745 struct displaced_step_closure *dsc)
7747 ULONGEST rn_val, rm_val;
7748 int is_tbh = bit (insn2, 4);
7749 CORE_ADDR halfwords = 0;
7750 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7752 rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
7753 rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));
7759 target_read_memory (rn_val + 2 * rm_val, buf, 2);
7760 halfwords = extract_unsigned_integer (buf, 2, byte_order);
7766 target_read_memory (rn_val + rm_val, buf, 1);
7767 halfwords = extract_unsigned_integer (buf, 1, byte_order);
7770 if (debug_displaced)
7771 fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
7772 " offset 0x%x\n", is_tbh ? "tbh" : "tbb",
7773 (unsigned int) rn_val, (unsigned int) rm_val,
7774 (unsigned int) halfwords);
7776 dsc->u.branch.cond = INST_AL;
7777 dsc->u.branch.link = 0;
7778 dsc->u.branch.exchange = 0;
7779 dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;
7781 dsc->cleanup = &cleanup_branch;
7787 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
7788 struct displaced_step_closure *dsc)
7791 int val = displaced_read_reg (regs, dsc, 7);
7792 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
7795 val = displaced_read_reg (regs, dsc, 8);
7796 displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
7799 displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
7804 thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
7805 struct regcache *regs,
7806 struct displaced_step_closure *dsc)
7808 dsc->u.block.regmask = insn1 & 0x00ff;
7810 /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
7813 (1) register list is full, that is, r0-r7 are used.
7814 Prepare: tmp[0] <- r8
7816 POP {r0, r1, ...., r6, r7}; remove PC from reglist
7817 MOV r8, r7; Move value of r7 to r8;
7818 POP {r7}; Store PC value into r7.
7820 Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
7822 (2) register list is not full, supposing there are N registers in
7823 register list (except PC, 0 <= N <= 7).
7824 Prepare: for each i, 0 - N, tmp[i] <- ri.
7826 POP {r0, r1, ...., rN};
7828 Cleanup: Set registers in original reglist from r0 - rN. Restore r0 - rN
7829 from tmp[] properly.
7831 if (debug_displaced)
7832 fprintf_unfiltered (gdb_stdlog,
7833 "displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
7834 dsc->u.block.regmask, insn1);
7836 if (dsc->u.block.regmask == 0xff)
7838 dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
7840 dsc->modinsn[0] = (insn1 & 0xfeff); /* POP {r0,r1,...,r6, r7} */
7841 dsc->modinsn[1] = 0x46b8; /* MOV r8, r7 */
7842 dsc->modinsn[2] = 0xbc80; /* POP {r7} */
7845 dsc->cleanup = &cleanup_pop_pc_16bit_all;
7849 unsigned int num_in_list = bitcount (dsc->u.block.regmask);
7850 unsigned int new_regmask, bit = 1;
7851 unsigned int to = 0, from = 0, i, new_rn;
7853 for (i = 0; i < num_in_list + 1; i++)
7854 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
7856 new_regmask = (1 << (num_in_list + 1)) - 1;
7858 if (debug_displaced)
7859 fprintf_unfiltered (gdb_stdlog, _("displaced: POP "
7860 "{..., pc}: original reg list %.4x,"
7861 " modified list %.4x\n"),
7862 (int) dsc->u.block.regmask, new_regmask);
7864 dsc->u.block.regmask |= 0x8000;
7865 dsc->u.block.writeback = 0;
7866 dsc->u.block.cond = INST_AL;
7868 dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);
7870 dsc->cleanup = &cleanup_block_load_pc;
7877 thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
7878 struct regcache *regs,
7879 struct displaced_step_closure *dsc)
7881 unsigned short op_bit_12_15 = bits (insn1, 12, 15);
7882 unsigned short op_bit_10_11 = bits (insn1, 10, 11);
7885 /* 16-bit thumb instructions. */
7886 switch (op_bit_12_15)
7888 /* Shift (imme), add, subtract, move and compare. */
7889 case 0: case 1: case 2: case 3:
7890 err = thumb_copy_unmodified_16bit (gdbarch, insn1,
7891 "shift/add/sub/mov/cmp",
7895 switch (op_bit_10_11)
7897 case 0: /* Data-processing */
7898 err = thumb_copy_unmodified_16bit (gdbarch, insn1,
7902 case 1: /* Special data instructions and branch and exchange. */
7904 unsigned short op = bits (insn1, 7, 9);
7905 if (op == 6 || op == 7) /* BX or BLX */
7906 err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
7907 else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers. */
7908 err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
7910 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
7914 default: /* LDR (literal) */
7915 err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
7918 case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
7919 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
7922 if (op_bit_10_11 < 2) /* Generate PC-relative address */
7923 err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
7924 else /* Generate SP-relative address */
7925 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
7927 case 11: /* Misc 16-bit instructions */
7929 switch (bits (insn1, 8, 11))
7931 case 1: case 3: case 9: case 11: /* CBNZ, CBZ */
7932 err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
7934 case 12: case 13: /* POP */
7935 if (bit (insn1, 8)) /* PC is in register list. */
7936 err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
7938 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
7940 case 15: /* If-Then, and hints */
7941 if (bits (insn1, 0, 3))
7942 /* If-Then makes up to four following instructions conditional.
7943 IT instruction itself is not conditional, so handle it as a
7944 common unmodified instruction. */
7945 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
7948 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
7951 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
7956 if (op_bit_10_11 < 2) /* Store multiple registers */
7957 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
7958 else /* Load multiple registers */
7959 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
7961 case 13: /* Conditional branch and supervisor call */
7962 if (bits (insn1, 9, 11) != 7) /* conditional branch */
7963 err = thumb_copy_b (gdbarch, insn1, dsc);
7965 err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
7967 case 14: /* Unconditional branch */
7968 err = thumb_copy_b (gdbarch, insn1, dsc);
7975 internal_error (__FILE__, __LINE__,
7976 _("thumb_process_displaced_16bit_insn: Instruction decode error"));
7980 decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
7981 uint16_t insn1, uint16_t insn2,
7982 struct regcache *regs,
7983 struct displaced_step_closure *dsc)
7985 int rt = bits (insn2, 12, 15);
7986 int rn = bits (insn1, 0, 3);
7987 int op1 = bits (insn1, 7, 8);
7990 switch (bits (insn1, 5, 6))
7992 case 0: /* Load byte and memory hints */
7993 if (rt == 0xf) /* PLD/PLI */
7996 /* PLD literal or Encoding T3 of PLI(immediate, literal). */
7997 return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
7999 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8004 if (rn == 0xf) /* LDRB/LDRSB (literal) */
8005 return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
8008 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8009 "ldrb{reg, immediate}/ldrbt",
8014 case 1: /* Load halfword and memory hints. */
8015 if (rt == 0xf) /* PLD{W} and Unalloc memory hint. */
8016 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8017 "pld/unalloc memhint", dsc);
8021 return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
8024 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8028 case 2: /* Load word */
8030 int insn2_bit_8_11 = bits (insn2, 8, 11);
8033 return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
8034 else if (op1 == 0x1) /* Encoding T3 */
8035 return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
8037 else /* op1 == 0x0 */
8039 if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
8040 /* LDR (immediate) */
8041 return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
8042 dsc, bit (insn2, 8), 1);
8043 else if (insn2_bit_8_11 == 0xe) /* LDRT */
8044 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8047 /* LDR (register) */
8048 return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
8054 return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
8061 thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
8062 uint16_t insn2, struct regcache *regs,
8063 struct displaced_step_closure *dsc)
8066 unsigned short op = bit (insn2, 15);
8067 unsigned int op1 = bits (insn1, 11, 12);
8073 switch (bits (insn1, 9, 10))
8078 /* Load/store {dual, execlusive}, table branch. */
8079 if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
8080 && bits (insn2, 5, 7) == 0)
8081 err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
8084 /* PC is not allowed to use in load/store {dual, exclusive}
8086 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8087 "load/store dual/ex", dsc);
8089 else /* load/store multiple */
8091 switch (bits (insn1, 7, 8))
8093 case 0: case 3: /* SRS, RFE */
8094 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8097 case 1: case 2: /* LDM/STM/PUSH/POP */
8098 err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
8105 /* Data-processing (shift register). */
8106 err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
8109 default: /* Coprocessor instructions. */
8110 err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
8115 case 2: /* op1 = 2 */
8116 if (op) /* Branch and misc control. */
8118 if (bit (insn2, 14) /* BLX/BL */
8119 || bit (insn2, 12) /* Unconditional branch */
8120 || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
8121 err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
8123 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8128 if (bit (insn1, 9)) /* Data processing (plain binary imm). */
8130 int op = bits (insn1, 4, 8);
8131 int rn = bits (insn1, 0, 3);
8132 if ((op == 0 || op == 0xa) && rn == 0xf)
8133 err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
8136 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8139 else /* Data processing (modified immeidate) */
8140 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8144 case 3: /* op1 = 3 */
8145 switch (bits (insn1, 9, 10))
8149 err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
8151 else /* NEON Load/Store and Store single data item */
8152 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8153 "neon elt/struct load/store",
8156 case 1: /* op1 = 3, bits (9, 10) == 1 */
8157 switch (bits (insn1, 7, 8))
8159 case 0: case 1: /* Data processing (register) */
8160 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8163 case 2: /* Multiply and absolute difference */
8164 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8165 "mul/mua/diff", dsc);
8167 case 3: /* Long multiply and divide */
8168 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8173 default: /* Coprocessor instructions */
8174 err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
8183 internal_error (__FILE__, __LINE__,
8184 _("thumb_process_displaced_32bit_insn: Instruction decode error"));
8189 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8190 CORE_ADDR to, struct regcache *regs,
8191 struct displaced_step_closure *dsc)
8193 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8195 = read_memory_unsigned_integer (from, 2, byte_order_for_code);
8197 if (debug_displaced)
8198 fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
8199 "at %.8lx\n", insn1, (unsigned long) from);
8202 dsc->insn_size = thumb_insn_size (insn1);
8203 if (thumb_insn_size (insn1) == 4)
8206 = read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
8207 thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
8210 thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
8214 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8215 CORE_ADDR to, struct regcache *regs,
8216 struct displaced_step_closure *dsc)
8219 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8222 /* Most displaced instructions use a 1-instruction scratch space, so set this
8223 here and override below if/when necessary. */
8225 dsc->insn_addr = from;
8226 dsc->scratch_base = to;
8227 dsc->cleanup = NULL;
8228 dsc->wrote_to_pc = 0;
8230 if (!displaced_in_arm_mode (regs))
8231 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
8235 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
8236 if (debug_displaced)
8237 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
8238 "at %.8lx\n", (unsigned long) insn,
8239 (unsigned long) from);
8241 if ((insn & 0xf0000000) == 0xf0000000)
8242 err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
8243 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
8245 case 0x0: case 0x1: case 0x2: case 0x3:
8246 err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
8249 case 0x4: case 0x5: case 0x6:
8250 err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
8254 err = arm_decode_media (gdbarch, insn, dsc);
8257 case 0x8: case 0x9: case 0xa: case 0xb:
8258 err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
8261 case 0xc: case 0xd: case 0xe: case 0xf:
8262 err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
8267 internal_error (__FILE__, __LINE__,
8268 _("arm_process_displaced_insn: Instruction decode error"));
8271 /* Actually set up the scratch space for a displaced instruction. */
8274 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
8275 CORE_ADDR to, struct displaced_step_closure *dsc)
8277 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8278 unsigned int i, len, offset;
8279 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8280 int size = dsc->is_thumb? 2 : 4;
8281 const unsigned char *bkp_insn;
8284 /* Poke modified instruction(s). */
8285 for (i = 0; i < dsc->numinsns; i++)
8287 if (debug_displaced)
8289 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
8291 fprintf_unfiltered (gdb_stdlog, "%.8lx",
8294 fprintf_unfiltered (gdb_stdlog, "%.4x",
8295 (unsigned short)dsc->modinsn[i]);
8297 fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
8298 (unsigned long) to + offset);
8301 write_memory_unsigned_integer (to + offset, size,
8302 byte_order_for_code,
8307 /* Choose the correct breakpoint instruction. */
8310 bkp_insn = tdep->thumb_breakpoint;
8311 len = tdep->thumb_breakpoint_size;
8315 bkp_insn = tdep->arm_breakpoint;
8316 len = tdep->arm_breakpoint_size;
8319 /* Put breakpoint afterwards. */
8320 write_memory (to + offset, bkp_insn, len);
8322 if (debug_displaced)
8323 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
8324 paddress (gdbarch, from), paddress (gdbarch, to));
8327 /* Entry point for copying an instruction into scratch space for displaced
8330 struct displaced_step_closure *
8331 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
8332 CORE_ADDR from, CORE_ADDR to,
8333 struct regcache *regs)
8335 struct displaced_step_closure *dsc
8336 = xmalloc (sizeof (struct displaced_step_closure));
8337 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
8338 arm_displaced_init_closure (gdbarch, from, to, dsc);
8343 /* Entry point for cleaning things up after a displaced instruction has been
8347 arm_displaced_step_fixup (struct gdbarch *gdbarch,
8348 struct displaced_step_closure *dsc,
8349 CORE_ADDR from, CORE_ADDR to,
8350 struct regcache *regs)
8353 dsc->cleanup (gdbarch, regs, dsc);
8355 if (!dsc->wrote_to_pc)
8356 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
8357 dsc->insn_addr + dsc->insn_size);
8361 #include "bfd-in2.h"
8362 #include "libcoff.h"
8365 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
8367 struct gdbarch *gdbarch = info->application_data;
8369 if (arm_pc_is_thumb (gdbarch, memaddr))
8371 static asymbol *asym;
8372 static combined_entry_type ce;
8373 static struct coff_symbol_struct csym;
8374 static struct bfd fake_bfd;
8375 static bfd_target fake_target;
8377 if (csym.native == NULL)
8379 /* Create a fake symbol vector containing a Thumb symbol.
8380 This is solely so that the code in print_insn_little_arm()
8381 and print_insn_big_arm() in opcodes/arm-dis.c will detect
8382 the presence of a Thumb symbol and switch to decoding
8383 Thumb instructions. */
8385 fake_target.flavour = bfd_target_coff_flavour;
8386 fake_bfd.xvec = &fake_target;
8387 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
8389 csym.symbol.the_bfd = &fake_bfd;
8390 csym.symbol.name = "fake";
8391 asym = (asymbol *) & csym;
8394 memaddr = UNMAKE_THUMB_ADDR (memaddr);
8395 info->symbols = &asym;
8398 info->symbols = NULL;
8400 if (info->endian == BFD_ENDIAN_BIG)
8401 return print_insn_big_arm (memaddr, info);
8403 return print_insn_little_arm (memaddr, info);
8406 /* The following define instruction sequences that will cause ARM
8407 cpu's to take an undefined instruction trap. These are used to
8408 signal a breakpoint to GDB.
8410 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
8411 modes. A different instruction is required for each mode. The ARM
8412 cpu's can also be big or little endian. Thus four different
8413 instructions are needed to support all cases.
8415 Note: ARMv4 defines several new instructions that will take the
8416 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
8417 not in fact add the new instructions. The new undefined
8418 instructions in ARMv4 are all instructions that had no defined
8419 behaviour in earlier chips. There is no guarantee that they will
8420 raise an exception, but may be treated as NOP's. In practice, it
8421 may only safe to rely on instructions matching:
8423 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
8424 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
8425 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
8427 Even this may only true if the condition predicate is true. The
8428 following use a condition predicate of ALWAYS so it is always TRUE.
8430 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
8431 and NetBSD all use a software interrupt rather than an undefined
8432 instruction to force a trap. This can be handled by by the
8433 abi-specific code during establishment of the gdbarch vector. */
8435 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
8436 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
8437 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
8438 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
8440 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
8441 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
8442 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
8443 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
8445 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
8446 the program counter value to determine whether a 16-bit or 32-bit
8447 breakpoint should be used. It returns a pointer to a string of
8448 bytes that encode a breakpoint instruction, stores the length of
8449 the string to *lenptr, and adjusts the program counter (if
8450 necessary) to point to the actual memory location where the
8451 breakpoint should be inserted. */
8453 static const unsigned char *
8454 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
8456 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8457 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8459 if (arm_pc_is_thumb (gdbarch, *pcptr))
8461 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
8463 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
8464 check whether we are replacing a 32-bit instruction. */
8465 if (tdep->thumb2_breakpoint != NULL)
8468 if (target_read_memory (*pcptr, buf, 2) == 0)
8470 unsigned short inst1;
8471 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
8472 if (thumb_insn_size (inst1) == 4)
8474 *lenptr = tdep->thumb2_breakpoint_size;
8475 return tdep->thumb2_breakpoint;
8480 *lenptr = tdep->thumb_breakpoint_size;
8481 return tdep->thumb_breakpoint;
8485 *lenptr = tdep->arm_breakpoint_size;
8486 return tdep->arm_breakpoint;
8491 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
8494 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8496 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
8498 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
8499 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
8500 that this is not confused with a 32-bit ARM breakpoint. */
8504 /* Extract from an array REGBUF containing the (raw) register state a
8505 function return value of type TYPE, and copy that, in virtual
8506 format, into VALBUF. */
8509 arm_extract_return_value (struct type *type, struct regcache *regs,
8512 struct gdbarch *gdbarch = get_regcache_arch (regs);
8513 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8515 if (TYPE_CODE_FLT == TYPE_CODE (type))
8517 switch (gdbarch_tdep (gdbarch)->fp_model)
8521 /* The value is in register F0 in internal format. We need to
8522 extract the raw value and then convert it to the desired
8524 bfd_byte tmpbuf[FP_REGISTER_SIZE];
8526 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
8527 convert_from_extended (floatformat_from_type (type), tmpbuf,
8528 valbuf, gdbarch_byte_order (gdbarch));
8532 case ARM_FLOAT_SOFT_FPA:
8533 case ARM_FLOAT_SOFT_VFP:
8534 /* ARM_FLOAT_VFP can arise if this is a variadic function so
8535 not using the VFP ABI code. */
8537 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
8538 if (TYPE_LENGTH (type) > 4)
8539 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
8540 valbuf + INT_REGISTER_SIZE);
8544 internal_error (__FILE__, __LINE__,
8545 _("arm_extract_return_value: "
8546 "Floating point model not supported"));
8550 else if (TYPE_CODE (type) == TYPE_CODE_INT
8551 || TYPE_CODE (type) == TYPE_CODE_CHAR
8552 || TYPE_CODE (type) == TYPE_CODE_BOOL
8553 || TYPE_CODE (type) == TYPE_CODE_PTR
8554 || TYPE_CODE (type) == TYPE_CODE_REF
8555 || TYPE_CODE (type) == TYPE_CODE_ENUM)
8557 /* If the type is a plain integer, then the access is
8558 straight-forward. Otherwise we have to play around a bit
8560 int len = TYPE_LENGTH (type);
8561 int regno = ARM_A1_REGNUM;
8566 /* By using store_unsigned_integer we avoid having to do
8567 anything special for small big-endian values. */
8568 regcache_cooked_read_unsigned (regs, regno++, &tmp);
8569 store_unsigned_integer (valbuf,
8570 (len > INT_REGISTER_SIZE
8571 ? INT_REGISTER_SIZE : len),
8573 len -= INT_REGISTER_SIZE;
8574 valbuf += INT_REGISTER_SIZE;
8579 /* For a structure or union the behaviour is as if the value had
8580 been stored to word-aligned memory and then loaded into
8581 registers with 32-bit load instruction(s). */
8582 int len = TYPE_LENGTH (type);
8583 int regno = ARM_A1_REGNUM;
8584 bfd_byte tmpbuf[INT_REGISTER_SIZE];
8588 regcache_cooked_read (regs, regno++, tmpbuf);
8589 memcpy (valbuf, tmpbuf,
8590 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
8591 len -= INT_REGISTER_SIZE;
8592 valbuf += INT_REGISTER_SIZE;
8598 /* Will a function return an aggregate type in memory or in a
8599 register? Return 0 if an aggregate type can be returned in a
8600 register, 1 if it must be returned in memory. */
8603 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
8606 enum type_code code;
8608 CHECK_TYPEDEF (type);
8610 /* In the ARM ABI, "integer" like aggregate types are returned in
8611 registers. For an aggregate type to be integer like, its size
8612 must be less than or equal to INT_REGISTER_SIZE and the
8613 offset of each addressable subfield must be zero. Note that bit
8614 fields are not addressable, and all addressable subfields of
8615 unions always start at offset zero.
8617 This function is based on the behaviour of GCC 2.95.1.
8618 See: gcc/arm.c: arm_return_in_memory() for details.
8620 Note: All versions of GCC before GCC 2.95.2 do not set up the
8621 parameters correctly for a function returning the following
8622 structure: struct { float f;}; This should be returned in memory,
8623 not a register. Richard Earnshaw sent me a patch, but I do not
8624 know of any way to detect if a function like the above has been
8625 compiled with the correct calling convention. */
8627 /* All aggregate types that won't fit in a register must be returned
8629 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
8634 /* The AAPCS says all aggregates not larger than a word are returned
8636 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
8639 /* The only aggregate types that can be returned in a register are
8640 structs and unions. Arrays must be returned in memory. */
8641 code = TYPE_CODE (type);
8642 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
8647 /* Assume all other aggregate types can be returned in a register.
8648 Run a check for structures, unions and arrays. */
8651 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
8654 /* Need to check if this struct/union is "integer" like. For
8655 this to be true, its size must be less than or equal to
8656 INT_REGISTER_SIZE and the offset of each addressable
8657 subfield must be zero. Note that bit fields are not
8658 addressable, and unions always start at offset zero. If any
8659 of the subfields is a floating point type, the struct/union
8660 cannot be an integer type. */
8662 /* For each field in the object, check:
8663 1) Is it FP? --> yes, nRc = 1;
8664 2) Is it addressable (bitpos != 0) and
8665 not packed (bitsize == 0)?
8669 for (i = 0; i < TYPE_NFIELDS (type); i++)
8671 enum type_code field_type_code;
8672 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
8675 /* Is it a floating point type field? */
8676 if (field_type_code == TYPE_CODE_FLT)
8682 /* If bitpos != 0, then we have to care about it. */
8683 if (TYPE_FIELD_BITPOS (type, i) != 0)
8685 /* Bitfields are not addressable. If the field bitsize is
8686 zero, then the field is not packed. Hence it cannot be
8687 a bitfield or any other packed type. */
8688 if (TYPE_FIELD_BITSIZE (type, i) == 0)
8700 /* Write into appropriate registers a function return value of type
8701 TYPE, given in virtual format. */
8704 arm_store_return_value (struct type *type, struct regcache *regs,
8705 const gdb_byte *valbuf)
8707 struct gdbarch *gdbarch = get_regcache_arch (regs);
8708 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8710 if (TYPE_CODE (type) == TYPE_CODE_FLT)
8712 char buf[MAX_REGISTER_SIZE];
8714 switch (gdbarch_tdep (gdbarch)->fp_model)
8718 convert_to_extended (floatformat_from_type (type), buf, valbuf,
8719 gdbarch_byte_order (gdbarch));
8720 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
8723 case ARM_FLOAT_SOFT_FPA:
8724 case ARM_FLOAT_SOFT_VFP:
8725 /* ARM_FLOAT_VFP can arise if this is a variadic function so
8726 not using the VFP ABI code. */
8728 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
8729 if (TYPE_LENGTH (type) > 4)
8730 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
8731 valbuf + INT_REGISTER_SIZE);
8735 internal_error (__FILE__, __LINE__,
8736 _("arm_store_return_value: Floating "
8737 "point model not supported"));
8741 else if (TYPE_CODE (type) == TYPE_CODE_INT
8742 || TYPE_CODE (type) == TYPE_CODE_CHAR
8743 || TYPE_CODE (type) == TYPE_CODE_BOOL
8744 || TYPE_CODE (type) == TYPE_CODE_PTR
8745 || TYPE_CODE (type) == TYPE_CODE_REF
8746 || TYPE_CODE (type) == TYPE_CODE_ENUM)
8748 if (TYPE_LENGTH (type) <= 4)
8750 /* Values of one word or less are zero/sign-extended and
8752 bfd_byte tmpbuf[INT_REGISTER_SIZE];
8753 LONGEST val = unpack_long (type, valbuf);
8755 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
8756 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
8760 /* Integral values greater than one word are stored in consecutive
8761 registers starting with r0. This will always be a multiple of
8762 the regiser size. */
8763 int len = TYPE_LENGTH (type);
8764 int regno = ARM_A1_REGNUM;
8768 regcache_cooked_write (regs, regno++, valbuf);
8769 len -= INT_REGISTER_SIZE;
8770 valbuf += INT_REGISTER_SIZE;
8776 /* For a structure or union the behaviour is as if the value had
8777 been stored to word-aligned memory and then loaded into
8778 registers with 32-bit load instruction(s). */
8779 int len = TYPE_LENGTH (type);
8780 int regno = ARM_A1_REGNUM;
8781 bfd_byte tmpbuf[INT_REGISTER_SIZE];
8785 memcpy (tmpbuf, valbuf,
8786 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
8787 regcache_cooked_write (regs, regno++, tmpbuf);
8788 len -= INT_REGISTER_SIZE;
8789 valbuf += INT_REGISTER_SIZE;
8795 /* Handle function return values. */
8797 static enum return_value_convention
8798 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
8799 struct type *valtype, struct regcache *regcache,
8800 gdb_byte *readbuf, const gdb_byte *writebuf)
8802 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8803 enum arm_vfp_cprc_base_type vfp_base_type;
8806 if (arm_vfp_abi_for_function (gdbarch, func_type)
8807 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
8809 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
8810 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
8812 for (i = 0; i < vfp_base_count; i++)
8814 if (reg_char == 'q')
8817 arm_neon_quad_write (gdbarch, regcache, i,
8818 writebuf + i * unit_length);
8821 arm_neon_quad_read (gdbarch, regcache, i,
8822 readbuf + i * unit_length);
8829 sprintf (name_buf, "%c%d", reg_char, i);
8830 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8833 regcache_cooked_write (regcache, regnum,
8834 writebuf + i * unit_length);
8836 regcache_cooked_read (regcache, regnum,
8837 readbuf + i * unit_length);
8840 return RETURN_VALUE_REGISTER_CONVENTION;
8843 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
8844 || TYPE_CODE (valtype) == TYPE_CODE_UNION
8845 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
8847 if (tdep->struct_return == pcc_struct_return
8848 || arm_return_in_memory (gdbarch, valtype))
8849 return RETURN_VALUE_STRUCT_CONVENTION;
8853 arm_store_return_value (valtype, regcache, writebuf);
8856 arm_extract_return_value (valtype, regcache, readbuf);
8858 return RETURN_VALUE_REGISTER_CONVENTION;
8863 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
8865 struct gdbarch *gdbarch = get_frame_arch (frame);
8866 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8867 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8869 char buf[INT_REGISTER_SIZE];
8871 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
8873 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
8877 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
8881 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
8882 return the target PC. Otherwise return 0. */
8885 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
8889 CORE_ADDR start_addr;
8891 /* Find the starting address and name of the function containing the PC. */
8892 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
8895 /* If PC is in a Thumb call or return stub, return the address of the
8896 target PC, which is in a register. The thunk functions are called
8897 _call_via_xx, where x is the register name. The possible names
8898 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
8899 functions, named __ARM_call_via_r[0-7]. */
8900 if (strncmp (name, "_call_via_", 10) == 0
8901 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
8903 /* Use the name suffix to determine which register contains the
8905 static char *table[15] =
8906 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
8907 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
8910 int offset = strlen (name) - 2;
8912 for (regno = 0; regno <= 14; regno++)
8913 if (strcmp (&name[offset], table[regno]) == 0)
8914 return get_frame_register_unsigned (frame, regno);
8917 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
8918 non-interworking calls to foo. We could decode the stubs
8919 to find the target but it's easier to use the symbol table. */
8920 namelen = strlen (name);
8921 if (name[0] == '_' && name[1] == '_'
8922 && ((namelen > 2 + strlen ("_from_thumb")
8923 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
8924 strlen ("_from_thumb")) == 0)
8925 || (namelen > 2 + strlen ("_from_arm")
8926 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
8927 strlen ("_from_arm")) == 0)))
8930 int target_len = namelen - 2;
8931 struct minimal_symbol *minsym;
8932 struct objfile *objfile;
8933 struct obj_section *sec;
8935 if (name[namelen - 1] == 'b')
8936 target_len -= strlen ("_from_thumb");
8938 target_len -= strlen ("_from_arm");
8940 target_name = alloca (target_len + 1);
8941 memcpy (target_name, name + 2, target_len);
8942 target_name[target_len] = '\0';
8944 sec = find_pc_section (pc);
8945 objfile = (sec == NULL) ? NULL : sec->objfile;
8946 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
8948 return SYMBOL_VALUE_ADDRESS (minsym);
8953 return 0; /* not a stub */
8957 set_arm_command (char *args, int from_tty)
8959 printf_unfiltered (_("\
8960 \"set arm\" must be followed by an apporpriate subcommand.\n"));
8961 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
8965 show_arm_command (char *args, int from_tty)
8967 cmd_show_list (showarmcmdlist, from_tty, "");
8971 arm_update_current_architecture (void)
8973 struct gdbarch_info info;
8975 /* If the current architecture is not ARM, we have nothing to do. */
8976 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
8979 /* Update the architecture. */
8980 gdbarch_info_init (&info);
8982 if (!gdbarch_update_p (info))
8983 internal_error (__FILE__, __LINE__, _("could not update architecture"));
8987 set_fp_model_sfunc (char *args, int from_tty,
8988 struct cmd_list_element *c)
8990 enum arm_float_model fp_model;
8992 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
8993 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
8995 arm_fp_model = fp_model;
8999 if (fp_model == ARM_FLOAT_LAST)
9000 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
9003 arm_update_current_architecture ();
9007 show_fp_model (struct ui_file *file, int from_tty,
9008 struct cmd_list_element *c, const char *value)
9010 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9012 if (arm_fp_model == ARM_FLOAT_AUTO
9013 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
9014 fprintf_filtered (file, _("\
9015 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
9016 fp_model_strings[tdep->fp_model]);
9018 fprintf_filtered (file, _("\
9019 The current ARM floating point model is \"%s\".\n"),
9020 fp_model_strings[arm_fp_model]);
9024 arm_set_abi (char *args, int from_tty,
9025 struct cmd_list_element *c)
9027 enum arm_abi_kind arm_abi;
9029 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
9030 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
9032 arm_abi_global = arm_abi;
9036 if (arm_abi == ARM_ABI_LAST)
9037 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
9040 arm_update_current_architecture ();
9044 arm_show_abi (struct ui_file *file, int from_tty,
9045 struct cmd_list_element *c, const char *value)
9047 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9049 if (arm_abi_global == ARM_ABI_AUTO
9050 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
9051 fprintf_filtered (file, _("\
9052 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
9053 arm_abi_strings[tdep->arm_abi]);
9055 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
9060 arm_show_fallback_mode (struct ui_file *file, int from_tty,
9061 struct cmd_list_element *c, const char *value)
9063 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9065 fprintf_filtered (file,
9066 _("The current execution mode assumed "
9067 "(when symbols are unavailable) is \"%s\".\n"),
9068 arm_fallback_mode_string);
9072 arm_show_force_mode (struct ui_file *file, int from_tty,
9073 struct cmd_list_element *c, const char *value)
9075 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9077 fprintf_filtered (file,
9078 _("The current execution mode assumed "
9079 "(even when symbols are available) is \"%s\".\n"),
9080 arm_force_mode_string);
9083 /* If the user changes the register disassembly style used for info
9084 register and other commands, we have to also switch the style used
9085 in opcodes for disassembly output. This function is run in the "set
9086 arm disassembly" command, and does that. */
9089 set_disassembly_style_sfunc (char *args, int from_tty,
9090 struct cmd_list_element *c)
9092 set_disassembly_style ();
9095 /* Return the ARM register name corresponding to register I. */
9097 arm_register_name (struct gdbarch *gdbarch, int i)
9099 const int num_regs = gdbarch_num_regs (gdbarch);
9101 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
9102 && i >= num_regs && i < num_regs + 32)
9104 static const char *const vfp_pseudo_names[] = {
9105 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
9106 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
9107 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
9108 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
9111 return vfp_pseudo_names[i - num_regs];
9114 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
9115 && i >= num_regs + 32 && i < num_regs + 32 + 16)
9117 static const char *const neon_pseudo_names[] = {
9118 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
9119 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
9122 return neon_pseudo_names[i - num_regs - 32];
9125 if (i >= ARRAY_SIZE (arm_register_names))
9126 /* These registers are only supported on targets which supply
9127 an XML description. */
9130 return arm_register_names[i];
9134 set_disassembly_style (void)
9138 /* Find the style that the user wants. */
9139 for (current = 0; current < num_disassembly_options; current++)
9140 if (disassembly_style == valid_disassembly_styles[current])
9142 gdb_assert (current < num_disassembly_options);
9144 /* Synchronize the disassembler. */
9145 set_arm_regname_option (current);
9148 /* Test whether the coff symbol specific value corresponds to a Thumb
9152 coff_sym_is_thumb (int val)
9154 return (val == C_THUMBEXT
9155 || val == C_THUMBSTAT
9156 || val == C_THUMBEXTFUNC
9157 || val == C_THUMBSTATFUNC
9158 || val == C_THUMBLABEL);
9161 /* arm_coff_make_msymbol_special()
9162 arm_elf_make_msymbol_special()
9164 These functions test whether the COFF or ELF symbol corresponds to
9165 an address in thumb code, and set a "special" bit in a minimal
9166 symbol to indicate that it does. */
9169 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
9171 if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
9172 == ST_BRANCH_TO_THUMB)
9173 MSYMBOL_SET_SPECIAL (msym);
9177 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
9179 if (coff_sym_is_thumb (val))
9180 MSYMBOL_SET_SPECIAL (msym);
9184 arm_objfile_data_free (struct objfile *objfile, void *arg)
9186 struct arm_per_objfile *data = arg;
9189 for (i = 0; i < objfile->obfd->section_count; i++)
9190 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
9194 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
9197 const char *name = bfd_asymbol_name (sym);
9198 struct arm_per_objfile *data;
9199 VEC(arm_mapping_symbol_s) **map_p;
9200 struct arm_mapping_symbol new_map_sym;
9202 gdb_assert (name[0] == '$');
9203 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
9206 data = objfile_data (objfile, arm_objfile_data_key);
9209 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
9210 struct arm_per_objfile);
9211 set_objfile_data (objfile, arm_objfile_data_key, data);
9212 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
9213 objfile->obfd->section_count,
9214 VEC(arm_mapping_symbol_s) *);
9216 map_p = &data->section_maps[bfd_get_section (sym)->index];
9218 new_map_sym.value = sym->value;
9219 new_map_sym.type = name[1];
9221 /* Assume that most mapping symbols appear in order of increasing
9222 value. If they were randomly distributed, it would be faster to
9223 always push here and then sort at first use. */
9224 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
9226 struct arm_mapping_symbol *prev_map_sym;
9228 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
9229 if (prev_map_sym->value >= sym->value)
9232 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
9233 arm_compare_mapping_symbols);
9234 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
9239 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
9243 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
9245 struct gdbarch *gdbarch = get_regcache_arch (regcache);
9246 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
9248 /* If necessary, set the T bit. */
9251 ULONGEST val, t_bit;
9252 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
9253 t_bit = arm_psr_thumb_bit (gdbarch);
9254 if (arm_pc_is_thumb (gdbarch, pc))
9255 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9258 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9263 /* Read the contents of a NEON quad register, by reading from two
9264 double registers. This is used to implement the quad pseudo
9265 registers, and for argument passing in case the quad registers are
9266 missing; vectors are passed in quad registers when using the VFP
9267 ABI, even if a NEON unit is not present. REGNUM is the index of
9268 the quad register, in [0, 15]. */
9270 static enum register_status
9271 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
9272 int regnum, gdb_byte *buf)
9275 gdb_byte reg_buf[8];
9276 int offset, double_regnum;
9277 enum register_status status;
9279 sprintf (name_buf, "d%d", regnum << 1);
9280 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9283 /* d0 is always the least significant half of q0. */
9284 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9289 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9290 if (status != REG_VALID)
9292 memcpy (buf + offset, reg_buf, 8);
9294 offset = 8 - offset;
9295 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
9296 if (status != REG_VALID)
9298 memcpy (buf + offset, reg_buf, 8);
9303 static enum register_status
9304 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
9305 int regnum, gdb_byte *buf)
9307 const int num_regs = gdbarch_num_regs (gdbarch);
9309 gdb_byte reg_buf[8];
9310 int offset, double_regnum;
9312 gdb_assert (regnum >= num_regs);
9315 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
9316 /* Quad-precision register. */
9317 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
9320 enum register_status status;
9322 /* Single-precision register. */
9323 gdb_assert (regnum < 32);
9325 /* s0 is always the least significant half of d0. */
9326 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9327 offset = (regnum & 1) ? 0 : 4;
9329 offset = (regnum & 1) ? 4 : 0;
9331 sprintf (name_buf, "d%d", regnum >> 1);
9332 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9335 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9336 if (status == REG_VALID)
9337 memcpy (buf, reg_buf + offset, 4);
9342 /* Store the contents of BUF to a NEON quad register, by writing to
9343 two double registers. This is used to implement the quad pseudo
9344 registers, and for argument passing in case the quad registers are
9345 missing; vectors are passed in quad registers when using the VFP
9346 ABI, even if a NEON unit is not present. REGNUM is the index
9347 of the quad register, in [0, 15]. */
9350 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
9351 int regnum, const gdb_byte *buf)
9354 gdb_byte reg_buf[8];
9355 int offset, double_regnum;
9357 sprintf (name_buf, "d%d", regnum << 1);
9358 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9361 /* d0 is always the least significant half of q0. */
9362 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9367 regcache_raw_write (regcache, double_regnum, buf + offset);
9368 offset = 8 - offset;
9369 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
9373 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
9374 int regnum, const gdb_byte *buf)
9376 const int num_regs = gdbarch_num_regs (gdbarch);
9378 gdb_byte reg_buf[8];
9379 int offset, double_regnum;
9381 gdb_assert (regnum >= num_regs);
9384 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
9385 /* Quad-precision register. */
9386 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
9389 /* Single-precision register. */
9390 gdb_assert (regnum < 32);
9392 /* s0 is always the least significant half of d0. */
9393 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9394 offset = (regnum & 1) ? 0 : 4;
9396 offset = (regnum & 1) ? 4 : 0;
9398 sprintf (name_buf, "d%d", regnum >> 1);
9399 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9402 regcache_raw_read (regcache, double_regnum, reg_buf);
9403 memcpy (reg_buf + offset, buf, 4);
9404 regcache_raw_write (regcache, double_regnum, reg_buf);
9408 static struct value *
9409 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
9411 const int *reg_p = baton;
9412 return value_of_register (*reg_p, frame);
9415 static enum gdb_osabi
9416 arm_elf_osabi_sniffer (bfd *abfd)
9418 unsigned int elfosabi;
9419 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
9421 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
9423 if (elfosabi == ELFOSABI_ARM)
9424 /* GNU tools use this value. Check note sections in this case,
9426 bfd_map_over_sections (abfd,
9427 generic_elf_osabi_sniff_abi_tag_sections,
9430 /* Anything else will be handled by the generic ELF sniffer. */
9435 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
9436 struct reggroup *group)
9438 /* FPS register's type is INT, but belongs to float_reggroup. Beside
9439 this, FPS register belongs to save_regroup, restore_reggroup, and
9440 all_reggroup, of course. */
9441 if (regnum == ARM_FPS_REGNUM)
9442 return (group == float_reggroup
9443 || group == save_reggroup
9444 || group == restore_reggroup
9445 || group == all_reggroup);
9447 return default_register_reggroup_p (gdbarch, regnum, group);
9451 /* Initialize the current architecture based on INFO. If possible,
9452 re-use an architecture from ARCHES, which is a list of
9453 architectures already created during this debugging session.
9455 Called e.g. at program startup, when reading a core file, and when
9456 reading a binary file. */
9458 static struct gdbarch *
9459 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
9461 struct gdbarch_tdep *tdep;
9462 struct gdbarch *gdbarch;
9463 struct gdbarch_list *best_arch;
9464 enum arm_abi_kind arm_abi = arm_abi_global;
9465 enum arm_float_model fp_model = arm_fp_model;
9466 struct tdesc_arch_data *tdesc_data = NULL;
9468 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
9470 int have_fpa_registers = 1;
9471 const struct target_desc *tdesc = info.target_desc;
9473 /* If we have an object to base this architecture on, try to determine
9476 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
9478 int ei_osabi, e_flags;
9480 switch (bfd_get_flavour (info.abfd))
9482 case bfd_target_aout_flavour:
9483 /* Assume it's an old APCS-style ABI. */
9484 arm_abi = ARM_ABI_APCS;
9487 case bfd_target_coff_flavour:
9488 /* Assume it's an old APCS-style ABI. */
9490 arm_abi = ARM_ABI_APCS;
9493 case bfd_target_elf_flavour:
9494 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
9495 e_flags = elf_elfheader (info.abfd)->e_flags;
9497 if (ei_osabi == ELFOSABI_ARM)
9499 /* GNU tools used to use this value, but do not for EABI
9500 objects. There's nowhere to tag an EABI version
9501 anyway, so assume APCS. */
9502 arm_abi = ARM_ABI_APCS;
9504 else if (ei_osabi == ELFOSABI_NONE)
9506 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
9507 int attr_arch, attr_profile;
9511 case EF_ARM_EABI_UNKNOWN:
9512 /* Assume GNU tools. */
9513 arm_abi = ARM_ABI_APCS;
9516 case EF_ARM_EABI_VER4:
9517 case EF_ARM_EABI_VER5:
9518 arm_abi = ARM_ABI_AAPCS;
9519 /* EABI binaries default to VFP float ordering.
9520 They may also contain build attributes that can
9521 be used to identify if the VFP argument-passing
9523 if (fp_model == ARM_FLOAT_AUTO)
9526 switch (bfd_elf_get_obj_attr_int (info.abfd,
9531 /* "The user intended FP parameter/result
9532 passing to conform to AAPCS, base
9534 fp_model = ARM_FLOAT_SOFT_VFP;
9537 /* "The user intended FP parameter/result
9538 passing to conform to AAPCS, VFP
9540 fp_model = ARM_FLOAT_VFP;
9543 /* "The user intended FP parameter/result
9544 passing to conform to tool chain-specific
9545 conventions" - we don't know any such
9546 conventions, so leave it as "auto". */
9549 /* Attribute value not mentioned in the
9550 October 2008 ABI, so leave it as
9555 fp_model = ARM_FLOAT_SOFT_VFP;
9561 /* Leave it as "auto". */
9562 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
9567 /* Detect M-profile programs. This only works if the
9568 executable file includes build attributes; GCC does
9569 copy them to the executable, but e.g. RealView does
9571 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
9573 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
9575 Tag_CPU_arch_profile);
9576 /* GCC specifies the profile for v6-M; RealView only
9577 specifies the profile for architectures starting with
9578 V7 (as opposed to architectures with a tag
9579 numerically greater than TAG_CPU_ARCH_V7). */
9580 if (!tdesc_has_registers (tdesc)
9581 && (attr_arch == TAG_CPU_ARCH_V6_M
9582 || attr_arch == TAG_CPU_ARCH_V6S_M
9583 || attr_profile == 'M'))
9584 tdesc = tdesc_arm_with_m;
9588 if (fp_model == ARM_FLOAT_AUTO)
9590 int e_flags = elf_elfheader (info.abfd)->e_flags;
9592 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
9595 /* Leave it as "auto". Strictly speaking this case
9596 means FPA, but almost nobody uses that now, and
9597 many toolchains fail to set the appropriate bits
9598 for the floating-point model they use. */
9600 case EF_ARM_SOFT_FLOAT:
9601 fp_model = ARM_FLOAT_SOFT_FPA;
9603 case EF_ARM_VFP_FLOAT:
9604 fp_model = ARM_FLOAT_VFP;
9606 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
9607 fp_model = ARM_FLOAT_SOFT_VFP;
9612 if (e_flags & EF_ARM_BE8)
9613 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
9618 /* Leave it as "auto". */
9623 /* Check any target description for validity. */
9624 if (tdesc_has_registers (tdesc))
9626 /* For most registers we require GDB's default names; but also allow
9627 the numeric names for sp / lr / pc, as a convenience. */
9628 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
9629 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
9630 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
9632 const struct tdesc_feature *feature;
9635 feature = tdesc_find_feature (tdesc,
9636 "org.gnu.gdb.arm.core");
9637 if (feature == NULL)
9639 feature = tdesc_find_feature (tdesc,
9640 "org.gnu.gdb.arm.m-profile");
9641 if (feature == NULL)
9647 tdesc_data = tdesc_data_alloc ();
9650 for (i = 0; i < ARM_SP_REGNUM; i++)
9651 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
9652 arm_register_names[i]);
9653 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9656 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9659 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9663 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9664 ARM_PS_REGNUM, "xpsr");
9666 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9667 ARM_PS_REGNUM, "cpsr");
9671 tdesc_data_cleanup (tdesc_data);
9675 feature = tdesc_find_feature (tdesc,
9676 "org.gnu.gdb.arm.fpa");
9677 if (feature != NULL)
9680 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
9681 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
9682 arm_register_names[i]);
9685 tdesc_data_cleanup (tdesc_data);
9690 have_fpa_registers = 0;
9692 feature = tdesc_find_feature (tdesc,
9693 "org.gnu.gdb.xscale.iwmmxt");
9694 if (feature != NULL)
9696 static const char *const iwmmxt_names[] = {
9697 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
9698 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
9699 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
9700 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
9704 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
9706 &= tdesc_numbered_register (feature, tdesc_data, i,
9707 iwmmxt_names[i - ARM_WR0_REGNUM]);
9709 /* Check for the control registers, but do not fail if they
9711 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
9712 tdesc_numbered_register (feature, tdesc_data, i,
9713 iwmmxt_names[i - ARM_WR0_REGNUM]);
9715 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
9717 &= tdesc_numbered_register (feature, tdesc_data, i,
9718 iwmmxt_names[i - ARM_WR0_REGNUM]);
9722 tdesc_data_cleanup (tdesc_data);
9727 /* If we have a VFP unit, check whether the single precision registers
9728 are present. If not, then we will synthesize them as pseudo
9730 feature = tdesc_find_feature (tdesc,
9731 "org.gnu.gdb.arm.vfp");
9732 if (feature != NULL)
9734 static const char *const vfp_double_names[] = {
9735 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
9736 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
9737 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
9738 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
9741 /* Require the double precision registers. There must be either
9744 for (i = 0; i < 32; i++)
9746 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9748 vfp_double_names[i]);
9752 if (!valid_p && i == 16)
9755 /* Also require FPSCR. */
9756 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9757 ARM_FPSCR_REGNUM, "fpscr");
9760 tdesc_data_cleanup (tdesc_data);
9764 if (tdesc_unnumbered_register (feature, "s0") == 0)
9765 have_vfp_pseudos = 1;
9767 have_vfp_registers = 1;
9769 /* If we have VFP, also check for NEON. The architecture allows
9770 NEON without VFP (integer vector operations only), but GDB
9771 does not support that. */
9772 feature = tdesc_find_feature (tdesc,
9773 "org.gnu.gdb.arm.neon");
9774 if (feature != NULL)
9776 /* NEON requires 32 double-precision registers. */
9779 tdesc_data_cleanup (tdesc_data);
9783 /* If there are quad registers defined by the stub, use
9784 their type; otherwise (normally) provide them with
9785 the default type. */
9786 if (tdesc_unnumbered_register (feature, "q0") == 0)
9787 have_neon_pseudos = 1;
9794 /* If there is already a candidate, use it. */
9795 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
9797 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
9799 if (arm_abi != ARM_ABI_AUTO
9800 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
9803 if (fp_model != ARM_FLOAT_AUTO
9804 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
9807 /* There are various other properties in tdep that we do not
9808 need to check here: those derived from a target description,
9809 since gdbarches with a different target description are
9810 automatically disqualified. */
9812 /* Do check is_m, though, since it might come from the binary. */
9813 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
9816 /* Found a match. */
9820 if (best_arch != NULL)
9822 if (tdesc_data != NULL)
9823 tdesc_data_cleanup (tdesc_data);
9824 return best_arch->gdbarch;
9827 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
9828 gdbarch = gdbarch_alloc (&info, tdep);
9830 /* Record additional information about the architecture we are defining.
9831 These are gdbarch discriminators, like the OSABI. */
9832 tdep->arm_abi = arm_abi;
9833 tdep->fp_model = fp_model;
9835 tdep->have_fpa_registers = have_fpa_registers;
9836 tdep->have_vfp_registers = have_vfp_registers;
9837 tdep->have_vfp_pseudos = have_vfp_pseudos;
9838 tdep->have_neon_pseudos = have_neon_pseudos;
9839 tdep->have_neon = have_neon;
9842 switch (info.byte_order_for_code)
9844 case BFD_ENDIAN_BIG:
9845 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
9846 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
9847 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
9848 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
9852 case BFD_ENDIAN_LITTLE:
9853 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
9854 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
9855 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
9856 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
9861 internal_error (__FILE__, __LINE__,
9862 _("arm_gdbarch_init: bad byte order for float format"));
9865 /* On ARM targets char defaults to unsigned. */
9866 set_gdbarch_char_signed (gdbarch, 0);
9868 /* Note: for displaced stepping, this includes the breakpoint, and one word
9869 of additional scratch space. This setting isn't used for anything beside
9870 displaced stepping at present. */
9871 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
9873 /* This should be low enough for everything. */
9874 tdep->lowest_pc = 0x20;
9875 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
9877 /* The default, for both APCS and AAPCS, is to return small
9878 structures in registers. */
9879 tdep->struct_return = reg_struct_return;
9881 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
9882 set_gdbarch_frame_align (gdbarch, arm_frame_align);
9884 set_gdbarch_write_pc (gdbarch, arm_write_pc);
9886 /* Frame handling. */
9887 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
9888 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
9889 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
9891 frame_base_set_default (gdbarch, &arm_normal_base);
9893 /* Address manipulation. */
9894 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
9895 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
9897 /* Advance PC across function entry code. */
9898 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
9900 /* Detect whether PC is in function epilogue. */
9901 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
9903 /* Skip trampolines. */
9904 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
9906 /* The stack grows downward. */
9907 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
9909 /* Breakpoint manipulation. */
9910 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
9911 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
9912 arm_remote_breakpoint_from_pc);
9914 /* Information about registers, etc. */
9915 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
9916 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
9917 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
9918 set_gdbarch_register_type (gdbarch, arm_register_type);
9919 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
9921 /* This "info float" is FPA-specific. Use the generic version if we
9923 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
9924 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
9926 /* Internal <-> external register number maps. */
9927 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
9928 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
9930 set_gdbarch_register_name (gdbarch, arm_register_name);
9932 /* Returning results. */
9933 set_gdbarch_return_value (gdbarch, arm_return_value);
9936 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
9938 /* Minsymbol frobbing. */
9939 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
9940 set_gdbarch_coff_make_msymbol_special (gdbarch,
9941 arm_coff_make_msymbol_special);
9942 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
9944 /* Thumb-2 IT block support. */
9945 set_gdbarch_adjust_breakpoint_address (gdbarch,
9946 arm_adjust_breakpoint_address);
9948 /* Virtual tables. */
9949 set_gdbarch_vbit_in_delta (gdbarch, 1);
9951 /* Hook in the ABI-specific overrides, if they have been registered. */
9952 gdbarch_init_osabi (info, gdbarch);
9954 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
9956 /* Add some default predicates. */
9957 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
9958 dwarf2_append_unwinders (gdbarch);
9959 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
9960 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
9962 /* Now we have tuned the configuration, set a few final things,
9963 based on what the OS ABI has told us. */
9965 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
9966 binaries are always marked. */
9967 if (tdep->arm_abi == ARM_ABI_AUTO)
9968 tdep->arm_abi = ARM_ABI_APCS;
9970 /* Watchpoints are not steppable. */
9971 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9973 /* We used to default to FPA for generic ARM, but almost nobody
9974 uses that now, and we now provide a way for the user to force
9975 the model. So default to the most useful variant. */
9976 if (tdep->fp_model == ARM_FLOAT_AUTO)
9977 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
9979 if (tdep->jb_pc >= 0)
9980 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
9982 /* Floating point sizes and format. */
9983 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
9984 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
9986 set_gdbarch_double_format
9987 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
9988 set_gdbarch_long_double_format
9989 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
9993 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
9994 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
9997 if (have_vfp_pseudos)
9999 /* NOTE: These are the only pseudo registers used by
10000 the ARM target at the moment. If more are added, a
10001 little more care in numbering will be needed. */
10003 int num_pseudos = 32;
10004 if (have_neon_pseudos)
10006 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
10007 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
10008 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
10013 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
10015 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
10017 /* Override tdesc_register_type to adjust the types of VFP
10018 registers for NEON. */
10019 set_gdbarch_register_type (gdbarch, arm_register_type);
10022 /* Add standard register aliases. We add aliases even for those
10023 nanes which are used by the current architecture - it's simpler,
10024 and does no harm, since nothing ever lists user registers. */
10025 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
10026 user_reg_add (gdbarch, arm_register_aliases[i].name,
10027 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
10033 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
10035 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
10040 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
10041 (unsigned long) tdep->lowest_pc);
10044 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
10047 _initialize_arm_tdep (void)
10049 struct ui_file *stb;
10051 struct cmd_list_element *new_set, *new_show;
10052 const char *setname;
10053 const char *setdesc;
10054 const char *const *regnames;
10056 static char *helptext;
10057 char regdesc[1024], *rdptr = regdesc;
10058 size_t rest = sizeof (regdesc);
10060 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
10062 arm_objfile_data_key
10063 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
10065 /* Add ourselves to objfile event chain. */
10066 observer_attach_new_objfile (arm_exidx_new_objfile);
10068 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
10070 /* Register an ELF OS ABI sniffer for ARM binaries. */
10071 gdbarch_register_osabi_sniffer (bfd_arch_arm,
10072 bfd_target_elf_flavour,
10073 arm_elf_osabi_sniffer);
10075 /* Initialize the standard target descriptions. */
10076 initialize_tdesc_arm_with_m ();
10077 initialize_tdesc_arm_with_iwmmxt ();
10078 initialize_tdesc_arm_with_vfpv2 ();
10079 initialize_tdesc_arm_with_vfpv3 ();
10080 initialize_tdesc_arm_with_neon ();
10082 /* Get the number of possible sets of register names defined in opcodes. */
10083 num_disassembly_options = get_arm_regname_num_options ();
10085 /* Add root prefix command for all "set arm"/"show arm" commands. */
10086 add_prefix_cmd ("arm", no_class, set_arm_command,
10087 _("Various ARM-specific commands."),
10088 &setarmcmdlist, "set arm ", 0, &setlist);
10090 add_prefix_cmd ("arm", no_class, show_arm_command,
10091 _("Various ARM-specific commands."),
10092 &showarmcmdlist, "show arm ", 0, &showlist);
10094 /* Sync the opcode insn printer with our register viewer. */
10095 parse_arm_disassembler_option ("reg-names-std");
10097 /* Initialize the array that will be passed to
10098 add_setshow_enum_cmd(). */
10099 valid_disassembly_styles
10100 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
10101 for (i = 0; i < num_disassembly_options; i++)
10103 numregs = get_arm_regnames (i, &setname, &setdesc, ®names);
10104 valid_disassembly_styles[i] = setname;
10105 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
10108 /* When we find the default names, tell the disassembler to use
10110 if (!strcmp (setname, "std"))
10112 disassembly_style = setname;
10113 set_arm_regname_option (i);
10116 /* Mark the end of valid options. */
10117 valid_disassembly_styles[num_disassembly_options] = NULL;
10119 /* Create the help text. */
10120 stb = mem_fileopen ();
10121 fprintf_unfiltered (stb, "%s%s%s",
10122 _("The valid values are:\n"),
10124 _("The default is \"std\"."));
10125 helptext = ui_file_xstrdup (stb, NULL);
10126 ui_file_delete (stb);
10128 add_setshow_enum_cmd("disassembler", no_class,
10129 valid_disassembly_styles, &disassembly_style,
10130 _("Set the disassembly style."),
10131 _("Show the disassembly style."),
10133 set_disassembly_style_sfunc,
10134 NULL, /* FIXME: i18n: The disassembly style is
10136 &setarmcmdlist, &showarmcmdlist);
10138 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
10139 _("Set usage of ARM 32-bit mode."),
10140 _("Show usage of ARM 32-bit mode."),
10141 _("When off, a 26-bit PC will be used."),
10143 NULL, /* FIXME: i18n: Usage of ARM 32-bit
10145 &setarmcmdlist, &showarmcmdlist);
10147 /* Add a command to allow the user to force the FPU model. */
10148 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, ¤t_fp_model,
10149 _("Set the floating point type."),
10150 _("Show the floating point type."),
10151 _("auto - Determine the FP typefrom the OS-ABI.\n\
10152 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
10153 fpa - FPA co-processor (GCC compiled).\n\
10154 softvfp - Software FP with pure-endian doubles.\n\
10155 vfp - VFP co-processor."),
10156 set_fp_model_sfunc, show_fp_model,
10157 &setarmcmdlist, &showarmcmdlist);
10159 /* Add a command to allow the user to force the ABI. */
10160 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
10162 _("Show the ABI."),
10163 NULL, arm_set_abi, arm_show_abi,
10164 &setarmcmdlist, &showarmcmdlist);
10166 /* Add two commands to allow the user to force the assumed
10168 add_setshow_enum_cmd ("fallback-mode", class_support,
10169 arm_mode_strings, &arm_fallback_mode_string,
10170 _("Set the mode assumed when symbols are unavailable."),
10171 _("Show the mode assumed when symbols are unavailable."),
10172 NULL, NULL, arm_show_fallback_mode,
10173 &setarmcmdlist, &showarmcmdlist);
10174 add_setshow_enum_cmd ("force-mode", class_support,
10175 arm_mode_strings, &arm_force_mode_string,
10176 _("Set the mode assumed even when symbols are available."),
10177 _("Show the mode assumed even when symbols are available."),
10178 NULL, NULL, arm_show_force_mode,
10179 &setarmcmdlist, &showarmcmdlist);
10181 /* Debugging flag. */
10182 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
10183 _("Set ARM debugging."),
10184 _("Show ARM debugging."),
10185 _("When on, arm-specific debugging is enabled."),
10187 NULL, /* FIXME: i18n: "ARM debugging is %s. */
10188 &setdebuglist, &showdebuglist);