1 /* Common target dependent code for GDB on ARM systems.
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include <ctype.h> /* XXX for isupper (). */
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
32 #include "reggroups.h"
35 #include "arch-utils.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
41 #include "dwarf2-frame.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
49 #include "gdb/sim-arm.h"
52 #include "coff/internal.h"
55 #include "gdb_assert.h"
58 #include "features/arm-with-m.c"
59 #include "features/arm-with-iwmmxt.c"
60 #include "features/arm-with-vfpv2.c"
61 #include "features/arm-with-vfpv3.c"
62 #include "features/arm-with-neon.c"
66 /* Macros for setting and testing a bit in a minimal symbol that marks
67 it as Thumb function. The MSB of the minimal symbol's "info" field
68 is used for this purpose.
70 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
71 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
73 #define MSYMBOL_SET_SPECIAL(msym) \
74 MSYMBOL_TARGET_FLAG_1 (msym) = 1
76 #define MSYMBOL_IS_SPECIAL(msym) \
77 MSYMBOL_TARGET_FLAG_1 (msym)
79 /* Per-objfile data used for mapping symbols. */
80 static const struct objfile_data *arm_objfile_data_key;
82 struct arm_mapping_symbol
87 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
88 DEF_VEC_O(arm_mapping_symbol_s);
90 struct arm_per_objfile
92 VEC(arm_mapping_symbol_s) **section_maps;
95 /* The list of available "set arm ..." and "show arm ..." commands. */
96 static struct cmd_list_element *setarmcmdlist = NULL;
97 static struct cmd_list_element *showarmcmdlist = NULL;
99 /* The type of floating-point to use. Keep this in sync with enum
100 arm_float_model, and the help string in _initialize_arm_tdep. */
101 static const char *fp_model_strings[] =
111 /* A variable that can be configured by the user. */
112 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
113 static const char *current_fp_model = "auto";
115 /* The ABI to use. Keep this in sync with arm_abi_kind. */
116 static const char *arm_abi_strings[] =
124 /* A variable that can be configured by the user. */
125 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
126 static const char *arm_abi_string = "auto";
128 /* The execution mode to assume. */
129 static const char *arm_mode_strings[] =
137 static const char *arm_fallback_mode_string = "auto";
138 static const char *arm_force_mode_string = "auto";
140 /* Internal override of the execution mode. -1 means no override,
141 0 means override to ARM mode, 1 means override to Thumb mode.
142 The effect is the same as if arm_force_mode has been set by the
143 user (except the internal override has precedence over a user's
144 arm_force_mode override). */
145 static int arm_override_mode = -1;
147 /* Number of different reg name sets (options). */
148 static int num_disassembly_options;
150 /* The standard register names, and all the valid aliases for them. Note
151 that `fp', `sp' and `pc' are not added in this alias list, because they
152 have been added as builtin user registers in
153 std-regs.c:_initialize_frame_reg. */
158 } arm_register_aliases[] = {
159 /* Basic register numbers. */
176 /* Synonyms (argument and variable registers). */
189 /* Other platform-specific names for r9. */
195 /* Names used by GCC (not listed in the ARM EABI). */
197 /* A special name from the older ATPCS. */
201 static const char *const arm_register_names[] =
202 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
203 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
204 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
205 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
206 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
207 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
208 "fps", "cpsr" }; /* 24 25 */
210 /* Valid register name styles. */
211 static const char **valid_disassembly_styles;
213 /* Disassembly style to use. Default to "std" register names. */
214 static const char *disassembly_style;
216 /* This is used to keep the bfd arch_info in sync with the disassembly
218 static void set_disassembly_style_sfunc(char *, int,
219 struct cmd_list_element *);
220 static void set_disassembly_style (void);
222 static void convert_from_extended (const struct floatformat *, const void *,
224 static void convert_to_extended (const struct floatformat *, void *,
227 static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
228 struct regcache *regcache,
229 int regnum, gdb_byte *buf);
230 static void arm_neon_quad_write (struct gdbarch *gdbarch,
231 struct regcache *regcache,
232 int regnum, const gdb_byte *buf);
234 struct arm_prologue_cache
236 /* The stack pointer at the time this frame was created; i.e. the
237 caller's stack pointer when this function was called. It is used
238 to identify this frame. */
241 /* The frame base for this frame is just prev_sp - frame size.
242 FRAMESIZE is the distance from the frame pointer to the
243 initial stack pointer. */
247 /* The register used to hold the frame pointer for this frame. */
250 /* Saved register offsets. */
251 struct trad_frame_saved_reg *saved_regs;
254 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
255 CORE_ADDR prologue_start,
256 CORE_ADDR prologue_end,
257 struct arm_prologue_cache *cache);
259 /* Architecture version for displaced stepping. This effects the behaviour of
260 certain instructions, and really should not be hard-wired. */
262 #define DISPLACED_STEPPING_ARCH_VERSION 5
264 /* Addresses for calling Thumb functions have the bit 0 set.
265 Here are some macros to test, set, or clear bit 0 of addresses. */
266 #define IS_THUMB_ADDR(addr) ((addr) & 1)
267 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
268 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
270 /* Set to true if the 32-bit mode is in use. */
274 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
277 arm_psr_thumb_bit (struct gdbarch *gdbarch)
279 if (gdbarch_tdep (gdbarch)->is_m)
285 /* Determine if FRAME is executing in Thumb mode. */
288 arm_frame_is_thumb (struct frame_info *frame)
291 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
293 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
294 directly (from a signal frame or dummy frame) or by interpreting
295 the saved LR (from a prologue or DWARF frame). So consult it and
296 trust the unwinders. */
297 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
299 return (cpsr & t_bit) != 0;
302 /* Callback for VEC_lower_bound. */
305 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
306 const struct arm_mapping_symbol *rhs)
308 return lhs->value < rhs->value;
311 /* Search for the mapping symbol covering MEMADDR. If one is found,
312 return its type. Otherwise, return 0. If START is non-NULL,
313 set *START to the location of the mapping symbol. */
316 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
318 struct obj_section *sec;
320 /* If there are mapping symbols, consult them. */
321 sec = find_pc_section (memaddr);
324 struct arm_per_objfile *data;
325 VEC(arm_mapping_symbol_s) *map;
326 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
330 data = objfile_data (sec->objfile, arm_objfile_data_key);
333 map = data->section_maps[sec->the_bfd_section->index];
334 if (!VEC_empty (arm_mapping_symbol_s, map))
336 struct arm_mapping_symbol *map_sym;
338 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
339 arm_compare_mapping_symbols);
341 /* VEC_lower_bound finds the earliest ordered insertion
342 point. If the following symbol starts at this exact
343 address, we use that; otherwise, the preceding
344 mapping symbol covers this address. */
345 if (idx < VEC_length (arm_mapping_symbol_s, map))
347 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
348 if (map_sym->value == map_key.value)
351 *start = map_sym->value + obj_section_addr (sec);
352 return map_sym->type;
358 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
360 *start = map_sym->value + obj_section_addr (sec);
361 return map_sym->type;
370 /* Determine if the program counter specified in MEMADDR is in a Thumb
371 function. This function should be called for addresses unrelated to
372 any executing frame; otherwise, prefer arm_frame_is_thumb. */
375 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
377 struct obj_section *sec;
378 struct minimal_symbol *sym;
380 struct displaced_step_closure* dsc
381 = get_displaced_step_closure_by_addr(memaddr);
383 /* If checking the mode of displaced instruction in copy area, the mode
384 should be determined by instruction on the original address. */
388 fprintf_unfiltered (gdb_stdlog,
389 "displaced: check mode of %.8lx instead of %.8lx\n",
390 (unsigned long) dsc->insn_addr,
391 (unsigned long) memaddr);
392 memaddr = dsc->insn_addr;
395 /* If bit 0 of the address is set, assume this is a Thumb address. */
396 if (IS_THUMB_ADDR (memaddr))
399 /* Respect internal mode override if active. */
400 if (arm_override_mode != -1)
401 return arm_override_mode;
403 /* If the user wants to override the symbol table, let him. */
404 if (strcmp (arm_force_mode_string, "arm") == 0)
406 if (strcmp (arm_force_mode_string, "thumb") == 0)
409 /* ARM v6-M and v7-M are always in Thumb mode. */
410 if (gdbarch_tdep (gdbarch)->is_m)
413 /* If there are mapping symbols, consult them. */
414 type = arm_find_mapping_symbol (memaddr, NULL);
418 /* Thumb functions have a "special" bit set in minimal symbols. */
419 sym = lookup_minimal_symbol_by_pc (memaddr);
421 return (MSYMBOL_IS_SPECIAL (sym));
423 /* If the user wants to override the fallback mode, let them. */
424 if (strcmp (arm_fallback_mode_string, "arm") == 0)
426 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
429 /* If we couldn't find any symbol, but we're talking to a running
430 target, then trust the current value of $cpsr. This lets
431 "display/i $pc" always show the correct mode (though if there is
432 a symbol table we will not reach here, so it still may not be
433 displayed in the mode it will be executed). */
434 if (target_has_registers)
435 return arm_frame_is_thumb (get_current_frame ());
437 /* Otherwise we're out of luck; we assume ARM. */
441 /* Remove useless bits from addresses in a running program. */
443 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
446 return UNMAKE_THUMB_ADDR (val);
448 return (val & 0x03fffffc);
451 /* When reading symbols, we need to zap the low bit of the address,
452 which may be set to 1 for Thumb functions. */
454 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
459 /* Return 1 if PC is the start of a compiler helper function which
460 can be safely ignored during prologue skipping. IS_THUMB is true
461 if the function is known to be a Thumb function due to the way it
464 skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
466 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
467 struct minimal_symbol *msym;
469 msym = lookup_minimal_symbol_by_pc (pc);
471 && SYMBOL_VALUE_ADDRESS (msym) == pc
472 && SYMBOL_LINKAGE_NAME (msym) != NULL)
474 const char *name = SYMBOL_LINKAGE_NAME (msym);
476 /* The GNU linker's Thumb call stub to foo is named
478 if (strstr (name, "_from_thumb") != NULL)
481 /* On soft-float targets, __truncdfsf2 is called to convert promoted
482 arguments to their argument types in non-prototyped
484 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
486 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
489 /* Internal functions related to thread-local storage. */
490 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
492 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
497 /* If we run against a stripped glibc, we may be unable to identify
498 special functions by name. Check for one important case,
499 __aeabi_read_tp, by comparing the *code* against the default
500 implementation (this is hand-written ARM assembler in glibc). */
503 && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
504 == 0xe3e00a0f /* mov r0, #0xffff0fff */
505 && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
506 == 0xe240f01f) /* sub pc, r0, #31 */
513 /* Support routines for instruction parsing. */
514 #define submask(x) ((1L << ((x) + 1)) - 1)
515 #define bit(obj,st) (((obj) >> (st)) & 1)
516 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
517 #define sbits(obj,st,fn) \
518 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
519 #define BranchDest(addr,instr) \
520 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
522 /* Extract the immediate from instruction movw/movt of encoding T. INSN1 is
523 the first 16-bit of instruction, and INSN2 is the second 16-bit of
525 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
526 ((bits ((insn1), 0, 3) << 12) \
527 | (bits ((insn1), 10, 10) << 11) \
528 | (bits ((insn2), 12, 14) << 8) \
529 | bits ((insn2), 0, 7))
531 /* Extract the immediate from instruction movw/movt of encoding A. INSN is
532 the 32-bit instruction. */
533 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
534 ((bits ((insn), 16, 19) << 12) \
535 | bits ((insn), 0, 11))
537 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
540 thumb_expand_immediate (unsigned int imm)
542 unsigned int count = imm >> 7;
550 return (imm & 0xff) | ((imm & 0xff) << 16);
552 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
554 return (imm & 0xff) | ((imm & 0xff) << 8)
555 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
558 return (0x80 | (imm & 0x7f)) << (32 - count);
561 /* Return 1 if the 16-bit Thumb instruction INST might change
562 control flow, 0 otherwise. */
565 thumb_instruction_changes_pc (unsigned short inst)
567 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
570 if ((inst & 0xf000) == 0xd000) /* conditional branch */
573 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
576 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
579 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
582 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
588 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
589 might change control flow, 0 otherwise. */
592 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
594 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
596 /* Branches and miscellaneous control instructions. */
598 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
603 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
605 /* SUBS PC, LR, #imm8. */
608 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
610 /* Conditional branch. */
617 if ((inst1 & 0xfe50) == 0xe810)
619 /* Load multiple or RFE. */
621 if (bit (inst1, 7) && !bit (inst1, 8))
627 else if (!bit (inst1, 7) && bit (inst1, 8))
633 else if (bit (inst1, 7) && bit (inst1, 8))
638 else if (!bit (inst1, 7) && !bit (inst1, 8))
647 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
649 /* MOV PC or MOVS PC. */
653 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
656 if (bits (inst1, 0, 3) == 15)
662 if ((inst2 & 0x0fc0) == 0x0000)
668 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
674 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
683 /* Analyze a Thumb prologue, looking for a recognizable stack frame
684 and frame pointer. Scan until we encounter a store that could
685 clobber the stack frame unexpectedly, or an unknown instruction.
686 Return the last address which is definitely safe to skip for an
687 initial breakpoint. */
690 thumb_analyze_prologue (struct gdbarch *gdbarch,
691 CORE_ADDR start, CORE_ADDR limit,
692 struct arm_prologue_cache *cache)
694 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
695 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
698 struct pv_area *stack;
699 struct cleanup *back_to;
701 CORE_ADDR unrecognized_pc = 0;
703 for (i = 0; i < 16; i++)
704 regs[i] = pv_register (i, 0);
705 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
706 back_to = make_cleanup_free_pv_area (stack);
708 while (start < limit)
712 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
714 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
719 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
722 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
723 whether to save LR (R14). */
724 mask = (insn & 0xff) | ((insn & 0x100) << 6);
726 /* Calculate offsets of saved R0-R7 and LR. */
727 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
728 if (mask & (1 << regno))
730 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
732 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
735 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
738 offset = (insn & 0x7f) << 2; /* get scaled offset */
739 if (insn & 0x80) /* Check for SUB. */
740 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
743 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
746 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
747 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
749 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
750 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
751 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
753 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
754 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
755 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
757 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
758 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
759 && pv_is_constant (regs[bits (insn, 3, 5)]))
760 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
761 regs[bits (insn, 6, 8)]);
762 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
763 && pv_is_constant (regs[bits (insn, 3, 6)]))
765 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
766 int rm = bits (insn, 3, 6);
767 regs[rd] = pv_add (regs[rd], regs[rm]);
769 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
771 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
772 int src_reg = (insn & 0x78) >> 3;
773 regs[dst_reg] = regs[src_reg];
775 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
777 /* Handle stores to the stack. Normally pushes are used,
778 but with GCC -mtpcs-frame, there may be other stores
779 in the prologue to create the frame. */
780 int regno = (insn >> 8) & 0x7;
783 offset = (insn & 0xff) << 2;
784 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
786 if (pv_area_store_would_trash (stack, addr))
789 pv_area_store (stack, addr, 4, regs[regno]);
791 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
793 int rd = bits (insn, 0, 2);
794 int rn = bits (insn, 3, 5);
797 offset = bits (insn, 6, 10) << 2;
798 addr = pv_add_constant (regs[rn], offset);
800 if (pv_area_store_would_trash (stack, addr))
803 pv_area_store (stack, addr, 4, regs[rd]);
805 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
806 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
807 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
808 /* Ignore stores of argument registers to the stack. */
810 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
811 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
812 /* Ignore block loads from the stack, potentially copying
813 parameters from memory. */
815 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
816 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
817 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
818 /* Similarly ignore single loads from the stack. */
820 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
821 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
822 /* Skip register copies, i.e. saves to another register
823 instead of the stack. */
825 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
826 /* Recognize constant loads; even with small stacks these are necessary
828 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
829 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
831 /* Constant pool loads, for the same reason. */
832 unsigned int constant;
835 loc = start + 4 + bits (insn, 0, 7) * 4;
836 constant = read_memory_unsigned_integer (loc, 4, byte_order);
837 regs[bits (insn, 8, 10)] = pv_constant (constant);
839 else if ((insn & 0xe000) == 0xe000)
841 unsigned short inst2;
843 inst2 = read_memory_unsigned_integer (start + 2, 2,
844 byte_order_for_code);
846 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
848 /* BL, BLX. Allow some special function calls when
849 skipping the prologue; GCC generates these before
850 storing arguments to the stack. */
852 int j1, j2, imm1, imm2;
854 imm1 = sbits (insn, 0, 10);
855 imm2 = bits (inst2, 0, 10);
856 j1 = bit (inst2, 13);
857 j2 = bit (inst2, 11);
859 offset = ((imm1 << 12) + (imm2 << 1));
860 offset ^= ((!j2) << 22) | ((!j1) << 23);
862 nextpc = start + 4 + offset;
863 /* For BLX make sure to clear the low bits. */
864 if (bit (inst2, 12) == 0)
865 nextpc = nextpc & 0xfffffffc;
867 if (!skip_prologue_function (gdbarch, nextpc,
868 bit (inst2, 12) != 0))
872 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
874 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
876 pv_t addr = regs[bits (insn, 0, 3)];
879 if (pv_area_store_would_trash (stack, addr))
882 /* Calculate offsets of saved registers. */
883 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
884 if (inst2 & (1 << regno))
886 addr = pv_add_constant (addr, -4);
887 pv_area_store (stack, addr, 4, regs[regno]);
891 regs[bits (insn, 0, 3)] = addr;
894 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
896 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
898 int regno1 = bits (inst2, 12, 15);
899 int regno2 = bits (inst2, 8, 11);
900 pv_t addr = regs[bits (insn, 0, 3)];
902 offset = inst2 & 0xff;
904 addr = pv_add_constant (addr, offset);
906 addr = pv_add_constant (addr, -offset);
908 if (pv_area_store_would_trash (stack, addr))
911 pv_area_store (stack, addr, 4, regs[regno1]);
912 pv_area_store (stack, pv_add_constant (addr, 4),
916 regs[bits (insn, 0, 3)] = addr;
919 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
920 && (inst2 & 0x0c00) == 0x0c00
921 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
923 int regno = bits (inst2, 12, 15);
924 pv_t addr = regs[bits (insn, 0, 3)];
926 offset = inst2 & 0xff;
928 addr = pv_add_constant (addr, offset);
930 addr = pv_add_constant (addr, -offset);
932 if (pv_area_store_would_trash (stack, addr))
935 pv_area_store (stack, addr, 4, regs[regno]);
938 regs[bits (insn, 0, 3)] = addr;
941 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
942 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
944 int regno = bits (inst2, 12, 15);
947 offset = inst2 & 0xfff;
948 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
950 if (pv_area_store_would_trash (stack, addr))
953 pv_area_store (stack, addr, 4, regs[regno]);
956 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
957 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
958 /* Ignore stores of argument registers to the stack. */
961 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
962 && (inst2 & 0x0d00) == 0x0c00
963 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
964 /* Ignore stores of argument registers to the stack. */
967 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
969 && (inst2 & 0x8000) == 0x0000
970 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
971 /* Ignore block loads from the stack, potentially copying
972 parameters from memory. */
975 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
977 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
978 /* Similarly ignore dual loads from the stack. */
981 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
982 && (inst2 & 0x0d00) == 0x0c00
983 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
984 /* Similarly ignore single loads from the stack. */
987 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
988 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
989 /* Similarly ignore single loads from the stack. */
992 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
993 && (inst2 & 0x8000) == 0x0000)
995 unsigned int imm = ((bits (insn, 10, 10) << 11)
996 | (bits (inst2, 12, 14) << 8)
997 | bits (inst2, 0, 7));
999 regs[bits (inst2, 8, 11)]
1000 = pv_add_constant (regs[bits (insn, 0, 3)],
1001 thumb_expand_immediate (imm));
1004 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1005 && (inst2 & 0x8000) == 0x0000)
1007 unsigned int imm = ((bits (insn, 10, 10) << 11)
1008 | (bits (inst2, 12, 14) << 8)
1009 | bits (inst2, 0, 7));
1011 regs[bits (inst2, 8, 11)]
1012 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1015 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1016 && (inst2 & 0x8000) == 0x0000)
1018 unsigned int imm = ((bits (insn, 10, 10) << 11)
1019 | (bits (inst2, 12, 14) << 8)
1020 | bits (inst2, 0, 7));
1022 regs[bits (inst2, 8, 11)]
1023 = pv_add_constant (regs[bits (insn, 0, 3)],
1024 - (CORE_ADDR) thumb_expand_immediate (imm));
1027 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1028 && (inst2 & 0x8000) == 0x0000)
1030 unsigned int imm = ((bits (insn, 10, 10) << 11)
1031 | (bits (inst2, 12, 14) << 8)
1032 | bits (inst2, 0, 7));
1034 regs[bits (inst2, 8, 11)]
1035 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1038 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1040 unsigned int imm = ((bits (insn, 10, 10) << 11)
1041 | (bits (inst2, 12, 14) << 8)
1042 | bits (inst2, 0, 7));
1044 regs[bits (inst2, 8, 11)]
1045 = pv_constant (thumb_expand_immediate (imm));
1048 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1051 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1053 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1056 else if (insn == 0xea5f /* mov.w Rd,Rm */
1057 && (inst2 & 0xf0f0) == 0)
1059 int dst_reg = (inst2 & 0x0f00) >> 8;
1060 int src_reg = inst2 & 0xf;
1061 regs[dst_reg] = regs[src_reg];
1064 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1066 /* Constant pool loads. */
1067 unsigned int constant;
1070 offset = bits (insn, 0, 11);
1072 loc = start + 4 + offset;
1074 loc = start + 4 - offset;
1076 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1077 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1080 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1082 /* Constant pool loads. */
1083 unsigned int constant;
1086 offset = bits (insn, 0, 7) << 2;
1088 loc = start + 4 + offset;
1090 loc = start + 4 - offset;
1092 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1093 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1095 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1096 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1099 else if (thumb2_instruction_changes_pc (insn, inst2))
1101 /* Don't scan past anything that might change control flow. */
1106 /* The optimizer might shove anything into the prologue,
1107 so we just skip what we don't recognize. */
1108 unrecognized_pc = start;
1113 else if (thumb_instruction_changes_pc (insn))
1115 /* Don't scan past anything that might change control flow. */
1120 /* The optimizer might shove anything into the prologue,
1121 so we just skip what we don't recognize. */
1122 unrecognized_pc = start;
1129 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1130 paddress (gdbarch, start));
1132 if (unrecognized_pc == 0)
1133 unrecognized_pc = start;
1137 do_cleanups (back_to);
1138 return unrecognized_pc;
1141 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1143 /* Frame pointer is fp. Frame size is constant. */
1144 cache->framereg = ARM_FP_REGNUM;
1145 cache->framesize = -regs[ARM_FP_REGNUM].k;
1147 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1149 /* Frame pointer is r7. Frame size is constant. */
1150 cache->framereg = THUMB_FP_REGNUM;
1151 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1153 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1155 /* Try the stack pointer... this is a bit desperate. */
1156 cache->framereg = ARM_SP_REGNUM;
1157 cache->framesize = -regs[ARM_SP_REGNUM].k;
1161 /* We're just out of luck. We don't know where the frame is. */
1162 cache->framereg = -1;
1163 cache->framesize = 0;
1166 for (i = 0; i < 16; i++)
1167 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1168 cache->saved_regs[i].addr = offset;
1170 do_cleanups (back_to);
1171 return unrecognized_pc;
1175 /* Try to analyze the instructions starting from PC, which load symbol
1176 __stack_chk_guard. Return the address of instruction after loading this
1177 symbol, set the dest register number to *BASEREG, and set the size of
1178 instructions for loading symbol in OFFSET. Return 0 if instructions are
1182 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1183 unsigned int *destreg, int *offset)
1185 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1186 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1187 unsigned int low, high, address;
1192 unsigned short insn1
1193 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1195 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1197 *destreg = bits (insn1, 8, 10);
1199 address = bits (insn1, 0, 7);
1201 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1203 unsigned short insn2
1204 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1206 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1209 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1211 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1213 /* movt Rd, #const */
1214 if ((insn1 & 0xfbc0) == 0xf2c0)
1216 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1217 *destreg = bits (insn2, 8, 11);
1219 address = (high << 16 | low);
1226 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1228 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1230 address = bits (insn, 0, 11);
1231 *destreg = bits (insn, 12, 15);
1234 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1236 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1239 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1241 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1243 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1244 *destreg = bits (insn, 12, 15);
1246 address = (high << 16 | low);
1254 /* Try to skip a sequence of instructions used for stack protector. If PC
1255 points to the first instruction of this sequence, return the address of
1256 first instruction after this sequence, otherwise, return original PC.
1258 On arm, this sequence of instructions is composed of mainly three steps,
1259 Step 1: load symbol __stack_chk_guard,
1260 Step 2: load from address of __stack_chk_guard,
1261 Step 3: store it to somewhere else.
1263 Usually, instructions on step 2 and step 3 are the same on various ARM
1264 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1265 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1266 instructions in step 1 vary from different ARM architectures. On ARMv7,
1269 movw Rn, #:lower16:__stack_chk_guard
1270 movt Rn, #:upper16:__stack_chk_guard
1277 .word __stack_chk_guard
1279 Since ldr/str is a very popular instruction, we can't use them as
1280 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1281 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
1282 stripped, as the 'fingerprint' of a stack protector cdoe sequence. */
1285 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1287 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1288 unsigned int address, basereg;
1289 struct minimal_symbol *stack_chk_guard;
1291 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1294 /* Try to parse the instructions in Step 1. */
1295 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1300 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1301 /* If name of symbol doesn't start with '__stack_chk_guard', this
1302 instruction sequence is not for stack protector. If symbol is
1303 removed, we conservatively think this sequence is for stack protector. */
1305 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
1306 strlen ("__stack_chk_guard")) != 0)
1311 unsigned int destreg;
1313 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1315 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1316 if ((insn & 0xf800) != 0x6800)
1318 if (bits (insn, 3, 5) != basereg)
1320 destreg = bits (insn, 0, 2);
1322 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1323 byte_order_for_code);
1324 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1325 if ((insn & 0xf800) != 0x6000)
1327 if (destreg != bits (insn, 0, 2))
1332 unsigned int destreg;
1334 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1336 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1337 if ((insn & 0x0e500000) != 0x04100000)
1339 if (bits (insn, 16, 19) != basereg)
1341 destreg = bits (insn, 12, 15);
1342 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1343 insn = read_memory_unsigned_integer (pc + offset + 4,
1344 4, byte_order_for_code);
1345 if ((insn & 0x0e500000) != 0x04000000)
1347 if (bits (insn, 12, 15) != destreg)
1350 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1353 return pc + offset + 4;
1355 return pc + offset + 8;
1358 /* Advance the PC across any function entry prologue instructions to
1359 reach some "real" code.
1361 The APCS (ARM Procedure Call Standard) defines the following
1365 [stmfd sp!, {a1,a2,a3,a4}]
1366 stmfd sp!, {...,fp,ip,lr,pc}
1367 [stfe f7, [sp, #-12]!]
1368 [stfe f6, [sp, #-12]!]
1369 [stfe f5, [sp, #-12]!]
1370 [stfe f4, [sp, #-12]!]
1371 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1374 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1376 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1379 CORE_ADDR func_addr, limit_pc;
1380 struct symtab_and_line sal;
1382 /* See if we can determine the end of the prologue via the symbol table.
1383 If so, then return either PC, or the PC after the prologue, whichever
1385 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1387 CORE_ADDR post_prologue_pc
1388 = skip_prologue_using_sal (gdbarch, func_addr);
1389 struct symtab *s = find_pc_symtab (func_addr);
1391 if (post_prologue_pc)
1393 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1396 /* GCC always emits a line note before the prologue and another
1397 one after, even if the two are at the same address or on the
1398 same line. Take advantage of this so that we do not need to
1399 know every instruction that might appear in the prologue. We
1400 will have producer information for most binaries; if it is
1401 missing (e.g. for -gstabs), assuming the GNU tools. */
1402 if (post_prologue_pc
1404 || s->producer == NULL
1405 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1406 return post_prologue_pc;
1408 if (post_prologue_pc != 0)
1410 CORE_ADDR analyzed_limit;
1412 /* For non-GCC compilers, make sure the entire line is an
1413 acceptable prologue; GDB will round this function's
1414 return value up to the end of the following line so we
1415 can not skip just part of a line (and we do not want to).
1417 RealView does not treat the prologue specially, but does
1418 associate prologue code with the opening brace; so this
1419 lets us skip the first line if we think it is the opening
1421 if (arm_pc_is_thumb (gdbarch, func_addr))
1422 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1423 post_prologue_pc, NULL);
1425 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1426 post_prologue_pc, NULL);
1428 if (analyzed_limit != post_prologue_pc)
1431 return post_prologue_pc;
1435 /* Can't determine prologue from the symbol table, need to examine
1438 /* Find an upper limit on the function prologue using the debug
1439 information. If the debug information could not be used to provide
1440 that bound, then use an arbitrary large number as the upper bound. */
1441 /* Like arm_scan_prologue, stop no later than pc + 64. */
1442 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1444 limit_pc = pc + 64; /* Magic. */
1447 /* Check if this is Thumb code. */
1448 if (arm_pc_is_thumb (gdbarch, pc))
1449 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1451 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1453 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1455 /* "mov ip, sp" is no longer a required part of the prologue. */
1456 if (inst == 0xe1a0c00d) /* mov ip, sp */
1459 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1462 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1465 /* Some prologues begin with "str lr, [sp, #-4]!". */
1466 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1469 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1472 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1475 /* Any insns after this point may float into the code, if it makes
1476 for better instruction scheduling, so we skip them only if we
1477 find them, but still consider the function to be frame-ful. */
1479 /* We may have either one sfmfd instruction here, or several stfe
1480 insns, depending on the version of floating point code we
1482 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1485 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1488 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1491 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1494 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1495 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1496 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1499 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1500 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1501 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1504 /* Un-recognized instruction; stop scanning. */
1508 return skip_pc; /* End of prologue. */
1512 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1513 This function decodes a Thumb function prologue to determine:
1514 1) the size of the stack frame
1515 2) which registers are saved on it
1516 3) the offsets of saved regs
1517 4) the offset from the stack pointer to the frame pointer
1519 A typical Thumb function prologue would create this stack frame
1520 (offsets relative to FP)
1521 old SP -> 24 stack parameters
1524 R7 -> 0 local variables (16 bytes)
1525 SP -> -12 additional stack space (12 bytes)
1526 The frame size would thus be 36 bytes, and the frame offset would be
1527 12 bytes. The frame register is R7.
1529 The comments for thumb_skip_prolog() describe the algorithm we use
1530 to detect the end of the prolog. */
1534 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1535 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1537 CORE_ADDR prologue_start;
1538 CORE_ADDR prologue_end;
1539 CORE_ADDR current_pc;
1541 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1544 /* See comment in arm_scan_prologue for an explanation of
1546 if (prologue_end > prologue_start + 64)
1548 prologue_end = prologue_start + 64;
1552 /* We're in the boondocks: we have no idea where the start of the
1556 prologue_end = min (prologue_end, prev_pc);
1558 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1561 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1564 arm_instruction_changes_pc (uint32_t this_instr)
1566 if (bits (this_instr, 28, 31) == INST_NV)
1567 /* Unconditional instructions. */
1568 switch (bits (this_instr, 24, 27))
1572 /* Branch with Link and change to Thumb. */
1577 /* Coprocessor register transfer. */
1578 if (bits (this_instr, 12, 15) == 15)
1579 error (_("Invalid update to pc in instruction"));
1585 switch (bits (this_instr, 25, 27))
1588 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1590 /* Multiplies and extra load/stores. */
1591 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1592 /* Neither multiplies nor extension load/stores are allowed
1596 /* Otherwise, miscellaneous instructions. */
1598 /* BX <reg>, BXJ <reg>, BLX <reg> */
1599 if (bits (this_instr, 4, 27) == 0x12fff1
1600 || bits (this_instr, 4, 27) == 0x12fff2
1601 || bits (this_instr, 4, 27) == 0x12fff3)
1604 /* Other miscellaneous instructions are unpredictable if they
1608 /* Data processing instruction. Fall through. */
1611 if (bits (this_instr, 12, 15) == 15)
1618 /* Media instructions and architecturally undefined instructions. */
1619 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1623 if (bit (this_instr, 20) == 0)
1627 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1633 /* Load/store multiple. */
1634 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1640 /* Branch and branch with link. */
1645 /* Coprocessor transfers or SWIs can not affect PC. */
1649 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1653 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1654 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1655 fill it in. Return the first address not recognized as a prologue
1658 We recognize all the instructions typically found in ARM prologues,
1659 plus harmless instructions which can be skipped (either for analysis
1660 purposes, or a more restrictive set that can be skipped when finding
1661 the end of the prologue). */
1664 arm_analyze_prologue (struct gdbarch *gdbarch,
1665 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1666 struct arm_prologue_cache *cache)
1668 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1669 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1671 CORE_ADDR offset, current_pc;
1672 pv_t regs[ARM_FPS_REGNUM];
1673 struct pv_area *stack;
1674 struct cleanup *back_to;
1675 int framereg, framesize;
1676 CORE_ADDR unrecognized_pc = 0;
1678 /* Search the prologue looking for instructions that set up the
1679 frame pointer, adjust the stack pointer, and save registers.
1681 Be careful, however, and if it doesn't look like a prologue,
1682 don't try to scan it. If, for instance, a frameless function
1683 begins with stmfd sp!, then we will tell ourselves there is
1684 a frame, which will confuse stack traceback, as well as "finish"
1685 and other operations that rely on a knowledge of the stack
1688 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1689 regs[regno] = pv_register (regno, 0);
1690 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1691 back_to = make_cleanup_free_pv_area (stack);
1693 for (current_pc = prologue_start;
1694 current_pc < prologue_end;
1698 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1700 if (insn == 0xe1a0c00d) /* mov ip, sp */
1702 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1705 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1706 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1708 unsigned imm = insn & 0xff; /* immediate value */
1709 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1710 int rd = bits (insn, 12, 15);
1711 imm = (imm >> rot) | (imm << (32 - rot));
1712 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1715 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1716 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1718 unsigned imm = insn & 0xff; /* immediate value */
1719 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1720 int rd = bits (insn, 12, 15);
1721 imm = (imm >> rot) | (imm << (32 - rot));
1722 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1725 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1728 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1730 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1731 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1732 regs[bits (insn, 12, 15)]);
1735 else if ((insn & 0xffff0000) == 0xe92d0000)
1736 /* stmfd sp!, {..., fp, ip, lr, pc}
1738 stmfd sp!, {a1, a2, a3, a4} */
1740 int mask = insn & 0xffff;
1742 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1745 /* Calculate offsets of saved registers. */
1746 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1747 if (mask & (1 << regno))
1750 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1751 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1754 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1755 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1756 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1758 /* No need to add this to saved_regs -- it's just an arg reg. */
1761 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1762 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1763 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1765 /* No need to add this to saved_regs -- it's just an arg reg. */
1768 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1770 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1772 /* No need to add this to saved_regs -- it's just arg regs. */
1775 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1777 unsigned imm = insn & 0xff; /* immediate value */
1778 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1779 imm = (imm >> rot) | (imm << (32 - rot));
1780 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1782 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1784 unsigned imm = insn & 0xff; /* immediate value */
1785 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1786 imm = (imm >> rot) | (imm << (32 - rot));
1787 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1789 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1791 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1793 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1796 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1797 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1798 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1800 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1802 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1804 int n_saved_fp_regs;
1805 unsigned int fp_start_reg, fp_bound_reg;
1807 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1810 if ((insn & 0x800) == 0x800) /* N0 is set */
1812 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1813 n_saved_fp_regs = 3;
1815 n_saved_fp_regs = 1;
1819 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1820 n_saved_fp_regs = 2;
1822 n_saved_fp_regs = 4;
1825 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1826 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1827 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1829 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1830 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1831 regs[fp_start_reg++]);
1834 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1836 /* Allow some special function calls when skipping the
1837 prologue; GCC generates these before storing arguments to
1839 CORE_ADDR dest = BranchDest (current_pc, insn);
1841 if (skip_prologue_function (gdbarch, dest, 0))
1846 else if ((insn & 0xf0000000) != 0xe0000000)
1847 break; /* Condition not true, exit early. */
1848 else if (arm_instruction_changes_pc (insn))
1849 /* Don't scan past anything that might change control flow. */
1851 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1852 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1853 /* Ignore block loads from the stack, potentially copying
1854 parameters from memory. */
1856 else if ((insn & 0xfc500000) == 0xe4100000
1857 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1858 /* Similarly ignore single loads from the stack. */
1860 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1861 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1862 register instead of the stack. */
1866 /* The optimizer might shove anything into the prologue,
1867 so we just skip what we don't recognize. */
1868 unrecognized_pc = current_pc;
1873 if (unrecognized_pc == 0)
1874 unrecognized_pc = current_pc;
1876 /* The frame size is just the distance from the frame register
1877 to the original stack pointer. */
1878 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1880 /* Frame pointer is fp. */
1881 framereg = ARM_FP_REGNUM;
1882 framesize = -regs[ARM_FP_REGNUM].k;
1884 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1886 /* Try the stack pointer... this is a bit desperate. */
1887 framereg = ARM_SP_REGNUM;
1888 framesize = -regs[ARM_SP_REGNUM].k;
1892 /* We're just out of luck. We don't know where the frame is. */
1899 cache->framereg = framereg;
1900 cache->framesize = framesize;
1902 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1903 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1904 cache->saved_regs[regno].addr = offset;
1908 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1909 paddress (gdbarch, unrecognized_pc));
1911 do_cleanups (back_to);
1912 return unrecognized_pc;
1916 arm_scan_prologue (struct frame_info *this_frame,
1917 struct arm_prologue_cache *cache)
1919 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1920 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1922 CORE_ADDR prologue_start, prologue_end, current_pc;
1923 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1924 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1925 pv_t regs[ARM_FPS_REGNUM];
1926 struct pv_area *stack;
1927 struct cleanup *back_to;
1930 /* Assume there is no frame until proven otherwise. */
1931 cache->framereg = ARM_SP_REGNUM;
1932 cache->framesize = 0;
1934 /* Check for Thumb prologue. */
1935 if (arm_frame_is_thumb (this_frame))
1937 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1941 /* Find the function prologue. If we can't find the function in
1942 the symbol table, peek in the stack frame to find the PC. */
1943 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1946 /* One way to find the end of the prologue (which works well
1947 for unoptimized code) is to do the following:
1949 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1952 prologue_end = prev_pc;
1953 else if (sal.end < prologue_end)
1954 prologue_end = sal.end;
1956 This mechanism is very accurate so long as the optimizer
1957 doesn't move any instructions from the function body into the
1958 prologue. If this happens, sal.end will be the last
1959 instruction in the first hunk of prologue code just before
1960 the first instruction that the scheduler has moved from
1961 the body to the prologue.
1963 In order to make sure that we scan all of the prologue
1964 instructions, we use a slightly less accurate mechanism which
1965 may scan more than necessary. To help compensate for this
1966 lack of accuracy, the prologue scanning loop below contains
1967 several clauses which'll cause the loop to terminate early if
1968 an implausible prologue instruction is encountered.
1974 is a suitable endpoint since it accounts for the largest
1975 possible prologue plus up to five instructions inserted by
1978 if (prologue_end > prologue_start + 64)
1980 prologue_end = prologue_start + 64; /* See above. */
1985 /* We have no symbol information. Our only option is to assume this
1986 function has a standard stack frame and the normal frame register.
1987 Then, we can find the value of our frame pointer on entrance to
1988 the callee (or at the present moment if this is the innermost frame).
1989 The value stored there should be the address of the stmfd + 8. */
1990 CORE_ADDR frame_loc;
1991 LONGEST return_value;
1993 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1994 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1998 prologue_start = gdbarch_addr_bits_remove
1999 (gdbarch, return_value) - 8;
2000 prologue_end = prologue_start + 64; /* See above. */
2004 if (prev_pc < prologue_end)
2005 prologue_end = prev_pc;
2007 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2010 static struct arm_prologue_cache *
2011 arm_make_prologue_cache (struct frame_info *this_frame)
2014 struct arm_prologue_cache *cache;
2015 CORE_ADDR unwound_fp;
2017 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2018 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2020 arm_scan_prologue (this_frame, cache);
2022 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2023 if (unwound_fp == 0)
2026 cache->prev_sp = unwound_fp + cache->framesize;
2028 /* Calculate actual addresses of saved registers using offsets
2029 determined by arm_scan_prologue. */
2030 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2031 if (trad_frame_addr_p (cache->saved_regs, reg))
2032 cache->saved_regs[reg].addr += cache->prev_sp;
2037 /* Our frame ID for a normal frame is the current function's starting PC
2038 and the caller's SP when we were called. */
2041 arm_prologue_this_id (struct frame_info *this_frame,
2043 struct frame_id *this_id)
2045 struct arm_prologue_cache *cache;
2049 if (*this_cache == NULL)
2050 *this_cache = arm_make_prologue_cache (this_frame);
2051 cache = *this_cache;
2053 /* This is meant to halt the backtrace at "_start". */
2054 pc = get_frame_pc (this_frame);
2055 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2058 /* If we've hit a wall, stop. */
2059 if (cache->prev_sp == 0)
2062 /* Use function start address as part of the frame ID. If we cannot
2063 identify the start address (due to missing symbol information),
2064 fall back to just using the current PC. */
2065 func = get_frame_func (this_frame);
2069 id = frame_id_build (cache->prev_sp, func);
2073 static struct value *
2074 arm_prologue_prev_register (struct frame_info *this_frame,
2078 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2079 struct arm_prologue_cache *cache;
2081 if (*this_cache == NULL)
2082 *this_cache = arm_make_prologue_cache (this_frame);
2083 cache = *this_cache;
2085 /* If we are asked to unwind the PC, then we need to return the LR
2086 instead. The prologue may save PC, but it will point into this
2087 frame's prologue, not the next frame's resume location. Also
2088 strip the saved T bit. A valid LR may have the low bit set, but
2089 a valid PC never does. */
2090 if (prev_regnum == ARM_PC_REGNUM)
2094 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2095 return frame_unwind_got_constant (this_frame, prev_regnum,
2096 arm_addr_bits_remove (gdbarch, lr));
2099 /* SP is generally not saved to the stack, but this frame is
2100 identified by the next frame's stack pointer at the time of the call.
2101 The value was already reconstructed into PREV_SP. */
2102 if (prev_regnum == ARM_SP_REGNUM)
2103 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2105 /* The CPSR may have been changed by the call instruction and by the
2106 called function. The only bit we can reconstruct is the T bit,
2107 by checking the low bit of LR as of the call. This is a reliable
2108 indicator of Thumb-ness except for some ARM v4T pre-interworking
2109 Thumb code, which could get away with a clear low bit as long as
2110 the called function did not use bx. Guess that all other
2111 bits are unchanged; the condition flags are presumably lost,
2112 but the processor status is likely valid. */
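/* Example (for illustration, not from the original source): if the
   unwound LR is 0x8231, its low bit shows the caller was executing in
   Thumb state, so the T bit is set in the reconstructed CPSR;
   otherwise it is cleared. */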
2113 if (prev_regnum == ARM_PS_REGNUM)
2116 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2118 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2119 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2120 if (IS_THUMB_ADDR (lr))
2124 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2127 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2131 struct frame_unwind arm_prologue_unwind = {
2133 default_frame_unwind_stop_reason,
2134 arm_prologue_this_id,
2135 arm_prologue_prev_register,
2137 default_frame_sniffer
2140 /* Maintain a list of ARM exception table entries per objfile, similar to the
2141 list of mapping symbols. We only cache entries for standard ARM-defined
2142 personality routines; the cache will contain only the frame unwinding
2143 instructions associated with the entry (not the descriptors). */
2145 static const struct objfile_data *arm_exidx_data_key;
2147 struct arm_exidx_entry
2152 typedef struct arm_exidx_entry arm_exidx_entry_s;
2153 DEF_VEC_O(arm_exidx_entry_s);
2155 struct arm_exidx_data
2157 VEC(arm_exidx_entry_s) **section_maps;
2161 arm_exidx_data_free (struct objfile *objfile, void *arg)
2163 struct arm_exidx_data *data = arg;
2166 for (i = 0; i < objfile->obfd->section_count; i++)
2167 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2171 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2172 const struct arm_exidx_entry *rhs)
2174 return lhs->addr < rhs->addr;
2177 static struct obj_section *
2178 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2180 struct obj_section *osect;
2182 ALL_OBJFILE_OSECTIONS (objfile, osect)
2183 if (bfd_get_section_flags (objfile->obfd,
2184 osect->the_bfd_section) & SEC_ALLOC)
2186 bfd_vma start, size;
2187 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2188 size = bfd_get_section_size (osect->the_bfd_section);
2190 if (start <= vma && vma < start + size)
2197 /* Parse contents of exception table and exception index sections
2198 of OBJFILE, and fill in the exception table entry cache.
2200 For each entry that refers to a standard ARM-defined personality
2201 routine, extract the frame unwinding instructions (from either
2202 the index or the table section). The unwinding instructions are normalized by:
2204 - extracting them from the rest of the table data
2205 - converting to host endianness
2206 - appending the implicit 0xb0 ("Finish") code
2208 The extracted and normalized instructions are stored for later
2209 retrieval by the arm_find_exidx_entry routine. */
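/* Orientation note (summarizing the ARM EHABI, not original to this
   file): each .ARM.exidx entry is a pair of 32-bit words. The first
   is a prel31 offset to the function start; the second is either the
   literal 0x1 (EXIDX_CANTUNWIND), a compact inline entry with bit 31
   set, or a prel31 offset into .ARM.extab. The parsing below follows
   that layout. */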
2212 arm_exidx_new_objfile (struct objfile *objfile)
2214 struct cleanup *cleanups;
2215 struct arm_exidx_data *data;
2216 asection *exidx, *extab;
2217 bfd_vma exidx_vma = 0, extab_vma = 0;
2218 bfd_size_type exidx_size = 0, extab_size = 0;
2219 gdb_byte *exidx_data = NULL, *extab_data = NULL;
2222 /* If we've already touched this file, do nothing. */
2223 if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
2225 cleanups = make_cleanup (null_cleanup, NULL);
2227 /* Read contents of exception table and index. */
2228 exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
2231 exidx_vma = bfd_section_vma (objfile->obfd, exidx);
2232 exidx_size = bfd_get_section_size (exidx);
2233 exidx_data = xmalloc (exidx_size);
2234 make_cleanup (xfree, exidx_data);
2236 if (!bfd_get_section_contents (objfile->obfd, exidx,
2237 exidx_data, 0, exidx_size))
2239 do_cleanups (cleanups);
2244 extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
2247 extab_vma = bfd_section_vma (objfile->obfd, extab);
2248 extab_size = bfd_get_section_size (extab);
2249 extab_data = xmalloc (extab_size);
2250 make_cleanup (xfree, extab_data);
2252 if (!bfd_get_section_contents (objfile->obfd, extab,
2253 extab_data, 0, extab_size))
2255 do_cleanups (cleanups);
2260 /* Allocate exception table data structure. */
2261 data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
2262 set_objfile_data (objfile, arm_exidx_data_key, data);
2263 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
2264 objfile->obfd->section_count,
2265 VEC(arm_exidx_entry_s) *);
2267 /* Fill in exception table. */
2268 for (i = 0; i < exidx_size / 8; i++)
2270 struct arm_exidx_entry new_exidx_entry;
2271 bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
2272 bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
2273 bfd_vma addr = 0, word = 0;
2274 int n_bytes = 0, n_words = 0;
2275 struct obj_section *sec;
2276 gdb_byte *entry = NULL;
2278 /* Extract address of start of function. */
2279 idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2280 idx += exidx_vma + i * 8;
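/* The masking/XOR/subtract applied to IDX above is the usual
   (x ^ m) - m sign-extension trick with m = 0x40000000, since bit 30
   is the prel31 sign bit: e.g. 0x7ffffffe becomes -2, while small
   positive offsets pass through unchanged. */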
2282 /* Find section containing function and compute section offset. */
2283 sec = arm_obj_section_from_vma (objfile, idx);
2286 idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);
2288 /* Determine address of exception table entry. */
2291 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2293 else if ((val & 0xff000000) == 0x80000000)
2295 /* Exception table entry embedded in .ARM.exidx
2296 -- must be short form. */
2300 else if (!(val & 0x80000000))
2302 /* Exception table entry in .ARM.extab. */
2303 addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2304 addr += exidx_vma + i * 8 + 4;
2306 if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
2308 word = bfd_h_get_32 (objfile->obfd,
2309 extab_data + addr - extab_vma);
2312 if ((word & 0xff000000) == 0x80000000)
2317 else if ((word & 0xff000000) == 0x81000000
2318 || (word & 0xff000000) == 0x82000000)
2322 n_words = ((word >> 16) & 0xff);
2324 else if (!(word & 0x80000000))
2327 struct obj_section *pers_sec;
2328 int gnu_personality = 0;
2330 /* Custom personality routine. */
2331 pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2332 pers = UNMAKE_THUMB_ADDR (pers + addr - 4);
2334 /* Check whether we've got one of the variants of the
2335 GNU personality routines. */
2336 pers_sec = arm_obj_section_from_vma (objfile, pers);
2339 static const char *personality[] =
2341 "__gcc_personality_v0",
2342 "__gxx_personality_v0",
2343 "__gcj_personality_v0",
2344 "__gnu_objc_personality_v0",
2348 CORE_ADDR pc = pers + obj_section_offset (pers_sec);
2351 for (k = 0; personality[k]; k++)
2352 if (lookup_minimal_symbol_by_pc_name
2353 (pc, personality[k], objfile))
2355 gnu_personality = 1;
2360 /* If so, the next word contains a word count in the high
2361 byte, followed by the same unwind instructions as the
2362 pre-defined forms. */
2364 && addr + 4 <= extab_vma + extab_size)
2366 word = bfd_h_get_32 (objfile->obfd,
2367 extab_data + addr - extab_vma);
2370 n_words = ((word >> 24) & 0xff);
2376 /* Sanity check address. */
2378 if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
2379 n_words = n_bytes = 0;
2381 /* The unwind instructions reside in WORD (only the N_BYTES least
2382 significant bytes are valid), followed by N_WORDS words in the
2383 extab section starting at ADDR. */
2384 if (n_bytes || n_words)
2386 gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
2387 n_bytes + n_words * 4 + 1);
2390 *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);
2394 word = bfd_h_get_32 (objfile->obfd,
2395 extab_data + addr - extab_vma);
2398 *p++ = (gdb_byte) ((word >> 24) & 0xff);
2399 *p++ = (gdb_byte) ((word >> 16) & 0xff);
2400 *p++ = (gdb_byte) ((word >> 8) & 0xff);
2401 *p++ = (gdb_byte) (word & 0xff);
2404 /* Implied "Finish" to terminate the list. */
2408 /* Push entry onto vector. They are guaranteed to always
2409 appear in order of increasing addresses. */
2410 new_exidx_entry.addr = idx;
2411 new_exidx_entry.entry = entry;
2412 VEC_safe_push (arm_exidx_entry_s,
2413 data->section_maps[sec->the_bfd_section->index],
2417 do_cleanups (cleanups);
2420 /* Search for the exception table entry covering MEMADDR. If one is found,
2421 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2422 set *START to the start of the region covered by this entry. */
2425 arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
2427 struct obj_section *sec;
2429 sec = find_pc_section (memaddr);
2432 struct arm_exidx_data *data;
2433 VEC(arm_exidx_entry_s) *map;
2434 struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
2437 data = objfile_data (sec->objfile, arm_exidx_data_key);
2440 map = data->section_maps[sec->the_bfd_section->index];
2441 if (!VEC_empty (arm_exidx_entry_s, map))
2443 struct arm_exidx_entry *map_sym;
2445 idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
2446 arm_compare_exidx_entries);
2448 /* VEC_lower_bound finds the earliest ordered insertion
2449 point. If the following symbol starts at this exact
2450 address, we use that; otherwise, the preceding
2451 exception table entry covers this address. */
2452 if (idx < VEC_length (arm_exidx_entry_s, map))
2454 map_sym = VEC_index (arm_exidx_entry_s, map, idx);
2455 if (map_sym->addr == map_key.addr)
2458 *start = map_sym->addr + obj_section_addr (sec);
2459 return map_sym->entry;
2465 map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
2467 *start = map_sym->addr + obj_section_addr (sec);
2468 return map_sym->entry;
2477 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2478 instruction list from the ARM exception table entry ENTRY, allocate and
2479 return a prologue cache structure describing how to unwind this frame.
2481 Return NULL if the unwinding instruction list contains a "spare",
2482 "reserved" or "refuse to unwind" instruction as defined in section
2483 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2484 for the ARM Architecture" document. */
2486 static struct arm_prologue_cache *
2487 arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
2492 struct arm_prologue_cache *cache;
2493 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2494 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2500 /* Whenever we reload SP, we have to retrieve its actual
2501 value in the current frame. */
2504 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2506 int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2507 vsp = get_frame_register_unsigned (this_frame, reg);
2511 CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
2512 vsp = get_frame_memory_unsigned (this_frame, addr, 4);
2518 /* Decode next unwind instruction. */
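/* Reading aid (summarizing section 9.3 of the ARM EHABI; not part of
   the original comments): the opcodes handled below are
   00xxxxxx vsp += (x << 2) + 4
   01xxxxxx vsp -= (x << 2) + 4
   1000iiii iiiiiiii pop r4-r15 under mask (all-zero mask = refuse)
   1001nnnn vsp = r[n]
   1010xnnn pop r4-r[4+n], plus LR if x is set
   10110000 finish
   10110001 0000iiii pop r0-r3 under mask
   10110010 uleb128 vsp += 0x204 + (uleb128 << 2)
   and the remaining 0xb3/0xb8/0xc0/0xc6-0xc9/0xd0 forms pop VFP or
   iWMMXt registers; anything else is treated as "spare". */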
2521 if ((insn & 0xc0) == 0)
2523 int offset = insn & 0x3f;
2524 vsp += (offset << 2) + 4;
2526 else if ((insn & 0xc0) == 0x40)
2528 int offset = insn & 0x3f;
2529 vsp -= (offset << 2) + 4;
2531 else if ((insn & 0xf0) == 0x80)
2533 int mask = ((insn & 0xf) << 8) | *entry++;
2536 /* The special case of an all-zero mask identifies
2537 "Refuse to unwind". We return NULL to fall back
2538 to the prologue analyzer. */
2542 /* Pop registers r4..r15 under mask. */
2543 for (i = 0; i < 12; i++)
2544 if (mask & (1 << i))
2546 cache->saved_regs[4 + i].addr = vsp;
2550 /* Special-case popping SP -- we need to reload vsp. */
2551 if (mask & (1 << (ARM_SP_REGNUM - 4)))
2554 else if ((insn & 0xf0) == 0x90)
2556 int reg = insn & 0xf;
2558 /* Reserved cases. */
2559 if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
2562 /* Set SP from another register and mark VSP for reload. */
2563 cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
2566 else if ((insn & 0xf0) == 0xa0)
2568 int count = insn & 0x7;
2569 int pop_lr = (insn & 0x8) != 0;
2572 /* Pop r4..r[4+count]. */
2573 for (i = 0; i <= count; i++)
2575 cache->saved_regs[4 + i].addr = vsp;
2579 /* If indicated by flag, pop LR as well. */
2582 cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
2586 else if (insn == 0xb0)
2588 /* We could only have updated PC by popping into it; if so, it
2589 will show up as an address. Otherwise, copy LR into PC. */
2590 if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
2591 cache->saved_regs[ARM_PC_REGNUM]
2592 = cache->saved_regs[ARM_LR_REGNUM];
2597 else if (insn == 0xb1)
2599 int mask = *entry++;
2602 /* All-zero mask and mask >= 16 is "spare". */
2603 if (mask == 0 || mask >= 16)
2606 /* Pop r0..r3 under mask. */
2607 for (i = 0; i < 4; i++)
2608 if (mask & (1 << i))
2610 cache->saved_regs[i].addr = vsp;
2614 else if (insn == 0xb2)
2616 ULONGEST offset = 0;
2621 offset |= (*entry & 0x7f) << shift;
2624 while (*entry++ & 0x80);
2626 vsp += 0x204 + (offset << 2);
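/* Worked example of the ULEB128 decode above (illustrative): the byte
   sequence 0x81 0x01 yields offset = 1 | (1 << 7) = 129, so
   vsp += 0x204 + (129 << 2) = 0x408. */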
2628 else if (insn == 0xb3)
2630 int start = *entry >> 4;
2631 int count = (*entry++) & 0xf;
2634 /* Only registers D0..D15 are valid here. */
2635 if (start + count >= 16)
2638 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2639 for (i = 0; i <= count; i++)
2641 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2645 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2648 else if ((insn & 0xf8) == 0xb8)
2650 int count = insn & 0x7;
2653 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2654 for (i = 0; i <= count; i++)
2656 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2660 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2663 else if (insn == 0xc6)
2665 int start = *entry >> 4;
2666 int count = (*entry++) & 0xf;
2669 /* Only registers WR0..WR15 are valid. */
2670 if (start + count >= 16)
2673 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2674 for (i = 0; i <= count; i++)
2676 cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
2680 else if (insn == 0xc7)
2682 int mask = *entry++;
2685 /* All-zero mask and mask >= 16 is "spare". */
2686 if (mask == 0 || mask >= 16)
2689 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2690 for (i = 0; i < 4; i++)
2691 if (mask & (1 << i))
2693 cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
2697 else if ((insn & 0xf8) == 0xc0)
2699 int count = insn & 0x7;
2702 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2703 for (i = 0; i <= count; i++)
2705 cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
2709 else if (insn == 0xc8)
2711 int start = *entry >> 4;
2712 int count = (*entry++) & 0xf;
2715 /* Only registers D0..D31 are valid. */
2716 if (start + count >= 16)
2719 /* Pop VFP double-precision registers
2720 D[16+start]..D[16+start+count]. */
2721 for (i = 0; i <= count; i++)
2723 cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
2727 else if (insn == 0xc9)
2729 int start = *entry >> 4;
2730 int count = (*entry++) & 0xf;
2733 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2734 for (i = 0; i <= count; i++)
2736 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2740 else if ((insn & 0xf8) == 0xd0)
2742 int count = insn & 0x7;
2745 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2746 for (i = 0; i <= count; i++)
2748 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2754 /* Everything else is "spare". */
2759 /* If we restore SP from a register, assume this was the frame register.
2760 Otherwise just fall back to SP as frame register. */
2761 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2762 cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2764 cache->framereg = ARM_SP_REGNUM;
2766 /* Determine offset to previous frame. */
2768 = vsp - get_frame_register_unsigned (this_frame, cache->framereg);
2770 /* We already got the previous SP. */
2771 cache->prev_sp = vsp;
2776 /* Unwinding via ARM exception table entries. Note that the sniffer
2777 already computes a filled-in prologue cache, which is then used
2778 with the same arm_prologue_this_id and arm_prologue_prev_register
2779 routines also used for prologue-parsing based unwinding. */
2782 arm_exidx_unwind_sniffer (const struct frame_unwind *self,
2783 struct frame_info *this_frame,
2784 void **this_prologue_cache)
2786 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2787 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2788 CORE_ADDR addr_in_block, exidx_region, func_start;
2789 struct arm_prologue_cache *cache;
2792 /* See if we have an ARM exception table entry covering this address. */
2793 addr_in_block = get_frame_address_in_block (this_frame);
2794 entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
2798 /* The ARM exception table does not describe unwind information
2799 for arbitrary PC values, but is guaranteed to be correct only
2800 at call sites. We have to decide here whether we want to use
2801 ARM exception table information for this frame, or fall back
2802 to using prologue parsing. (Note that if we have DWARF CFI,
2803 this sniffer isn't even called -- CFI is always preferred.)
2805 Before we make this decision, however, we check whether we
2806 actually have *symbol* information for the current frame.
2807 If not, prologue parsing would not work anyway, so we might
2808 as well use the exception table and hope for the best. */
2809 if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
2813 /* If the next frame is "normal", we are at a call site in this
2814 frame, so exception information is guaranteed to be valid. */
2815 if (get_next_frame (this_frame)
2816 && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
2819 /* We also assume exception information is valid if we're currently
2820 blocked in a system call. The system library is supposed to
2821 ensure this, so that e.g. pthread cancellation works. */
2822 if (arm_frame_is_thumb (this_frame))
2826 if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
2827 byte_order_for_code, &insn)
2828 && (insn & 0xff00) == 0xdf00 /* svc */)
2835 if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
2836 byte_order_for_code, &insn)
2837 && (insn & 0x0f000000) == 0x0f000000 /* svc */)
2841 /* Bail out if we don't know that exception information is valid. */
2845 /* The ARM exception index does not mark the *end* of the region
2846 covered by the entry, and some functions will not have any entry.
2847 To correctly recognize the end of the covered region, the linker
2848 should have inserted dummy records with a CANTUNWIND marker.
2850 Unfortunately, current versions of GNU ld do not reliably do
2851 this, and thus we may have found an incorrect entry above.
2852 As a (temporary) sanity check, we only use the entry if it
2853 lies *within* the bounds of the function. Note that this check
2854 might reject perfectly valid entries that just happen to cover
2855 multiple functions; therefore this check ought to be removed
2856 once the linker is fixed. */
2857 if (func_start > exidx_region)
2861 /* Decode the list of unwinding instructions into a prologue cache.
2862 Note that this may fail due to e.g. a "refuse to unwind" code. */
2863 cache = arm_exidx_fill_cache (this_frame, entry);
2867 *this_prologue_cache = cache;
2871 struct frame_unwind arm_exidx_unwind = {
2873 default_frame_unwind_stop_reason,
2874 arm_prologue_this_id,
2875 arm_prologue_prev_register,
2877 arm_exidx_unwind_sniffer
2880 static struct arm_prologue_cache *
2881 arm_make_stub_cache (struct frame_info *this_frame)
2883 struct arm_prologue_cache *cache;
2885 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2886 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2888 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2893 /* Our frame ID for a stub frame is the current SP and LR. */
2896 arm_stub_this_id (struct frame_info *this_frame,
2898 struct frame_id *this_id)
2900 struct arm_prologue_cache *cache;
2902 if (*this_cache == NULL)
2903 *this_cache = arm_make_stub_cache (this_frame);
2904 cache = *this_cache;
2906 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2910 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2911 struct frame_info *this_frame,
2912 void **this_prologue_cache)
2914 CORE_ADDR addr_in_block;
2917 addr_in_block = get_frame_address_in_block (this_frame);
2918 if (in_plt_section (addr_in_block, NULL)
2919 /* We also use the stub unwinder if the target memory is unreadable,
2920 to avoid having the prologue unwinder try to read it. */
2921 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2927 struct frame_unwind arm_stub_unwind = {
2929 default_frame_unwind_stop_reason,
2931 arm_prologue_prev_register,
2933 arm_stub_unwind_sniffer
2937 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2939 struct arm_prologue_cache *cache;
2941 if (*this_cache == NULL)
2942 *this_cache = arm_make_prologue_cache (this_frame);
2943 cache = *this_cache;
2945 return cache->prev_sp - cache->framesize;
2948 struct frame_base arm_normal_base = {
2949 &arm_prologue_unwind,
2950 arm_normal_frame_base,
2951 arm_normal_frame_base,
2952 arm_normal_frame_base
2955 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2956 dummy frame. The frame ID's base needs to match the TOS value
2957 saved by save_dummy_frame_tos() and returned from
2958 arm_push_dummy_call, and the PC needs to match the dummy frame's breakpoint. */
2961 static struct frame_id
2962 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2964 return frame_id_build (get_frame_register_unsigned (this_frame,
2966 get_frame_pc (this_frame));
2969 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2970 be used to construct the previous frame's ID, after looking up the
2971 containing function). */
2974 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2977 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2978 return arm_addr_bits_remove (gdbarch, pc);
2982 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2984 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2987 static struct value *
2988 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2991 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2993 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2998 /* The PC is normally copied from the return column, which
2999 describes saves of LR. However, that version may have an
3000 extra bit set to indicate Thumb state. The bit is not part of the PC. */
3002 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3003 return frame_unwind_got_constant (this_frame, regnum,
3004 arm_addr_bits_remove (gdbarch, lr));
3007 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3008 cpsr = get_frame_register_unsigned (this_frame, regnum);
3009 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3010 if (IS_THUMB_ADDR (lr))
3014 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3017 internal_error (__FILE__, __LINE__,
3018 _("Unexpected register %d"), regnum);
3023 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3024 struct dwarf2_frame_state_reg *reg,
3025 struct frame_info *this_frame)
3031 reg->how = DWARF2_FRAME_REG_FN;
3032 reg->loc.fn = arm_dwarf2_prev_register;
3035 reg->how = DWARF2_FRAME_REG_CFA;
3040 /* Return true if we are in the function's epilogue, i.e. after the
3041 instruction that destroyed the function's stack frame. */
3044 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3046 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3047 unsigned int insn, insn2;
3048 int found_return = 0, found_stack_adjust = 0;
3049 CORE_ADDR func_start, func_end;
3053 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3056 /* The epilogue is a sequence of instructions along the following lines:
3058 - add stack frame size to SP or FP
3059 - [if frame pointer used] restore SP from FP
3060 - restore registers from SP [may include PC]
3061 - a return-type instruction [if PC wasn't already restored]
3063 In a first pass, we scan forward from the current PC and verify the
3064 instructions we find as compatible with this sequence, ending in a return instruction.
3067 However, this is not sufficient to distinguish indirect function calls
3068 within a function from indirect tail calls in the epilogue in some cases.
3069 Therefore, if we didn't already find any SP-changing instruction during
3070 forward scan, we add a backward scanning heuristic to ensure we actually
3071 are in the epilogue. */
3074 while (scan_pc < func_end && !found_return)
3076 if (target_read_memory (scan_pc, buf, 2))
3080 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3082 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
3084 else if (insn == 0x46f7) /* mov pc, lr */
3086 else if (insn == 0x46bd) /* mov sp, r7 */
3087 found_stack_adjust = 1;
3088 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3089 found_stack_adjust = 1;
3090 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
3092 found_stack_adjust = 1;
3093 if (insn & 0x0100) /* <registers> include PC. */
3096 else if ((insn & 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
3098 if (target_read_memory (scan_pc, buf, 2))
3102 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
3104 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3106 found_stack_adjust = 1;
3107 if (insn2 & 0x8000) /* <registers> include PC. */
3110 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3111 && (insn2 & 0x0fff) == 0x0b04)
3113 found_stack_adjust = 1;
3114 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
3117 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3118 && (insn2 & 0x0e00) == 0x0a00)
3119 found_stack_adjust = 1;
3130 /* Since any instruction in the epilogue sequence, with the possible
3131 exception of return itself, updates the stack pointer, we need to
3132 scan backwards for at most one instruction. Try either a 16-bit or
3133 a 32-bit instruction. This is just a heuristic, so we do not worry
3134 too much about false positives. */
3136 if (!found_stack_adjust)
3138 if (pc - 4 < func_start)
3140 if (target_read_memory (pc - 4, buf, 4))
3143 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3144 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
3146 if (insn2 == 0x46bd) /* mov sp, r7 */
3147 found_stack_adjust = 1;
3148 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3149 found_stack_adjust = 1;
3150 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
3151 found_stack_adjust = 1;
3152 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3153 found_stack_adjust = 1;
3154 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3155 && (insn2 & 0x0fff) == 0x0b04)
3156 found_stack_adjust = 1;
3157 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3158 && (insn2 & 0x0e00) == 0x0a00)
3159 found_stack_adjust = 1;
3162 return found_stack_adjust;
3165 /* Return true if we are in the function's epilogue, i.e. after the
3166 instruction that destroyed the function's stack frame. */
3169 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3171 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3173 int found_return, found_stack_adjust;
3174 CORE_ADDR func_start, func_end;
3176 if (arm_pc_is_thumb (gdbarch, pc))
3177 return thumb_in_function_epilogue_p (gdbarch, pc);
3179 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3182 /* We are in the epilogue if the previous instruction was a stack
3183 adjustment and the next instruction is a possible return (bx, mov
3184 pc, or pop). We could have to scan backwards to find the stack
3185 adjustment, or forwards to find the return, but this is a decent
3186 approximation. First scan forwards. */
3189 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3190 if (bits (insn, 28, 31) != INST_NV)
3192 if ((insn & 0x0ffffff0) == 0x012fff10)
3195 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3198 else if ((insn & 0x0fff0000) == 0x08bd0000
3199 && (insn & 0x0000c000) != 0)
3200 /* POP (LDMIA), including PC or LR. */
3207 /* Scan backwards. This is just a heuristic, so do not worry about
3208 false positives from mode changes. */
3210 if (pc < func_start + 4)
3213 found_stack_adjust = 0;
3214 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3215 if (bits (insn, 28, 31) != INST_NV)
3217 if ((insn & 0x0df0f000) == 0x0080d000)
3218 /* ADD SP (register or immediate). */
3219 found_stack_adjust = 1;
3220 else if ((insn & 0x0df0f000) == 0x0040d000)
3221 /* SUB SP (register or immediate). */
3222 found_stack_adjust = 1;
3223 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3225 found_stack_adjust = 1;
3226 else if ((insn & 0x0fff0000) == 0x08bd0000)
3228 found_stack_adjust = 1;
3231 if (found_stack_adjust)
3238 /* When arguments must be pushed onto the stack, they go on in reverse
3239 order. The code below implements a FILO (stack) to do this. */
3244 struct stack_item *prev;
3248 static struct stack_item *
3249 push_stack_item (struct stack_item *prev, const void *contents, int len)
3251 struct stack_item *si;
3252 si = xmalloc (sizeof (struct stack_item));
3253 si->data = xmalloc (len);
3256 memcpy (si->data, contents, len);
3260 static struct stack_item *
3261 pop_stack_item (struct stack_item *si)
3263 struct stack_item *dead = si;
3271 /* Return the alignment (in bytes) of the given type. */
3274 arm_type_align (struct type *t)
3280 t = check_typedef (t);
3281 switch (TYPE_CODE (t))
3284 /* Should never happen. */
3285 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3289 case TYPE_CODE_ENUM:
3293 case TYPE_CODE_RANGE:
3294 case TYPE_CODE_BITSTRING:
3296 case TYPE_CODE_CHAR:
3297 case TYPE_CODE_BOOL:
3298 return TYPE_LENGTH (t);
3300 case TYPE_CODE_ARRAY:
3301 case TYPE_CODE_COMPLEX:
3302 /* TODO: What about vector types? */
3303 return arm_type_align (TYPE_TARGET_TYPE (t));
3305 case TYPE_CODE_STRUCT:
3306 case TYPE_CODE_UNION:
3308 for (n = 0; n < TYPE_NFIELDS (t); n++)
3310 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3318 /* Possible base types for a candidate for passing and returning in
3321 enum arm_vfp_cprc_base_type
3330 /* The length of one element of base type B. */
3333 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3337 case VFP_CPRC_SINGLE:
3339 case VFP_CPRC_DOUBLE:
3341 case VFP_CPRC_VEC64:
3343 case VFP_CPRC_VEC128:
3346 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3351 /* The character ('s', 'd' or 'q') for the type of VFP register used
3352 for passing base type B. */
3355 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3359 case VFP_CPRC_SINGLE:
3361 case VFP_CPRC_DOUBLE:
3363 case VFP_CPRC_VEC64:
3365 case VFP_CPRC_VEC128:
3368 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3373 /* Determine whether T may be part of a candidate for passing and
3374 returning in VFP registers, ignoring the limit on the total number
3375 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3376 classification of the first valid component found; if it is not
3377 VFP_CPRC_UNKNOWN, all components must have the same classification
3378 as *BASE_TYPE. If it is found that T contains a type not permitted
3379 for passing and returning in VFP registers, a type differently
3380 classified from *BASE_TYPE, or two types differently classified
3381 from each other, return -1, otherwise return the total number of
3382 base-type elements found (possibly 0 in an empty structure or
3383 array). Vectors and complex types are not currently supported,
3384 matching the generic AAPCS support. */
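/* Illustrative examples (not exhaustive): "struct { float x, y, z; }"
   yields base type VFP_CPRC_SINGLE with a count of 3, while
   "struct { double d; float f; }" mixes classifications and yields -1.
   The limit of at most four elements is enforced by the caller,
   arm_vfp_call_candidate, not here. */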
3387 arm_vfp_cprc_sub_candidate (struct type *t,
3388 enum arm_vfp_cprc_base_type *base_type)
3390 t = check_typedef (t);
3391 switch (TYPE_CODE (t))
3394 switch (TYPE_LENGTH (t))
3397 if (*base_type == VFP_CPRC_UNKNOWN)
3398 *base_type = VFP_CPRC_SINGLE;
3399 else if (*base_type != VFP_CPRC_SINGLE)
3404 if (*base_type == VFP_CPRC_UNKNOWN)
3405 *base_type = VFP_CPRC_DOUBLE;
3406 else if (*base_type != VFP_CPRC_DOUBLE)
3415 case TYPE_CODE_ARRAY:
3419 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
3422 if (TYPE_LENGTH (t) == 0)
3424 gdb_assert (count == 0);
3427 else if (count == 0)
3429 unitlen = arm_vfp_cprc_unit_length (*base_type);
3430 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
3431 return TYPE_LENGTH (t) / unitlen;
3435 case TYPE_CODE_STRUCT:
3440 for (i = 0; i < TYPE_NFIELDS (t); i++)
3442 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3444 if (sub_count == -1)
3448 if (TYPE_LENGTH (t) == 0)
3450 gdb_assert (count == 0);
3453 else if (count == 0)
3455 unitlen = arm_vfp_cprc_unit_length (*base_type);
3456 if (TYPE_LENGTH (t) != unitlen * count)
3461 case TYPE_CODE_UNION:
3466 for (i = 0; i < TYPE_NFIELDS (t); i++)
3468 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3470 if (sub_count == -1)
3472 count = (count > sub_count ? count : sub_count);
3474 if (TYPE_LENGTH (t) == 0)
3476 gdb_assert (count == 0);
3479 else if (count == 0)
3481 unitlen = arm_vfp_cprc_unit_length (*base_type);
3482 if (TYPE_LENGTH (t) != unitlen * count)
3494 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3495 if passed to or returned from a non-variadic function with the VFP
3496 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3497 *BASE_TYPE to the base type for T and *COUNT to the number of
3498 elements of that base type before returning. */
3501 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3504 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3505 int c = arm_vfp_cprc_sub_candidate (t, &b);
3506 if (c <= 0 || c > 4)
3513 /* Return 1 if the VFP ABI should be used for passing arguments to and
3514 returning values from a function of type FUNC_TYPE, 0 otherwise. */
3518 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3520 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3521 /* Variadic functions always use the base ABI. Assume that functions
3522 without debug info are not variadic. */
3523 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3525 /* The VFP ABI is only supported as a variant of AAPCS. */
3526 if (tdep->arm_abi != ARM_ABI_AAPCS)
3528 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3531 /* We currently only support passing parameters in integer registers, which
3532 conforms with GCC's default model, and VFP argument passing following
3533 the VFP variant of AAPCS. Several other variants exist and
3534 we should probably support some of them based on the selected ABI. */
3537 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3538 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3539 struct value **args, CORE_ADDR sp, int struct_return,
3540 CORE_ADDR struct_addr)
3542 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3546 struct stack_item *si = NULL;
3549 unsigned vfp_regs_free = (1 << 16) - 1;
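/* VFP_REGS_FREE tracks the VFP argument registers at single-precision
   granularity: bit N set means sN is still free; a double consumes two
   adjacent bits, a quad four. For example (illustrative), an aggregate
   of two doubles allocates d0/d1 by clearing bits 0-3. */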
3551 /* Determine the type of this function and whether the VFP ABI
3553 ftype = check_typedef (value_type (function));
3554 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3555 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3556 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3558 /* Set the return address. For the ARM, the return breakpoint is
3559 always at BP_ADDR. */
3560 if (arm_pc_is_thumb (gdbarch, bp_addr))
3562 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3564 /* Walk through the list of args and determine how large a temporary
3565 stack is required. Need to take care here as structs may be
3566 passed on the stack, and we have to push them. */
3569 argreg = ARM_A1_REGNUM;
3572 /* The struct_return pointer occupies the first parameter
3573 passing register. */
3577 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3578 gdbarch_register_name (gdbarch, argreg),
3579 paddress (gdbarch, struct_addr));
3580 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3584 for (argnum = 0; argnum < nargs; argnum++)
3587 struct type *arg_type;
3588 struct type *target_type;
3589 enum type_code typecode;
3590 const bfd_byte *val;
3592 enum arm_vfp_cprc_base_type vfp_base_type;
3594 int may_use_core_reg = 1;
3596 arg_type = check_typedef (value_type (args[argnum]));
3597 len = TYPE_LENGTH (arg_type);
3598 target_type = TYPE_TARGET_TYPE (arg_type);
3599 typecode = TYPE_CODE (arg_type);
3600 val = value_contents (args[argnum]);
3602 align = arm_type_align (arg_type);
3603 /* Round alignment up to a whole number of words. */
3604 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3605 /* Different ABIs have different maximum alignments. */
3606 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3608 /* The APCS ABI only requires word alignment. */
3609 align = INT_REGISTER_SIZE;
3613 /* The AAPCS requires at most doubleword alignment. */
3614 if (align > INT_REGISTER_SIZE * 2)
3615 align = INT_REGISTER_SIZE * 2;
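/* Example of the alignment computation above (illustrative): an
   argument whose natural alignment is 8 (e.g. it contains a double)
   stays at 8 after rounding to whole words; APCS then clamps it to 4,
   while AAPCS keeps it at its doubleword cap of 8. */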
3619 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3627 /* Because this is a CPRC it cannot go in a core register or
3628 cause a core register to be skipped for alignment.
3629 Either it goes in VFP registers and the rest of this loop
3630 iteration is skipped for this argument, or it goes on the
3631 stack (and the stack alignment code is correct for this
3633 may_use_core_reg = 0;
3635 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3636 shift = unit_length / 4;
3637 mask = (1 << (shift * vfp_base_count)) - 1;
3638 for (regno = 0; regno < 16; regno += shift)
3639 if (((vfp_regs_free >> regno) & mask) == mask)
3648 vfp_regs_free &= ~(mask << regno);
3649 reg_scaled = regno / shift;
3650 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3651 for (i = 0; i < vfp_base_count; i++)
3655 if (reg_char == 'q')
3656 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3657 val + i * unit_length);
3660 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
3661 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3663 regcache_cooked_write (regcache, regnum,
3664 val + i * unit_length);
3671 /* This CPRC could not go in VFP registers, so all VFP
3672 registers are now marked as used. */
3677 /* Push stack padding for doubleword alignment. */
3678 if (nstack & (align - 1))
3680 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3681 nstack += INT_REGISTER_SIZE;
3684 /* Doubleword aligned quantities must go in even register pairs. */
3685 if (may_use_core_reg
3686 && argreg <= ARM_LAST_ARG_REGNUM
3687 && align > INT_REGISTER_SIZE
3691 /* If the argument is a pointer to a function, and it is a
3692 Thumb function, create a LOCAL copy of the value and set
3693 the THUMB bit in it. */
3694 if (TYPE_CODE_PTR == typecode
3695 && target_type != NULL
3696 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3698 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3699 if (arm_pc_is_thumb (gdbarch, regval))
3701 bfd_byte *copy = alloca (len);
3702 store_unsigned_integer (copy, len, byte_order,
3703 MAKE_THUMB_ADDR (regval));
3708 /* Copy the argument to general registers or the stack in
3709 register-sized pieces. Large arguments are split between
3710 registers and stack. */
3713 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3715 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3717 /* The argument is being passed in a general purpose register. */
3720 = extract_unsigned_integer (val, partial_len, byte_order);
3721 if (byte_order == BFD_ENDIAN_BIG)
3722 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3724 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3726 gdbarch_register_name
3728 phex (regval, INT_REGISTER_SIZE));
3729 regcache_cooked_write_unsigned (regcache, argreg, regval);
3734 /* Push the arguments onto the stack. */
3736 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3738 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3739 nstack += INT_REGISTER_SIZE;
3746 /* If we have an odd number of words to push, then decrement the stack
3747 by one word now, so first stack argument will be dword aligned. */
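/* Example (illustrative): with SP at 0x1000 and three words (12 bytes)
   of stack arguments, dropping SP by an extra word first places the
   arguments at 0xff0, so the first stack argument is doubleword
   aligned. */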
3754 write_memory (sp, si->data, si->len);
3755 si = pop_stack_item (si);
3758 /* Finally, update the SP register. */
3759 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3765 /* Always align the frame to an 8-byte boundary. This is required on
3766 some platforms and harmless on the rest. */
3769 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3771 /* Align the stack to eight bytes. */
3772 return sp & ~ (CORE_ADDR) 7;
3776 print_fpu_flags (int flags)
3778 if (flags & (1 << 0))
3779 fputs ("IVO ", stdout);
3780 if (flags & (1 << 1))
3781 fputs ("DVZ ", stdout);
3782 if (flags & (1 << 2))
3783 fputs ("OFL ", stdout);
3784 if (flags & (1 << 3))
3785 fputs ("UFL ", stdout);
3786 if (flags & (1 << 4))
3787 fputs ("INX ", stdout);
3791 /* Print interesting information about the floating point processor
3792 (if present) or emulator. */
3794 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3795 struct frame_info *frame, const char *args)
3797 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3800 type = (status >> 24) & 127;
3801 if (status & (1 << 31))
3802 printf (_("Hardware FPU type %d\n"), type);
3804 printf (_("Software FPU type %d\n"), type);
3805 /* i18n: [floating point unit] mask */
3806 fputs (_("mask: "), stdout);
3807 print_fpu_flags (status >> 16);
3808 /* i18n: [floating point unit] flags */
3809 fputs (_("flags: "), stdout);
3810 print_fpu_flags (status);
3813 /* Construct the ARM extended floating point type. */
3814 static struct type *
3815 arm_ext_type (struct gdbarch *gdbarch)
3817 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3819 if (!tdep->arm_ext_type)
3821 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3822 floatformats_arm_ext);
3824 return tdep->arm_ext_type;
3827 static struct type *
3828 arm_neon_double_type (struct gdbarch *gdbarch)
3830 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3832 if (tdep->neon_double_type == NULL)
3834 struct type *t, *elem;
3836 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3838 elem = builtin_type (gdbarch)->builtin_uint8;
3839 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3840 elem = builtin_type (gdbarch)->builtin_uint16;
3841 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3842 elem = builtin_type (gdbarch)->builtin_uint32;
3843 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3844 elem = builtin_type (gdbarch)->builtin_uint64;
3845 append_composite_type_field (t, "u64", elem);
3846 elem = builtin_type (gdbarch)->builtin_float;
3847 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3848 elem = builtin_type (gdbarch)->builtin_double;
3849 append_composite_type_field (t, "f64", elem);
3851 TYPE_VECTOR (t) = 1;
3852 TYPE_NAME (t) = "neon_d";
3853 tdep->neon_double_type = t;
3856 return tdep->neon_double_type;
3859 /* FIXME: The vector types are not correctly ordered on big-endian
3860 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3861 bits of d0 - regardless of what unit size is being held in d0. So
3862 the offset of the first uint8 in d0 is 7, but the offset of the
3863 first float is 4. This code works as-is for little-endian targets. */
3866 static struct type *
3867 arm_neon_quad_type (struct gdbarch *gdbarch)
3869 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3871 if (tdep->neon_quad_type == NULL)
3873 struct type *t, *elem;
3875 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3877 elem = builtin_type (gdbarch)->builtin_uint8;
3878 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3879 elem = builtin_type (gdbarch)->builtin_uint16;
3880 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3881 elem = builtin_type (gdbarch)->builtin_uint32;
3882 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3883 elem = builtin_type (gdbarch)->builtin_uint64;
3884 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3885 elem = builtin_type (gdbarch)->builtin_float;
3886 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3887 elem = builtin_type (gdbarch)->builtin_double;
3888 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3890 TYPE_VECTOR (t) = 1;
3891 TYPE_NAME (t) = "neon_q";
3892 tdep->neon_quad_type = t;
3895 return tdep->neon_quad_type;
3898 /* Return the GDB type object for the "standard" data type of data in register REGNUM. */
3901 static struct type *
3902 arm_register_type (struct gdbarch *gdbarch, int regnum)
3904 int num_regs = gdbarch_num_regs (gdbarch);
3906 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
3907 && regnum >= num_regs && regnum < num_regs + 32)
3908 return builtin_type (gdbarch)->builtin_float;
3910 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
3911 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
3912 return arm_neon_quad_type (gdbarch);
3914 /* If the target description has register information, we are only
3915 in this function so that we can override the types of
3916 double-precision registers for NEON. */
3917 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
3919 struct type *t = tdesc_register_type (gdbarch, regnum);
3921 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
3922 && TYPE_CODE (t) == TYPE_CODE_FLT
3923 && gdbarch_tdep (gdbarch)->have_neon)
3924 return arm_neon_double_type (gdbarch);
3929 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
3931 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
3932 return builtin_type (gdbarch)->builtin_void;
3934 return arm_ext_type (gdbarch);
3936 else if (regnum == ARM_SP_REGNUM)
3937 return builtin_type (gdbarch)->builtin_data_ptr;
3938 else if (regnum == ARM_PC_REGNUM)
3939 return builtin_type (gdbarch)->builtin_func_ptr;
3940 else if (regnum >= ARRAY_SIZE (arm_register_names))
3941 /* These registers are only supported on targets which supply
3942 an XML description. */
3943 return builtin_type (gdbarch)->builtin_int0;
3945 return builtin_type (gdbarch)->builtin_uint32;
3948 /* Map a DWARF register REGNUM onto the appropriate GDB register number. */
3952 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3954 /* Core integer regs. */
3955 if (reg >= 0 && reg <= 15)
3958 /* Legacy FPA encoding. These were once used in a way which
3959 overlapped with VFP register numbering, so their use is
3960 discouraged, but GDB doesn't support the ARM toolchain
3961 which used them for VFP. */
3962 if (reg >= 16 && reg <= 23)
3963 return ARM_F0_REGNUM + reg - 16;
3965 /* New assignments for the FPA registers. */
3966 if (reg >= 96 && reg <= 103)
3967 return ARM_F0_REGNUM + reg - 96;
3969 /* WMMX register assignments. */
3970 if (reg >= 104 && reg <= 111)
3971 return ARM_WCGR0_REGNUM + reg - 104;
3973 if (reg >= 112 && reg <= 127)
3974 return ARM_WR0_REGNUM + reg - 112;
3976 if (reg >= 192 && reg <= 199)
3977 return ARM_WC0_REGNUM + reg - 192;
3979 /* VFP v2 registers. A double precision value is actually
3980 in d1 rather than s2, but the ABI only defines numbering
3981 for the single precision registers. This will "just work"
3982 in GDB for little endian targets (we'll read eight bytes,
3983 starting in s0 and then progressing to s1), but will be
3984 reversed on big endian targets with VFP. This won't
3985 be a problem for the new Neon quad registers; you're supposed
3986 to use DW_OP_piece for those. */
3987 if (reg >= 64 && reg <= 95)
3991 sprintf (name_buf, "s%d", reg - 64);
3992 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3996 /* VFP v3 / Neon registers. This range is also used for VFP v2
3997 registers, except that it now describes d0 instead of s0. */
3998 if (reg >= 256 && reg <= 287)
4002 sprintf (name_buf, "d%d", reg - 256);
4003 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4010 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4012 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4015 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4017 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4018 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4020 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4021 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4023 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4024 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4026 if (reg < NUM_GREGS)
4027 return SIM_ARM_R0_REGNUM + reg;
4030 if (reg < NUM_FREGS)
4031 return SIM_ARM_FP0_REGNUM + reg;
4034 if (reg < NUM_SREGS)
4035 return SIM_ARM_FPS_REGNUM + reg;
4038 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4041 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4042 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4043 It is thought that this is the floating-point register format on
4044 little-endian systems. */
4047 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4048 void *dbl, int endianess)
4052 if (endianess == BFD_ENDIAN_BIG)
4053 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4055 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4057 floatformat_from_doublest (fmt, &d, dbl);
4061 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4066 floatformat_to_doublest (fmt, ptr, &d);
4067 if (endianess == BFD_ENDIAN_BIG)
4068 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4070 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4075 condition_true (unsigned long cond, unsigned long status_reg)
4077 if (cond == INST_AL || cond == INST_NV)
4083 return ((status_reg & FLAG_Z) != 0);
4085 return ((status_reg & FLAG_Z) == 0);
4087 return ((status_reg & FLAG_C) != 0);
4089 return ((status_reg & FLAG_C) == 0);
4091 return ((status_reg & FLAG_N) != 0);
4093 return ((status_reg & FLAG_N) == 0);
4095 return ((status_reg & FLAG_V) != 0);
4097 return ((status_reg & FLAG_V) == 0);
4099 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4101 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4103 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4105 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4107 return (((status_reg & FLAG_Z) == 0)
4108 && (((status_reg & FLAG_N) == 0)
4109 == ((status_reg & FLAG_V) == 0)));
4111 return (((status_reg & FLAG_Z) != 0)
4112 || (((status_reg & FLAG_N) == 0)
4113 != ((status_reg & FLAG_V) == 0)));
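/* Worked example for shifted_reg_val below (illustrative): for the
   operand "r2, lsl #3" -- bits 0-3 = 2, bit 4 clear, shift type 0,
   immediate shift 3 -- with r2 holding 5, the result is 5 << 3 = 40. */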
4118 static unsigned long
4119 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
4120 unsigned long pc_val, unsigned long status_reg)
4122 unsigned long res, shift;
4123 int rm = bits (inst, 0, 3);
4124 unsigned long shifttype = bits (inst, 5, 6);
4128 int rs = bits (inst, 8, 11);
4129 shift = (rs == 15 ? pc_val + 8
4130 : get_frame_register_unsigned (frame, rs)) & 0xFF;
4133 shift = bits (inst, 7, 11);
4135 res = (rm == ARM_PC_REGNUM
4136 ? (pc_val + (bit (inst, 4) ? 12 : 8))
4137 : get_frame_register_unsigned (frame, rm));
4142 res = shift >= 32 ? 0 : res << shift;
4146 res = shift >= 32 ? 0 : res >> shift;
4152 res = ((res & 0x80000000L)
4153 ? ~((~res) >> shift) : res >> shift);
4156 case 3: /* ROR/RRX */
4159 res = (res >> 1) | (carry ? 0x80000000L : 0);
4161 res = (res >> shift) | (res << (32 - shift));
4165 return res & 0xffffffff;
4168 /* Return number of 1-bits in VAL. */
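/* E.g. for val = 0x28 (binary 101000), the loop below clears 0x08 and
   then 0x20, so bitcount returns 2. */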
4171 bitcount (unsigned long val)
4174 for (nbits = 0; val != 0; nbits++)
4175 val &= val - 1; /* Delete rightmost 1-bit in val. */
4179 /* Return the size in bytes of the complete Thumb instruction whose
4180 first halfword is INST1. */
4183 thumb_insn_size (unsigned short inst1)
4185 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4192 thumb_advance_itstate (unsigned int itstate)
4194 /* Preserve IT[7:5], the first three bits of the condition. Shift
4195 the upcoming condition flags left by one bit. */
4196 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
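/* For example, a one-instruction "IT EQ" block leaves ITSTATE = 0x08;
   after that instruction executes, the shift above turns it into 0x10,
   whose low four bits are zero, so the state is cleared. */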
4198 /* If we have finished the IT block, clear the state. */
4199 if ((itstate & 0x0f) == 0)
4205 /* Find the next PC after the current instruction executes. In some
4206 cases we can not statically determine the answer (see the IT state
4207 handling in this function); in that case, a breakpoint may be
4208 inserted in addition to the returned PC, which will be used to set
4209 another breakpoint by our caller. */
4212 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
4214 struct gdbarch *gdbarch = get_frame_arch (frame);
4215 struct address_space *aspace = get_frame_address_space (frame);
4216 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4217 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4218 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
4219 unsigned short inst1;
4220 CORE_ADDR nextpc = pc + 2; /* Default is next instruction. */
4221 unsigned long offset;
4222 ULONGEST status, itstate;
4224 nextpc = MAKE_THUMB_ADDR (nextpc);
4225 pc_val = MAKE_THUMB_ADDR (pc_val);
4227 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
4229 /* Thumb-2 conditional execution support. There are eight bits in
4230 the CPSR which describe conditional execution state. Once
4231 reconstructed (they're in a funny order), the low five bits
4232 describe the low bit of the condition for each instruction and
4233 how many instructions remain. The high three bits describe the
4234 base condition. One of the low four bits will be set if an IT
4235 block is active. These bits read as zero on earlier processors. */
4237 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4238 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
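/* The IT bits live in CPSR[15:10] (ITSTATE[7:2]) and CPSR[26:25]
   (ITSTATE[1:0]); the expression above simply reassembles them into a
   single byte. */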
4240 /* If-Then handling. On GNU/Linux, where this routine is used, we
4241 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4242 can disable execution of the undefined instruction. So we might
4243 miss the breakpoint if we set it on a skipped conditional
4244 instruction. Because conditional instructions can change the
4245 flags, affecting the execution of further instructions, we may
4246 need to set two breakpoints. */
4248 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
4250 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4252 /* An IT instruction. Because this instruction does not
4253 modify the flags, we can accurately predict the next
4254 executed instruction. */
4255 itstate = inst1 & 0x00ff;
4256 pc += thumb_insn_size (inst1);
4258 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4260 inst1 = read_memory_unsigned_integer (pc, 2,
4261 byte_order_for_code);
4262 pc += thumb_insn_size (inst1);
4263 itstate = thumb_advance_itstate (itstate);
4266 return MAKE_THUMB_ADDR (pc);
4268 else if (itstate != 0)
4270 /* We are in a conditional block. Check the condition. */
4271 if (! condition_true (itstate >> 4, status))
4273 /* Advance to the next executed instruction. */
4274 pc += thumb_insn_size (inst1);
4275 itstate = thumb_advance_itstate (itstate);
4277 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4279 inst1 = read_memory_unsigned_integer (pc, 2,
4280 byte_order_for_code);
4281 pc += thumb_insn_size (inst1);
4282 itstate = thumb_advance_itstate (itstate);
4285 return MAKE_THUMB_ADDR (pc);
4287 else if ((itstate & 0x0f) == 0x08)
4289 /* This is the last instruction of the conditional
4290 block, and it is executed. We can handle it normally
4291 because the following instruction is not conditional,
4292 and we must handle it normally because it is
4293 permitted to branch. Fall through. */
4299 /* There are conditional instructions after this one.
4300 If this instruction modifies the flags, then we can
4301 not predict what the next executed instruction will
4302 be. Fortunately, this instruction is architecturally
4303 forbidden to branch; we know it will fall through.
4304 Start by skipping past it. */
4305 pc += thumb_insn_size (inst1);
4306 itstate = thumb_advance_itstate (itstate);
4308 /* Set a breakpoint on the following instruction. */
4309 gdb_assert ((itstate & 0x0f) != 0);
4310 arm_insert_single_step_breakpoint (gdbarch, aspace,
4311 MAKE_THUMB_ADDR (pc));
4312 cond_negated = (itstate >> 4) & 1;
4314 /* Skip all following instructions with the same
4315 condition. If there is a later instruction in the IT
4316 block with the opposite condition, set the other
4317 breakpoint there. If not, then set a breakpoint on
4318 the instruction after the IT block. */
4321 inst1 = read_memory_unsigned_integer (pc, 2,
4322 byte_order_for_code);
4323 pc += thumb_insn_size (inst1);
4324 itstate = thumb_advance_itstate (itstate);
4326 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
4328 return MAKE_THUMB_ADDR (pc);
4332 else if (itstate & 0x0f)
4334 /* We are in a conditional block. Check the condition. */
4335 int cond = itstate >> 4;
4337 if (! condition_true (cond, status))
4339 /* Advance to the next instruction. All the 32-bit
4340 instructions share a common prefix. */
4341 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4342 return MAKE_THUMB_ADDR (pc + 4);
4344 return MAKE_THUMB_ADDR (pc + 2);
4347 /* Otherwise, handle the instruction normally. */
4350 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
4354 /* Fetch the saved PC from the stack. It's stored above
4355 all of the other registers. */
4356 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
4357 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
4358 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
4360 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
4362 unsigned long cond = bits (inst1, 8, 11);
4363 if (cond == 0x0f) /* 0x0f = SWI */
4365 struct gdbarch_tdep *tdep;
4366 tdep = gdbarch_tdep (gdbarch);
4368 if (tdep->syscall_next_pc != NULL)
4369 nextpc = tdep->syscall_next_pc (frame);
4372 else if (cond != 0x0f && condition_true (cond, status))
4373 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
4375 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
4377 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
4379 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
4381 unsigned short inst2;
4382 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
4384 /* Default to the next instruction. */
4386 nextpc = MAKE_THUMB_ADDR (nextpc);
4388 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
4390 /* Branches and miscellaneous control instructions. */
4392 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
4395 int j1, j2, imm1, imm2;
4397 imm1 = sbits (inst1, 0, 10);
4398 imm2 = bits (inst2, 0, 10);
4399 j1 = bit (inst2, 13);
4400 j2 = bit (inst2, 11);
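/* The branch offset is S:I1:I2:imm10:imm11:'0', where I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S). IMM1 already carries S via sign extension, so the XOR below recovers I1 and I2 in bits 23 and 22. */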
4402 offset = ((imm1 << 12) + (imm2 << 1));
4403 offset ^= ((!j2) << 22) | ((!j1) << 23);
4405 nextpc = pc_val + offset;
4406 /* For BLX make sure to clear the low bits. */
4407 if (bit (inst2, 12) == 0)
4408 nextpc = nextpc & 0xfffffffc;
4410 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
4412 /* SUBS PC, LR, #imm8. */
4413 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
4414 nextpc -= inst2 & 0x00ff;
4416 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
4418 /* Conditional branch. */
4419 if (condition_true (bits (inst1, 6, 9), status))
4421 int sign, j1, j2, imm1, imm2;
4423 sign = sbits (inst1, 10, 10);
4424 imm1 = bits (inst1, 0, 5);
4425 imm2 = bits (inst2, 0, 10);
4426 j1 = bit (inst2, 13);
4427 j2 = bit (inst2, 11);
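/* Assemble the conditional branch offset sign:J2:J1:imm6:imm11:'0', sign-extended. */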
4429 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
4430 offset += (imm1 << 12) + (imm2 << 1);
4432 nextpc = pc_val + offset;
4436 else if ((inst1 & 0xfe50) == 0xe810)
4438 /* Load multiple or RFE. */
4439 int rn, offset, load_pc = 1;
4441 rn = bits (inst1, 0, 3);
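/* Bits 7 and 8 of inst1 distinguish the LDMIA/POP, LDMDB, RFEIA and RFEDB forms; OFFSET locates the word that will be loaded into the PC, if any. */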
4442 if (bit (inst1, 7) && !bit (inst1, 8))
4445 if (!bit (inst2, 15))
4447 offset = bitcount (inst2) * 4 - 4;
4449 else if (!bit (inst1, 7) && bit (inst1, 8))
4452 if (!bit (inst2, 15))
4456 else if (bit (inst1, 7) && bit (inst1, 8))
4461 else if (!bit (inst1, 7) && !bit (inst1, 8))
4471 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
4472 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
4475 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
4477 /* MOV PC or MOVS PC. */
4478 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4479 nextpc = MAKE_THUMB_ADDR (nextpc);
4481 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
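/* A load (LDR) whose destination register is the PC (Rt == 15). */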
4485 int rn, load_pc = 1;
4487 rn = bits (inst1, 0, 3);
4488 base = get_frame_register_unsigned (frame, rn);
4489 if (rn == ARM_PC_REGNUM)
4491 base = (base + 4) & ~(CORE_ADDR) 0x3;
4493 base += bits (inst2, 0, 11);
4495 base -= bits (inst2, 0, 11);
4497 else if (bit (inst1, 7))
4498 base += bits (inst2, 0, 11);
4499 else if (bit (inst2, 11))
4501 if (bit (inst2, 10))
4504 base += bits (inst2, 0, 7);
4506 base -= bits (inst2, 0, 7);
4509 else if ((inst2 & 0x0fc0) == 0x0000)
4511 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
4512 base += get_frame_register_unsigned (frame, rm) << shift;
4519 nextpc = get_frame_memory_unsigned (frame, base, 4);
4521 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
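/* TBB: table branch, byte offsets. */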
4524 CORE_ADDR tbl_reg, table, offset, length;
4526 tbl_reg = bits (inst1, 0, 3);
4527 if (tbl_reg == 0x0f)
4528 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4530 table = get_frame_register_unsigned (frame, tbl_reg);
4532 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4533 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
4534 nextpc = pc_val + length;
4536 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
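/* TBH: table branch, halfword offsets. */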
4539 CORE_ADDR tbl_reg, table, offset, length;
4541 tbl_reg = bits (inst1, 0, 3);
4542 if (tbl_reg == 0x0f)
4543 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4545 table = get_frame_register_unsigned (frame, tbl_reg);
4547 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4548 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
4549 nextpc = pc_val + length;
4552 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
4554 if (bits (inst1, 3, 6) == 0x0f)
4557 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4559 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
4561 if (bits (inst1, 3, 6) == 0x0f)
4564 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4566 nextpc = MAKE_THUMB_ADDR (nextpc);
4568 else if ((inst1 & 0xf500) == 0xb100)
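/* CBZ or CBNZ: compare and branch on (non-)zero. */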
4571 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
4572 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
4574 if (bit (inst1, 11) && reg != 0)
4575 nextpc = pc_val + imm;
4576 else if (!bit (inst1, 11) && reg == 0)
4577 nextpc = pc_val + imm;
4582 /* Get the raw next address. PC is the current program counter, in
4583 FRAME, which is assumed to be executing in ARM mode.
4585 The value returned has the execution state of the next instruction
4586 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4587 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory address. */
4591 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
4593 struct gdbarch *gdbarch = get_frame_arch (frame);
4594 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4595 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4596 unsigned long pc_val;
4597 unsigned long this_instr;
4598 unsigned long status;
4601 pc_val = (unsigned long) pc;
4602 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
4604 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4605 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
4607 if (bits (this_instr, 28, 31) == INST_NV)
4608 switch (bits (this_instr, 24, 27))
4613 /* Branch with Link and change to Thumb. */
4614 nextpc = BranchDest (pc, this_instr);
4615 nextpc |= bit (this_instr, 24) << 1;
4616 nextpc = MAKE_THUMB_ADDR (nextpc);
4622 /* Coprocessor register transfer. */
4623 if (bits (this_instr, 12, 15) == 15)
4624 error (_("Invalid update to pc in instruction"));
4627 else if (condition_true (bits (this_instr, 28, 31), status))
4629 switch (bits (this_instr, 24, 27))
4632 case 0x1: /* data processing */
4636 unsigned long operand1, operand2, result = 0;
4640 if (bits (this_instr, 12, 15) != 15)
4643 if (bits (this_instr, 22, 25) == 0
4644 && bits (this_instr, 4, 7) == 9) /* multiply */
4645 error (_("Invalid update to pc in instruction"));
4647 /* BX <reg>, BLX <reg> */
4648 if (bits (this_instr, 4, 27) == 0x12fff1
4649 || bits (this_instr, 4, 27) == 0x12fff3)
4651 rn = bits (this_instr, 0, 3);
4652 nextpc = ((rn == ARM_PC_REGNUM)
4654 : get_frame_register_unsigned (frame, rn));
4659 /* Multiply into PC. */
4660 c = (status & FLAG_C) ? 1 : 0;
4661 rn = bits (this_instr, 16, 19);
4662 operand1 = ((rn == ARM_PC_REGNUM)
4664 : get_frame_register_unsigned (frame, rn));
4666 if (bit (this_instr, 25))
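/* Operand 2 is an 8-bit immediate rotated right by twice the 4-bit rotate field. */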
4668 unsigned long immval = bits (this_instr, 0, 7);
4669 unsigned long rotate = 2 * bits (this_instr, 8, 11);
4670 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
4673 else /* operand 2 is a shifted register. */
4674 operand2 = shifted_reg_val (frame, this_instr, c,
4677 switch (bits (this_instr, 21, 24))
4680 result = operand1 & operand2;
4684 result = operand1 ^ operand2;
4688 result = operand1 - operand2;
4692 result = operand2 - operand1;
4696 result = operand1 + operand2;
4700 result = operand1 + operand2 + c;
4704 result = operand1 - operand2 + c;
4708 result = operand2 - operand1 + c;
4714 case 0xb: /* tst, teq, cmp, cmn */
4715 result = (unsigned long) nextpc;
4719 result = operand1 | operand2;
4723 /* Always step into a function. */
4728 result = operand1 & ~operand2;
4736 /* In 26-bit APCS the bottom two bits of the result are
4737 ignored, and we always end up in ARM state. */
4739 nextpc = arm_addr_bits_remove (gdbarch, result);
4747 case 0x5: /* data transfer */
4750 if (bit (this_instr, 20))
4753 if (bits (this_instr, 12, 15) == 15)
4759 if (bit (this_instr, 22))
4760 error (_("Invalid update to pc in instruction"));
4762 /* byte write to PC */
4763 rn = bits (this_instr, 16, 19);
4764 base = ((rn == ARM_PC_REGNUM)
4766 : get_frame_register_unsigned (frame, rn));
4768 if (bit (this_instr, 24))
4771 int c = (status & FLAG_C) ? 1 : 0;
4772 unsigned long offset =
4773 (bit (this_instr, 25)
4774 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
4775 : bits (this_instr, 0, 11));
4777 if (bit (this_instr, 23))
4782 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
4789 case 0x9: /* block transfer */
4790 if (bit (this_instr, 20))
4793 if (bit (this_instr, 15))
4798 if (bit (this_instr, 23))
4801 unsigned long reglist = bits (this_instr, 0, 14);
4802 offset = bitcount (reglist) * 4;
4803 if (bit (this_instr, 24)) /* pre */
4806 else if (bit (this_instr, 24))
4810 unsigned long rn_val =
4811 get_frame_register_unsigned (frame,
4812 bits (this_instr, 16, 19));
4814 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
4822 case 0xb: /* branch & link */
4823 case 0xa: /* branch */
4825 nextpc = BranchDest (pc, this_instr);
4831 case 0xe: /* coproc ops */
4835 struct gdbarch_tdep *tdep;
4836 tdep = gdbarch_tdep (gdbarch);
4838 if (tdep->syscall_next_pc != NULL)
4839 nextpc = tdep->syscall_next_pc (frame);
4845 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
4853 /* Determine next PC after current instruction executes. Will call either
4854 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
4855 loop is detected. */
4858 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4862 if (arm_frame_is_thumb (frame))
4864 nextpc = thumb_get_next_pc_raw (frame, pc);
4865 if (nextpc == MAKE_THUMB_ADDR (pc))
4866 error (_("Infinite loop detected"));
4870 nextpc = arm_get_next_pc_raw (frame, pc);
4872 error (_("Infinite loop detected"));
4878 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
4879 of the appropriate mode (as encoded in the PC value), even if this
4880 differs from what would be expected according to the symbol tables. */
4883 arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
4884 struct address_space *aspace,
4887 struct cleanup *old_chain
4888 = make_cleanup_restore_integer (&arm_override_mode);
4890 arm_override_mode = IS_THUMB_ADDR (pc);
4891 pc = gdbarch_addr_bits_remove (gdbarch, pc);
4893 insert_single_step_breakpoint (gdbarch, aspace, pc);
4895 do_cleanups (old_chain);
4898 /* single_step() is called just before we want to resume the inferior,
4899 if we want to single-step it but there is no hardware or kernel
4900 single-step support. We find the target of the coming instruction
4901 and breakpoint it. */
4904 arm_software_single_step (struct frame_info *frame)
4906 struct gdbarch *gdbarch = get_frame_arch (frame);
4907 struct address_space *aspace = get_frame_address_space (frame);
4908 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4910 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4915 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4916 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4917 NULL if an error occurs. BUF is freed. */
4920 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4921 int old_len, int new_len)
4923 gdb_byte *new_buf, *middle;
4924 int bytes_to_read = new_len - old_len;
4926 new_buf = xmalloc (new_len);
4927 memcpy (new_buf + bytes_to_read, buf, old_len);
4929 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
4937 /* An IT block is at most the 2-byte IT instruction followed by
4938 four 4-byte instructions. The furthest back we must search to
4939 find an IT block that affects the current instruction is thus
4940 2 + 3 * 4 == 14 bytes. */
4941 #define MAX_IT_BLOCK_PREFIX 14
4943 /* Use a quick scan if there are more than this many bytes of instructions. */
4945 #define IT_SCAN_THRESHOLD 32
4947 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4948 A breakpoint in an IT block may not be hit, depending on the condition, so move such breakpoints to the IT instruction itself instead. */
4951 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4955 CORE_ADDR boundary, func_start;
4956 int buf_len, buf2_len;
4957 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4958 int i, any, last_it, last_it_count;
4960 /* If we are using BKPT breakpoints, none of this is necessary. */
4961 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4964 /* ARM mode does not have this problem. */
4965 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4968 /* We are setting a breakpoint in Thumb code that could potentially
4969 contain an IT block. The first step is to find how much Thumb
4970 code there is; we do not need to read outside of known Thumb code. */
4972 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4974 /* Thumb-2 code must have mapping symbols to have a chance. */
4977 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4979 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4980 && func_start > boundary)
4981 boundary = func_start;
4983 /* Search for a candidate IT instruction. We have to do some fancy
4984 footwork to distinguish a real IT instruction from the second
4985 half of a 32-bit instruction, but there is no need for that if
4986 there's no candidate. */
4987 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4989 /* No room for an IT instruction. */
4992 buf = xmalloc (buf_len);
4993 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4996 for (i = 0; i < buf_len; i += 2)
4998 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4999 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5011 /* OK, the code bytes before this instruction contain at least one
5012 halfword which resembles an IT instruction. We know that it's
5013 Thumb code, but there are still two possibilities. Either the
5014 halfword really is an IT instruction, or it is the second half of
5015 a 32-bit Thumb instruction. The only way we can tell is to
5016 scan forwards from a known instruction boundary. */
5017 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5021 /* There's a lot of code before this instruction. Start with an
5022 optimistic search; it's easy to recognize halfwords that can
5023 not be the start of a 32-bit instruction, and use that to
5024 lock on to the instruction boundaries. */
5025 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5028 buf_len = IT_SCAN_THRESHOLD;
5031 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5033 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5034 if (thumb_insn_size (inst1) == 2)
5041 /* At this point, if DEFINITE, BUF[I] is the first place we
5042 are sure that we know the instruction boundaries, and it is far
5043 enough from BPADDR that we could not miss an IT instruction
5044 affecting BPADDR. If ! DEFINITE, give up - start from a known boundary. */
5048 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5052 buf_len = bpaddr - boundary;
5058 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5061 buf_len = bpaddr - boundary;
5065 /* Scan forwards. Find the last IT instruction before BPADDR. */
5070 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5072 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
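/* An IT instruction; the position of the lowest set bit in its mask gives the number of instructions in the block. */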
5077 else if (inst1 & 0x0002)
5079 else if (inst1 & 0x0004)
5084 i += thumb_insn_size (inst1);
5090 /* There wasn't really an IT instruction after all. */
5093 if (last_it_count < 1)
5094 /* It was too far away. */
5097 /* This really is a trouble spot. Move the breakpoint to the IT instruction. */
5099 return bpaddr - buf_len + last_it;
5102 /* ARM displaced stepping support.
5104 Generally ARM displaced stepping works as follows:
5106 1. When an instruction is to be single-stepped, it is first decoded by
5107 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5108 Depending on the type of instruction, it is then copied to a scratch
5109 location, possibly in a modified form. The copy_* set of functions
5110 performs such modification, as necessary. A breakpoint is placed after
5111 the modified instruction in the scratch space to return control to GDB.
5112 Note in particular that instructions which modify the PC will no longer
5113 do so after modification.
5115 2. The instruction is single-stepped, by setting the PC to the scratch
5116 location address, and resuming. Control returns to GDB when the
5119 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5120 function used for the current instruction. This function's job is to
5121 put the CPU/memory state back to what it would have been if the
5122 instruction had been executed unmodified in its original location. */
5124 /* NOP instruction (mov r0, r0). */
5125 #define ARM_NOP 0xe1a00000
5127 /* Helper for register reads for displaced stepping. In particular, this
5128 returns the PC as it would be seen by the instruction at its original location. */
5132 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5136 CORE_ADDR from = dsc->insn_addr;
5138 if (regno == ARM_PC_REGNUM)
5140 /* Compute pipeline offset:
5141 - When executing an ARM instruction, PC reads as the address of the
5142 current instruction plus 8.
5143 - When executing a Thumb instruction, PC reads as the address of the
5144 current instruction plus 4. */
5151 if (debug_displaced)
5152 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5153 (unsigned long) from);
5154 return (ULONGEST) from;
5158 regcache_cooked_read_unsigned (regs, regno, &ret);
5159 if (debug_displaced)
5160 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5161 regno, (unsigned long) ret);
5167 displaced_in_arm_mode (struct regcache *regs)
5170 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5172 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5174 return (ps & t_bit) == 0;
5177 /* Write to the PC as from a branch instruction. */
5180 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5184 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5185 architecture versions < 6. */
5186 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5187 val & ~(ULONGEST) 0x3);
5189 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5190 val & ~(ULONGEST) 0x1);
5193 /* Write to the PC as from a branch-exchange instruction. */
5196 bx_write_pc (struct regcache *regs, ULONGEST val)
5199 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5201 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
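/* Bit 0 of the destination selects the new execution state: set for Thumb, clear (with bit 1 also clear) for ARM. */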
5205 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5206 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5208 else if ((val & 2) == 0)
5210 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5211 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5215 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5216 mode, align dest to 4 bytes). */
5217 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5218 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5219 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5223 /* Write to the PC as if from a load instruction. */
5226 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5229 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5230 bx_write_pc (regs, val);
5232 branch_write_pc (regs, dsc, val);
5235 /* Write to the PC as if from an ALU instruction. */
5238 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5241 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5242 bx_write_pc (regs, val);
5244 branch_write_pc (regs, dsc, val);
5247 /* Helper for writing to registers for displaced stepping. Writing to the PC
5248 has varying effects depending on the instruction which does the write:
5249 this is controlled by the WRITE_PC argument. */
5252 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5253 int regno, ULONGEST val, enum pc_write_style write_pc)
5255 if (regno == ARM_PC_REGNUM)
5257 if (debug_displaced)
5258 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5259 (unsigned long) val);
5262 case BRANCH_WRITE_PC:
5263 branch_write_pc (regs, dsc, val);
5267 bx_write_pc (regs, val);
5271 load_write_pc (regs, dsc, val);
5275 alu_write_pc (regs, dsc, val);
5278 case CANNOT_WRITE_PC:
5279 warning (_("Instruction wrote to PC in an unexpected way when "
5280 "single-stepping"));
5284 internal_error (__FILE__, __LINE__,
5285 _("Invalid argument to displaced_write_reg"));
5288 dsc->wrote_to_pc = 1;
5292 if (debug_displaced)
5293 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5294 regno, (unsigned long) val);
5295 regcache_cooked_write_unsigned (regs, regno, val);
5299 /* This function is used to concisely determine if an instruction INSN
5300 references PC. Register fields of interest in INSN should have the
5301 corresponding fields of BITMASK set to 0b1111. The function
5302 returns 1 if any of these fields in INSN reference the PC
5303 (also 0b1111, r15), else it returns 0. */
5306 insn_references_pc (uint32_t insn, uint32_t bitmask)
5308 uint32_t lowbit = 1;
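/* Check each four-bit register field selected by BITMASK for the value 0xf, i.e. the PC. */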
5310 while (bitmask != 0)
5314 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
5320 mask = lowbit * 0xf;
5322 if ((insn & mask) == mask)
5331 /* The simplest copy function. Many instructions have the same effect no
5332 matter what address they are executed at: in those cases, use this. */
5335 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5336 const char *iname, struct displaced_step_closure *dsc)
5338 if (debug_displaced)
5339 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5340 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5343 dsc->modinsn[0] = insn;
5348 /* Preload instructions with immediate offset. */
5351 cleanup_preload (struct gdbarch *gdbarch,
5352 struct regcache *regs, struct displaced_step_closure *dsc)
5354 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5355 if (!dsc->u.preload.immed)
5356 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5360 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
5361 struct displaced_step_closure *dsc, unsigned int rn)
5364 /* Preload instructions:
5366 {pli/pld} [rn, #+/-imm]
5368 {pli/pld} [r0, #+/-imm]. */
5370 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5371 rn_val = displaced_read_reg (regs, dsc, rn);
5372 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5373 dsc->u.preload.immed = 1;
5375 dsc->cleanup = &cleanup_preload;
5379 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5380 struct displaced_step_closure *dsc)
5382 unsigned int rn = bits (insn, 16, 19);
5384 if (!insn_references_pc (insn, 0x000f0000ul))
5385 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
5387 if (debug_displaced)
5388 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5389 (unsigned long) insn);
5391 dsc->modinsn[0] = insn & 0xfff0ffff;
5393 install_preload (gdbarch, regs, dsc, rn);
5398 /* Preload instructions with register offset. */
5401 install_preload_reg (struct gdbarch *gdbarch, struct regcache *regs,
5402 struct displaced_step_closure *dsc, unsigned int rn,
5405 ULONGEST rn_val, rm_val;
5407 /* Preload register-offset instructions:
5409 {pli/pld} [rn, rm {, shift}]
5411 {pli/pld} [r0, r1 {, shift}]. */
5413 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5414 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5415 rn_val = displaced_read_reg (regs, dsc, rn);
5416 rm_val = displaced_read_reg (regs, dsc, rm);
5417 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5418 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5419 dsc->u.preload.immed = 0;
5421 dsc->cleanup = &cleanup_preload;
5425 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5426 struct regcache *regs,
5427 struct displaced_step_closure *dsc)
5429 unsigned int rn = bits (insn, 16, 19);
5430 unsigned int rm = bits (insn, 0, 3);
5433 if (!insn_references_pc (insn, 0x000f000ful))
5434 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
5436 if (debug_displaced)
5437 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5438 (unsigned long) insn);
5440 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5442 install_preload_reg (gdbarch, regs, dsc, rn, rm);
5446 /* Copy/cleanup coprocessor load and store instructions. */
5449 cleanup_copro_load_store (struct gdbarch *gdbarch,
5450 struct regcache *regs,
5451 struct displaced_step_closure *dsc)
5453 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5455 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5457 if (dsc->u.ldst.writeback)
5458 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5462 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
5463 struct displaced_step_closure *dsc,
5464 int writeback, unsigned int rn)
5468 /* Coprocessor load/store instructions:
5470 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5472 {stc/stc2} [r0, #+/-imm].
5474 ldc/ldc2 are handled identically. */
5476 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5477 rn_val = displaced_read_reg (regs, dsc, rn);
5478 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5480 dsc->u.ldst.writeback = writeback;
5481 dsc->u.ldst.rn = rn;
5483 dsc->cleanup = &cleanup_copro_load_store;
5487 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5488 struct regcache *regs,
5489 struct displaced_step_closure *dsc)
5491 unsigned int rn = bits (insn, 16, 19);
5493 if (!insn_references_pc (insn, 0x000f0000ul))
5494 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5496 if (debug_displaced)
5497 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5498 "load/store insn %.8lx\n", (unsigned long) insn);
5500 dsc->modinsn[0] = insn & 0xfff0ffff;
5502 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
5507 /* Clean up branch instructions (actually perform the branch, by setting
5511 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5512 struct displaced_step_closure *dsc)
5514 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
5515 int branch_taken = condition_true (dsc->u.branch.cond, status);
5516 enum pc_write_style write_pc = dsc->u.branch.exchange
5517 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5522 if (dsc->u.branch.link)
5524 /* LR should hold the address of the instruction following the current
5525 one. To avoid confusing logic that later handles the insn `bx lr', set
5526 bit 0 of the LR value when the current instruction executes in Thumb mode. */
5527 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
5530 next_insn_addr |= 0x1;
5532 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
5536 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
5539 /* Copy B/BL/BLX instructions with immediate destinations. */
5542 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
5543 struct displaced_step_closure *dsc,
5544 unsigned int cond, int exchange, int link, long offset)
5546 /* Implement "BL<cond> <label>" as:
5548 Preparation: cond <- instruction condition
5549 Insn: mov r0, r0 (nop)
5550 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5552 B<cond> similar, but don't set r14 in cleanup. */
5554 dsc->u.branch.cond = cond;
5555 dsc->u.branch.link = link;
5556 dsc->u.branch.exchange = exchange;
5559 dsc->u.branch.dest = dsc->insn_addr + 4 + offset;
5561 dsc->u.branch.dest = dsc->insn_addr + 8 + offset;
5563 dsc->cleanup = &cleanup_branch;
5566 arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
5567 struct regcache *regs, struct displaced_step_closure *dsc)
5569 unsigned int cond = bits (insn, 28, 31);
5570 int exchange = (cond == 0xf);
5571 int link = exchange || bit (insn, 24);
5574 if (debug_displaced)
5575 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
5576 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
5577 (unsigned long) insn);
5579 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5580 then arrange the switch into Thumb mode. */
5581 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
5583 offset = bits (insn, 0, 23) << 2;
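/* Sign-extend the 26-bit branch offset. */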
5585 if (bit (offset, 25))
5586 offset = offset | ~0x3ffffff;
5588 dsc->modinsn[0] = ARM_NOP;
5590 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
5594 /* Copy BX/BLX with register-specified destinations. */
5597 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
5598 struct displaced_step_closure *dsc, int link,
5599 unsigned int cond, unsigned int rm)
5601 /* Implement {BX,BLX}<cond> <reg>" as:
5603 Preparation: cond <- instruction condition
5604 Insn: mov r0, r0 (nop)
5605 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5607 Don't set r14 in cleanup for BX. */
5609 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
5611 dsc->u.branch.cond = cond;
5612 dsc->u.branch.link = link;
5614 dsc->u.branch.exchange = 1;
5616 dsc->cleanup = &cleanup_branch;
5620 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5621 struct regcache *regs, struct displaced_step_closure *dsc)
5623 unsigned int cond = bits (insn, 28, 31);
5626 int link = bit (insn, 5);
5627 unsigned int rm = bits (insn, 0, 3);
5629 if (debug_displaced)
5630 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
5631 (unsigned long) insn);
5633 dsc->modinsn[0] = ARM_NOP;
5635 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
5639 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5642 cleanup_alu_imm (struct gdbarch *gdbarch,
5643 struct regcache *regs, struct displaced_step_closure *dsc)
5645 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5646 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5647 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5648 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5652 arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5653 struct displaced_step_closure *dsc)
5655 unsigned int rn = bits (insn, 16, 19);
5656 unsigned int rd = bits (insn, 12, 15);
5657 unsigned int op = bits (insn, 21, 24);
5658 int is_mov = (op == 0xd);
5659 ULONGEST rd_val, rn_val;
5661 if (!insn_references_pc (insn, 0x000ff000ul))
5662 return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
5664 if (debug_displaced)
5665 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
5666 "%.8lx\n", is_mov ? "move" : "ALU",
5667 (unsigned long) insn);
5669 /* Instruction is of form:
5671 <op><cond> rd, [rn,] #imm
5675 Preparation: tmp1, tmp2 <- r0, r1;
5677 Insn: <op><cond> r0, r1, #imm
5678 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5681 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5682 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5683 rn_val = displaced_read_reg (regs, dsc, rn);
5684 rd_val = displaced_read_reg (regs, dsc, rd);
5685 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5686 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5690 dsc->modinsn[0] = insn & 0xfff00fff;
5692 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
5694 dsc->cleanup = &cleanup_alu_imm;
5699 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5702 cleanup_alu_reg (struct gdbarch *gdbarch,
5703 struct regcache *regs, struct displaced_step_closure *dsc)
5708 rd_val = displaced_read_reg (regs, dsc, 0);
5710 for (i = 0; i < 3; i++)
5711 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5713 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5717 install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
5718 struct displaced_step_closure *dsc,
5719 unsigned int rd, unsigned int rn, unsigned int rm)
5721 ULONGEST rd_val, rn_val, rm_val;
5723 /* Instruction is of form:
5725 <op><cond> rd, [rn,] rm [, <shift>]
5729 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
5730 r0, r1, r2 <- rd, rn, rm
5731 Insn: <op><cond> r0, r1, r2 [, <shift>]
5732 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
5735 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5736 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5737 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5738 rd_val = displaced_read_reg (regs, dsc, rd);
5739 rn_val = displaced_read_reg (regs, dsc, rn);
5740 rm_val = displaced_read_reg (regs, dsc, rm);
5741 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5742 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5743 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5746 dsc->cleanup = &cleanup_alu_reg;
5750 arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5751 struct displaced_step_closure *dsc)
5753 unsigned int op = bits (insn, 21, 24);
5754 int is_mov = (op == 0xd);
5756 if (!insn_references_pc (insn, 0x000ff00ful))
5757 return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
5759 if (debug_displaced)
5760 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
5761 is_mov ? "move" : "ALU", (unsigned long) insn);
5764 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
5766 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
5768 install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
5773 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
5776 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
5777 struct regcache *regs,
5778 struct displaced_step_closure *dsc)
5780 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5783 for (i = 0; i < 4; i++)
5784 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5786 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5790 install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
5791 struct displaced_step_closure *dsc,
5792 unsigned int rd, unsigned int rn, unsigned int rm,
5796 ULONGEST rd_val, rn_val, rm_val, rs_val;
5798 /* Instruction is of form:
5800 <op><cond> rd, [rn,] rm, <shift> rs
5804 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
5805 r0, r1, r2, r3 <- rd, rn, rm, rs
5806 Insn: <op><cond> r0, r1, r2, <shift> r3
5808 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
5812 for (i = 0; i < 4; i++)
5813 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
5815 rd_val = displaced_read_reg (regs, dsc, rd);
5816 rn_val = displaced_read_reg (regs, dsc, rn);
5817 rm_val = displaced_read_reg (regs, dsc, rm);
5818 rs_val = displaced_read_reg (regs, dsc, rs);
5819 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5820 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5821 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5822 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
5824 dsc->cleanup = &cleanup_alu_shifted_reg;
5828 arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
5829 struct regcache *regs,
5830 struct displaced_step_closure *dsc)
5832 unsigned int op = bits (insn, 21, 24);
5833 int is_mov = (op == 0xd);
5834 unsigned int rd, rn, rm, rs;
5836 if (!insn_references_pc (insn, 0x000fff0ful))
5837 return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
5839 if (debug_displaced)
5840 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
5841 "%.8lx\n", is_mov ? "move" : "ALU",
5842 (unsigned long) insn);
5844 rn = bits (insn, 16, 19);
5845 rm = bits (insn, 0, 3);
5846 rs = bits (insn, 8, 11);
5847 rd = bits (insn, 12, 15);
5850 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
5852 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
5854 install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
5859 /* Clean up load instructions. */
5862 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
5863 struct displaced_step_closure *dsc)
5865 ULONGEST rt_val, rt_val2 = 0, rn_val;
5867 rt_val = displaced_read_reg (regs, dsc, 0);
5868 if (dsc->u.ldst.xfersize == 8)
5869 rt_val2 = displaced_read_reg (regs, dsc, 1);
5870 rn_val = displaced_read_reg (regs, dsc, 2);
5872 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5873 if (dsc->u.ldst.xfersize > 4)
5874 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5875 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5876 if (!dsc->u.ldst.immed)
5877 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5879 /* Handle register writeback. */
5880 if (dsc->u.ldst.writeback)
5881 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5882 /* Put result in right place. */
5883 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
5884 if (dsc->u.ldst.xfersize == 8)
5885 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
5888 /* Clean up store instructions. */
5891 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
5892 struct displaced_step_closure *dsc)
5894 ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);
5896 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5897 if (dsc->u.ldst.xfersize > 4)
5898 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5899 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5900 if (!dsc->u.ldst.immed)
5901 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5902 if (!dsc->u.ldst.restore_r4)
5903 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
5906 if (dsc->u.ldst.writeback)
5907 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5910 /* Copy "extra" load/store instructions. These are halfword/doubleword
5911 transfers, which have a different encoding to byte/word transfers. */
5914 arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
5915 struct regcache *regs, struct displaced_step_closure *dsc)
5917 unsigned int op1 = bits (insn, 20, 24);
5918 unsigned int op2 = bits (insn, 5, 6);
5919 unsigned int rt = bits (insn, 12, 15);
5920 unsigned int rn = bits (insn, 16, 19);
5921 unsigned int rm = bits (insn, 0, 3);
5922 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
5923 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
5924 int immed = (op1 & 0x4) != 0;
5926 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
5928 if (!insn_references_pc (insn, 0x000ff00ful))
5929 return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
5931 if (debug_displaced)
5932 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
5933 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
5934 (unsigned long) insn);
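/* Fold the relevant bits of op1 and op2 into an index for the LOAD and BYTESIZE tables below. */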
5936 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
5939 internal_error (__FILE__, __LINE__,
5940 _("copy_extra_ld_st: instruction decode error"));
5942 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5943 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5944 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5946 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
5948 rt_val = displaced_read_reg (regs, dsc, rt);
5949 if (bytesize[opcode] == 8)
5950 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
5951 rn_val = displaced_read_reg (regs, dsc, rn);
5953 rm_val = displaced_read_reg (regs, dsc, rm);
5955 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5956 if (bytesize[opcode] == 8)
5957 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
5958 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5960 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5963 dsc->u.ldst.xfersize = bytesize[opcode];
5964 dsc->u.ldst.rn = rn;
5965 dsc->u.ldst.immed = immed;
5966 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5967 dsc->u.ldst.restore_r4 = 0;
5970 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
5972 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
5973 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5975 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
5977 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
5978 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5980 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
5985 /* Copy byte/word loads and stores. */
5988 install_ldr_str_ldrb_strb (struct gdbarch *gdbarch, struct regcache *regs,
5989 struct displaced_step_closure *dsc, int load,
5990 int immed, int writeback, int byte, int usermode,
5991 int rt, int rm, int rn)
5993 ULONGEST rt_val, rn_val, rm_val = 0;
5995 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5996 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5998 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6000 dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);
6002 rt_val = displaced_read_reg (regs, dsc, rt);
6003 rn_val = displaced_read_reg (regs, dsc, rn);
6005 rm_val = displaced_read_reg (regs, dsc, rm);
6007 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
6008 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
6010 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
6012 dsc->u.ldst.xfersize = byte ? 1 : 4;
6013 dsc->u.ldst.rn = rn;
6014 dsc->u.ldst.immed = immed;
6015 dsc->u.ldst.writeback = writeback;
6017 /* To write PC we can do:
6019 Before this sequence of instructions:
6020 r0 is the PC value obtained from displaced_read_reg, so r0 = from + 8;
6021 r2 is the Rn value obtained from displaced_read_reg.
6023 Insn1: push {pc} Write address of STR instruction + offset on stack
6024 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
6025 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
6026 = addr(Insn1) + offset - addr(Insn3) - 8
6028 Insn4: add r4, r4, #8 r4 = offset - 8
6029 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
6031 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
6033 Otherwise we don't know what value to write for PC, since the offset is
6034 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
6035 of this can be found in Section "Saving from r15" in
6036 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
6038 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6042 arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
6043 struct regcache *regs,
6044 struct displaced_step_closure *dsc,
6045 int load, int byte, int usermode)
6047 int immed = !bit (insn, 25);
6048 int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
6049 unsigned int rt = bits (insn, 12, 15);
6050 unsigned int rn = bits (insn, 16, 19);
6051 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
6053 if (!insn_references_pc (insn, 0x000ff00ful))
6054 return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);
6056 if (debug_displaced)
6057 fprintf_unfiltered (gdb_stdlog,
6058 "displaced: copying %s%s r%d [r%d] insn %.8lx\n",
6059 load ? (byte ? "ldrb" : "ldr")
6060 : (byte ? "strb" : "str"), usermode ? "t" : "",
6062 (unsigned long) insn);
6064 install_ldr_str_ldrb_strb (gdbarch, regs, dsc, load, immed, writeback, byte,
6065 usermode, rt, rm, rn);
6067 if (load || rt != ARM_PC_REGNUM)
6069 dsc->u.ldst.restore_r4 = 0;
6072 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
6074 {ldr,str}[b]<cond> r0, [r2, #imm]. */
6075 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
6077 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
6079 {ldr,str}[b]<cond> r0, [r2, r3]. */
6080 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
6084 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
6085 dsc->u.ldst.restore_r4 = 1;
6086 dsc->modinsn[0] = 0xe92d8000; /* push {pc} */
6087 dsc->modinsn[1] = 0xe8bd0010; /* pop {r4} */
6088 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
6089 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
6090 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
6094 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
6096 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
6101 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6106 /* Cleanup LDM instructions with fully-populated register list. This is an
6107 unfortunate corner case: it's impossible to implement correctly by modifying
6108 the instruction. The issue is as follows: we have an instruction,
6112 which we must rewrite to avoid loading PC. A possible solution would be to
6113 do the load in two halves, something like (with suitable cleanup afterwards):
6117 ldm[id][ab] r8!, {r0-r7}
6119 ldm[id][ab] r8, {r7-r14}
6122 but at present there's no suitable place for <temp>, since the scratch space
6123 is overwritten before the cleanup routine is called. For now, we simply
6124 emulate the instruction. */
6127 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
6128 struct displaced_step_closure *dsc)
6130 int inc = dsc->u.block.increment;
6131 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
6132 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
6133 uint32_t regmask = dsc->u.block.regmask;
6134 int regno = inc ? 0 : 15;
6135 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
6136 int exception_return = dsc->u.block.load && dsc->u.block.user
6137 && (regmask & 0x8000) != 0;
6138 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6139 int do_transfer = condition_true (dsc->u.block.cond, status);
6140 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6145 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6146 sensible we can do here. Complain loudly. */
6147 if (exception_return)
6148 error (_("Cannot single-step exception return"));
6150 /* We don't handle any stores here for now. */
6151 gdb_assert (dsc->u.block.load != 0);
6153 if (debug_displaced)
6154 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
6155 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
6156 dsc->u.block.increment ? "inc" : "dec",
6157 dsc->u.block.before ? "before" : "after");
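/* Emulate the transfer, walking the register list in transfer order and loading one word per register. */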
6164 while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
6167 while (regno >= 0 && (regmask & (1 << regno)) == 0)
6170 xfer_addr += bump_before;
6172 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
6173 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
6175 xfer_addr += bump_after;
6177 regmask &= ~(1 << regno);
6180 if (dsc->u.block.writeback)
6181 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
6185 /* Clean up an STM which included the PC in the register list. */
6188 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
6189 struct displaced_step_closure *dsc)
6191 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6192 int store_executed = condition_true (dsc->u.block.cond, status);
6193 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
6194 CORE_ADDR stm_insn_addr;
6197 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6199 /* If condition code fails, there's nothing else to do. */
6200 if (!store_executed)
6203 if (dsc->u.block.increment)
6205 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
6207 if (dsc->u.block.before)
6212 pc_stored_at = dsc->u.block.xfer_addr;
6214 if (dsc->u.block.before)
6218 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
6219 stm_insn_addr = dsc->scratch_base;
6220 offset = pc_val - stm_insn_addr;
6222 if (debug_displaced)
6223 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
6224 "STM instruction\n", offset);
6226 /* Rewrite the stored PC to the proper value for the non-displaced original
6228 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
6229 dsc->insn_addr + offset);
6232 /* Clean up an LDM which includes the PC in the register list. We clumped all
6233 the registers in the transferred list into a contiguous range r0...rX (to
6234 avoid loading PC directly and losing control of the debugged program), so we
6235 must undo that here. */
6238 cleanup_block_load_pc (struct gdbarch *gdbarch,
6239 struct regcache *regs,
6240 struct displaced_step_closure *dsc)
6242 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6243 int load_executed = condition_true (dsc->u.block.cond, status), i;
6244 unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
6245 unsigned int regs_loaded = bitcount (mask);
6246 unsigned int num_to_shuffle = regs_loaded, clobbered;
6248 /* The method employed here will fail if the register list is fully populated
6249 (we need to avoid loading PC directly). */
6250 gdb_assert (num_to_shuffle < 16);
6255 clobbered = (1 << num_to_shuffle) - 1;
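/* CLOBBERED is the set of low registers overwritten by the modified LDM that may still need restoring afterwards. */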
6257 while (num_to_shuffle > 0)
6259 if ((mask & (1 << write_reg)) != 0)
6261 unsigned int read_reg = num_to_shuffle - 1;
6263 if (read_reg != write_reg)
6265 ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
6266 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
6267 if (debug_displaced)
6268 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
6269 "loaded register r%d to r%d\n"), read_reg,
6272 else if (debug_displaced)
6273 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
6274 "r%d already in the right place\n"),
6277 clobbered &= ~(1 << write_reg);
6285 /* Restore any registers we scribbled over. */
6286 for (write_reg = 0; clobbered != 0; write_reg++)
6288 if ((clobbered & (1 << write_reg)) != 0)
6290 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
6292 if (debug_displaced)
6293 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
6294 "clobbered register r%d\n"), write_reg);
6295 clobbered &= ~(1 << write_reg);
6299 /* Perform register writeback manually. */
6300 if (dsc->u.block.writeback)
6302 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
6304 if (dsc->u.block.increment)
6305 new_rn_val += regs_loaded * 4;
6307 new_rn_val -= regs_loaded * 4;
6309 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
6314 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6315 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6318 arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
6319 struct regcache *regs,
6320 struct displaced_step_closure *dsc)
6322 int load = bit (insn, 20);
6323 int user = bit (insn, 22);
6324 int increment = bit (insn, 23);
6325 int before = bit (insn, 24);
6326 int writeback = bit (insn, 21);
6327 int rn = bits (insn, 16, 19);
6329 /* Block transfers which don't mention PC can be run directly out-of-line. */
6331 if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
6332 return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
6334 if (rn == ARM_PC_REGNUM)
6336 warning (_("displaced: Unpredictable LDM or STM with "
6337 "base register r15"));
6338 return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
6341 if (debug_displaced)
6342 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6343 "%.8lx\n", (unsigned long) insn);
6345 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
6346 dsc->u.block.rn = rn;
6348 dsc->u.block.load = load;
6349 dsc->u.block.user = user;
6350 dsc->u.block.increment = increment;
6351 dsc->u.block.before = before;
6352 dsc->u.block.writeback = writeback;
6353 dsc->u.block.cond = bits (insn, 28, 31);
6355 dsc->u.block.regmask = insn & 0xffff;
6359 if ((insn & 0xffff) == 0xffff)
6361 /* LDM with a fully-populated register list. This case is
6362 particularly tricky. Implement for now by fully emulating the
6363 instruction (which might not behave perfectly in all cases, but
6364 these instructions should be rare enough for that not to matter in practice). */
6366 dsc->modinsn[0] = ARM_NOP;
6368 dsc->cleanup = &cleanup_block_load_all;
6372 /* LDM of a list of registers which includes PC. Implement by
6373 rewriting the list of registers to be transferred into a
6374 contiguous chunk r0...rX before doing the transfer, then shuffling
6375 registers into the correct places in the cleanup routine. */
6376 unsigned int regmask = insn & 0xffff;
6377 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6378 unsigned int to = 0, from = 0, i, new_rn;
6380 for (i = 0; i < num_in_list; i++)
6381 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6383 /* Writeback makes things complicated. We need to avoid clobbering
6384 the base register with one of the registers in our modified
6385 register list, but just using a different register can't work in
6388 ldm r14!, {r0-r13,pc}
6390 which would need to be rewritten as:
6394 but that can't work, because there's no free register for N.
6396 Solve this by turning off the writeback bit, and emulating
6397 writeback manually in the cleanup routine. */
6402 new_regmask = (1 << num_in_list) - 1;
6404 if (debug_displaced)
6405 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6406 "{..., pc}: original reg list %.4x, modified "
6407 "list %.4x\n"), rn, writeback ? "!" : "",
6408 (int) insn & 0xffff, new_regmask);
6410 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
6412 dsc->cleanup = &cleanup_block_load_pc;
6417 /* STM of a list of registers which includes PC. Run the instruction
6418 as-is, but out of line: this will store the wrong value for the PC,
6419 so we must manually fix up the memory in the cleanup routine.
6420 Doing things this way has the advantage that we can auto-detect
6421 the offset of the PC write (which is architecture-dependent) in
6422 the cleanup routine. */
6423 dsc->modinsn[0] = insn;
6425 dsc->cleanup = &cleanup_block_store_pc;
6431 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6432 for Linux, where some SVC instructions must be treated specially. */
6435 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6436 struct displaced_step_closure *dsc)
6438 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
6440 if (debug_displaced)
6441 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
6442 "%.8lx\n", (unsigned long) resume_addr);
6444 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6449 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
6450 struct regcache *regs, struct displaced_step_closure *dsc)
6453 if (debug_displaced)
6454 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
6455 (unsigned long) insn);
6457 /* Preparation: none.
6458 Insn: unmodified svc.
6459 Cleanup: pc <- insn_addr + 4. */
6461 dsc->modinsn[0] = insn;
6463 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next instruction. */
6465 dsc->wrote_to_pc = 1;
6467 /* Allow OS-specific code to override SVC handling. */
6468 if (dsc->u.svc.copy_svc_os)
6469 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
6472 dsc->cleanup = &cleanup_svc;
6478 /* Copy undefined instructions. */
6481 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6482 struct displaced_step_closure *dsc)
6484 if (debug_displaced)
6485 fprintf_unfiltered (gdb_stdlog,
6486 "displaced: copying undefined insn %.8lx\n",
6487 (unsigned long) insn);
6489 dsc->modinsn[0] = insn;
6494 /* Copy unpredictable instructions. */
6497 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
6498 struct displaced_step_closure *dsc)
6500 if (debug_displaced)
6501 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
6502 "%.8lx\n", (unsigned long) insn);
6504 dsc->modinsn[0] = insn;
6509 /* The decode_* functions are instruction decoding helpers. They mostly follow
6510 the presentation in the ARM ARM. */
6513 arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
6514 struct regcache *regs,
6515 struct displaced_step_closure *dsc)
6517 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
6518 unsigned int rn = bits (insn, 16, 19);
6520 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
6521 return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
6522 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
6523 return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
6524 else if ((op1 & 0x60) == 0x20)
6525 return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
6526 else if ((op1 & 0x71) == 0x40)
6527 return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
6529 else if ((op1 & 0x77) == 0x41)
6530 return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
6531 else if ((op1 & 0x77) == 0x45)
6532 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pli. */
6533 else if ((op1 & 0x77) == 0x51)
6536 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
6538 return arm_copy_unpred (gdbarch, insn, dsc);
6540 else if ((op1 & 0x77) == 0x55)
6541 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
6542 else if (op1 == 0x57)
6545 case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
6546 case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
6547 case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
6548 case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
6549 default: return arm_copy_unpred (gdbarch, insn, dsc);
6551 else if ((op1 & 0x63) == 0x43)
6552 return arm_copy_unpred (gdbarch, insn, dsc);
6553 else if ((op2 & 0x1) == 0x0)
6554 switch (op1 & ~0x80)
6557 return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
6559 return arm_copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
6560 case 0x71: case 0x75:
6562 return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
6563 case 0x63: case 0x67: case 0x73: case 0x77:
6564 return arm_copy_unpred (gdbarch, insn, dsc);
6566 return arm_copy_undef (gdbarch, insn, dsc);
6569 return arm_copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
6573 arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
6574 struct regcache *regs,
6575 struct displaced_step_closure *dsc)
6577 if (bit (insn, 27) == 0)
6578 return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
6579 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
6580 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
6583 return arm_copy_unmodified (gdbarch, insn, "srs", dsc);
6586 return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);
6588 case 0x4: case 0x5: case 0x6: case 0x7:
6589 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
6592 switch ((insn & 0xe00000) >> 21)
6594 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
6596 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6599 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6602 return arm_copy_undef (gdbarch, insn, dsc);
6607 int rn_f = (bits (insn, 16, 19) == 0xf);
6608 switch ((insn & 0xe00000) >> 21)
6611 /* ldc/ldc2 imm (undefined for rn == pc). */
6612 return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
6613 : arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6616 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6618 case 0x4: case 0x5: case 0x6: case 0x7:
6619 /* ldc/ldc2 lit (undefined for rn != pc). */
6620 return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
6621 : arm_copy_undef (gdbarch, insn, dsc);
6624 return arm_copy_undef (gdbarch, insn, dsc);
6629 return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
6632 if (bits (insn, 16, 19) == 0xf)
6634 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6636 return arm_copy_undef (gdbarch, insn, dsc);
6640 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6642 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6646 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6648 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6651 return arm_copy_undef (gdbarch, insn, dsc);
6655 /* Decode miscellaneous instructions in dp/misc encoding space. */
6658 arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
6659 struct regcache *regs,
6660 struct displaced_step_closure *dsc)
6662 unsigned int op2 = bits (insn, 4, 6);
6663 unsigned int op = bits (insn, 21, 22);
6664 unsigned int op1 = bits (insn, 16, 19);
6669 return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
6672 if (op == 0x1) /* bx. */
6673 return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
6675 return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
6677 return arm_copy_undef (gdbarch, insn, dsc);
6681 /* Not really supported. */
6682 return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
6684 return arm_copy_undef (gdbarch, insn, dsc);
6688 return arm_copy_bx_blx_reg (gdbarch, insn,
6689 regs, dsc); /* blx register. */
6691 return arm_copy_undef (gdbarch, insn, dsc);
6694 return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
6698 return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
6700 /* Not really supported. */
6701 return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
6704 return arm_copy_undef (gdbarch, insn, dsc);
6709 arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
6710 struct regcache *regs,
6711 struct displaced_step_closure *dsc)
6714 switch (bits (insn, 20, 24))
6717 return arm_copy_unmodified (gdbarch, insn, "movw", dsc);
6720 return arm_copy_unmodified (gdbarch, insn, "movt", dsc);
6722 case 0x12: case 0x16:
6723 return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);
6726 return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
6730 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
6732 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
6733 return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
6734 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
6735 return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
6736 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
6737 return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
6738 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
6739 return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
6740 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
6741 return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
6742 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
6743 return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
6744 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
6745 /* 2nd arg means "unprivileged". */
6746 return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
6750 /* Should be unreachable. */
6755 arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
6756 struct regcache *regs,
6757 struct displaced_step_closure *dsc)
6759 int a = bit (insn, 25), b = bit (insn, 4);
6760 uint32_t op1 = bits (insn, 20, 24);
6761 int rn_f = bits (insn, 16, 19) == 0xf;
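/* In this encoding space op1 bit 0 is the load bit and op1 bit 2 selects
   byte rather than word access; the (op1 & 0x17) == 0x02/0x03/0x06/0x07
   patterns are the unprivileged (T) variants.  The three trailing arguments
   passed below select load vs. store, byte vs. word, and the unprivileged
   form, respectively.  */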
6763 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
6764 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
6765 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
6766 else if ((!a && (op1 & 0x17) == 0x02)
6767 || (a && (op1 & 0x17) == 0x02 && !b))
6768 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
6769 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
6770 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
6771 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
6772 else if ((!a && (op1 & 0x17) == 0x03)
6773 || (a && (op1 & 0x17) == 0x03 && !b))
6774 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
6775 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
6776 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
6777 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
6778 else if ((!a && (op1 & 0x17) == 0x06)
6779 || (a && (op1 & 0x17) == 0x06 && !b))
6780 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
6781 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
6782 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
6783 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
6784 else if ((!a && (op1 & 0x17) == 0x07)
6785 || (a && (op1 & 0x17) == 0x07 && !b))
6786 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
6788 /* Should be unreachable. */
6793 arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
6794 struct displaced_step_closure *dsc)
6796 switch (bits (insn, 20, 24))
6798 case 0x00: case 0x01: case 0x02: case 0x03:
6799 return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
6801 case 0x04: case 0x05: case 0x06: case 0x07:
6802 return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
6804 case 0x08: case 0x09: case 0x0a: case 0x0b:
6805 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
6806 return arm_copy_unmodified (gdbarch, insn,
6807 "decode/pack/unpack/saturate/reverse", dsc);
6810 if (bits (insn, 5, 7) == 0) /* op2. */
6812 if (bits (insn, 12, 15) == 0xf)
6813 return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
6815 return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
6818 return arm_copy_undef (gdbarch, insn, dsc);
6820 case 0x1a: case 0x1b:
6821 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
6822 return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
6824 return arm_copy_undef (gdbarch, insn, dsc);
6826 case 0x1c: case 0x1d:
6827 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
6829 if (bits (insn, 0, 3) == 0xf)
6830 return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
6832 return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
6835 return arm_copy_undef (gdbarch, insn, dsc);
6837 case 0x1e: case 0x1f:
6838 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
6839 return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
6841 return arm_copy_undef (gdbarch, insn, dsc);
6844 /* Should be unreachable. */
6849 arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
6850 struct regcache *regs,
6851 struct displaced_step_closure *dsc)
6854 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
6856 return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
6860 arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
6861 struct regcache *regs,
6862 struct displaced_step_closure *dsc)
6864 unsigned int opcode = bits (insn, 20, 24);
6868 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
6869 return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
6871 case 0x08: case 0x0a: case 0x0c: case 0x0e:
6872 case 0x12: case 0x16:
6873 return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
6875 case 0x09: case 0x0b: case 0x0d: case 0x0f:
6876 case 0x13: case 0x17:
6877 return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
6879 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
6880 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
6881 /* Note: no writeback for these instructions. Bit 25 will always be
6882 zero though (via caller), so the following works OK. */
6883 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6886 /* Should be unreachable. */
6891 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6892 struct regcache *regs, struct displaced_step_closure *dsc)
6894 unsigned int op1 = bits (insn, 20, 25);
6895 int op = bit (insn, 4);
6896 unsigned int coproc = bits (insn, 8, 11);
6897 unsigned int rn = bits (insn, 16, 19);
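/* Coprocessor numbers 10 and 11 ((coproc & 0xe) == 0xa) carry VFP and NEON
   instructions; any other coprocessor number denotes a generic
   ldc/stc/mcr/mrc/cdp operation.  */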
6899 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
6900 return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
6901 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
6902 && (coproc & 0xe) != 0xa)
6904 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6905 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
6906 && (coproc & 0xe) != 0xa)
6907 /* ldc/ldc2 imm/lit. */
6908 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6909 else if ((op1 & 0x3e) == 0x00)
6910 return arm_copy_undef (gdbarch, insn, dsc);
6911 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
6912 return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
6913 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
6914 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6915 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
6916 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6917 else if ((op1 & 0x30) == 0x20 && !op)
6919 if ((coproc & 0xe) == 0xa)
6920 return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
6922 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6924 else if ((op1 & 0x30) == 0x20 && op)
6925 return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
6926 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
6927 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6928 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
6929 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6930 else if ((op1 & 0x30) == 0x30)
6931 return arm_copy_svc (gdbarch, insn, regs, dsc);
6933 return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
6937 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
6938 CORE_ADDR to, struct regcache *regs,
6939 struct displaced_step_closure *dsc)
6941 error (_("Displaced stepping is only supported in ARM mode"));
6945 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
6946 CORE_ADDR to, struct regcache *regs,
6947 struct displaced_step_closure *dsc)
6950 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6953 /* Most displaced instructions use a 1-instruction scratch space, so set this
6954 here and override below if/when necessary. */
6956 dsc->insn_addr = from;
6957 dsc->scratch_base = to;
6958 dsc->cleanup = NULL;
6959 dsc->wrote_to_pc = 0;
6961 if (!displaced_in_arm_mode (regs))
6962 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
6966 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
6967 if (debug_displaced)
6968 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
6969 "at %.8lx\n", (unsigned long) insn,
6970 (unsigned long) from);
6972 if ((insn & 0xf0000000) == 0xf0000000)
6973 err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
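/* Switch on bits: 0bxxxx321xxxxxxxxxxxxxxxxxxxx0xxxx.  */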
6974 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
6976 case 0x0: case 0x1: case 0x2: case 0x3:
6977 err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
6980 case 0x4: case 0x5: case 0x6:
6981 err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
6985 err = arm_decode_media (gdbarch, insn, dsc);
6988 case 0x8: case 0x9: case 0xa: case 0xb:
6989 err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
6992 case 0xc: case 0xd: case 0xe: case 0xf:
6993 err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
6998 internal_error (__FILE__, __LINE__,
6999 _("arm_process_displaced_insn: Instruction decode error"));
7002 /* Actually set up the scratch space for a displaced instruction. */
7005 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
7006 CORE_ADDR to, struct displaced_step_closure *dsc)
7008 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7009 unsigned int i, len, offset;
7010 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7011 int size = dsc->is_thumb? 2 : 4;
7012 const unsigned char *bkp_insn;
7015 /* Poke modified instruction(s). */
7016 for (i = 0; i < dsc->numinsns; i++)
7018 if (debug_displaced)
7020 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
7022 fprintf_unfiltered (gdb_stdlog, "%.8lx",
7025 fprintf_unfiltered (gdb_stdlog, "%.4x",
7026 (unsigned short)dsc->modinsn[i]);
7028 fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
7029 (unsigned long) to + offset);
7032 write_memory_unsigned_integer (to + offset, size,
7033 byte_order_for_code,
7038 /* Choose the correct breakpoint instruction. */
7041 bkp_insn = tdep->thumb_breakpoint;
7042 len = tdep->thumb_breakpoint_size;
7046 bkp_insn = tdep->arm_breakpoint;
7047 len = tdep->arm_breakpoint_size;
7050 /* Put breakpoint afterwards. */
7051 write_memory (to + offset, bkp_insn, len);
7053 if (debug_displaced)
7054 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
7055 paddress (gdbarch, from), paddress (gdbarch, to));
7058 /* Entry point for copying an instruction into scratch space for displaced
7061 struct displaced_step_closure *
7062 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
7063 CORE_ADDR from, CORE_ADDR to,
7064 struct regcache *regs)
7066 struct displaced_step_closure *dsc
7067 = xmalloc (sizeof (struct displaced_step_closure));
7068 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
7069 arm_displaced_init_closure (gdbarch, from, to, dsc);
7074 /* Entry point for cleaning things up after a displaced instruction has been
7078 arm_displaced_step_fixup (struct gdbarch *gdbarch,
7079 struct displaced_step_closure *dsc,
7080 CORE_ADDR from, CORE_ADDR to,
7081 struct regcache *regs)
7084 dsc->cleanup (gdbarch, regs, dsc);
7086 if (!dsc->wrote_to_pc)
7087 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
7088 dsc->insn_addr + dsc->insn_size);
7092 #include "bfd-in2.h"
7093 #include "libcoff.h"
7096 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
7098 struct gdbarch *gdbarch = info->application_data;
7100 if (arm_pc_is_thumb (gdbarch, memaddr))
7102 static asymbol *asym;
7103 static combined_entry_type ce;
7104 static struct coff_symbol_struct csym;
7105 static struct bfd fake_bfd;
7106 static bfd_target fake_target;
7108 if (csym.native == NULL)
7110 /* Create a fake symbol vector containing a Thumb symbol.
7111 This is solely so that the code in print_insn_little_arm()
7112 and print_insn_big_arm() in opcodes/arm-dis.c will detect
7113 the presence of a Thumb symbol and switch to decoding
7114 Thumb instructions. */
7116 fake_target.flavour = bfd_target_coff_flavour;
7117 fake_bfd.xvec = &fake_target;
7118 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
7120 csym.symbol.the_bfd = &fake_bfd;
7121 csym.symbol.name = "fake";
7122 asym = (asymbol *) & csym;
7125 memaddr = UNMAKE_THUMB_ADDR (memaddr);
7126 info->symbols = &asym;
7129 info->symbols = NULL;
7131 if (info->endian == BFD_ENDIAN_BIG)
7132 return print_insn_big_arm (memaddr, info);
7134 return print_insn_little_arm (memaddr, info);
7137 /* The following define instruction sequences that will cause ARM
7138 CPUs to take an undefined instruction trap. These are used to
7139 signal a breakpoint to GDB.
7141 The newer ARMv4T CPUs are capable of operating in ARM or Thumb
7142 modes. A different instruction is required for each mode. The ARM
7143 CPUs can also be big- or little-endian. Thus four different
7144 instructions are needed to support all cases.
7146 Note: ARMv4 defines several new instructions that will take the
7147 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
7148 not in fact add the new instructions. The new undefined
7149 instructions in ARMv4 are all instructions that had no defined
7150 behaviour in earlier chips. There is no guarantee that they will
7151 raise an exception; they may instead be treated as NOPs. In practice,
7152 it may only be safe to rely on instructions matching:
7154 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
7155 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
7156 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
7158 Even this may only be true if the condition predicate is true. The
7159 following use a condition predicate of ALWAYS so it is always TRUE.
7161 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
7162 and NetBSD all use a software interrupt rather than an undefined
7163 instruction to force a trap. This can be handled by the
7164 ABI-specific code during establishment of the gdbarch vector. */
7166 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
7167 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
7168 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
7169 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
7171 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
7172 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
7173 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
7174 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
7176 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
7177 the program counter value to determine whether a 16-bit or 32-bit
7178 breakpoint should be used. It returns a pointer to a string of
7179 bytes that encode a breakpoint instruction, stores the length of
7180 the string to *lenptr, and adjusts the program counter (if
7181 necessary) to point to the actual memory location where the
7182 breakpoint should be inserted. */
7184 static const unsigned char *
7185 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
7187 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7188 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7190 if (arm_pc_is_thumb (gdbarch, *pcptr))
7192 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
7194 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
7195 check whether we are replacing a 32-bit instruction. */
7196 if (tdep->thumb2_breakpoint != NULL)
7199 if (target_read_memory (*pcptr, buf, 2) == 0)
7201 unsigned short inst1;
7202 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
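/* The first halfword of a 32-bit Thumb-2 instruction has its top five bits
   set to 0b11101, 0b11110 or 0b11111; anything else is a 16-bit
   instruction.  */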
7203 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
7205 *lenptr = tdep->thumb2_breakpoint_size;
7206 return tdep->thumb2_breakpoint;
7211 *lenptr = tdep->thumb_breakpoint_size;
7212 return tdep->thumb_breakpoint;
7216 *lenptr = tdep->arm_breakpoint_size;
7217 return tdep->arm_breakpoint;
7222 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
7225 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7227 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
7229 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
7230 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
7231 that this is not confused with a 32-bit ARM breakpoint. */
7235 /* Extract from an array REGBUF containing the (raw) register state a
7236 function return value of type TYPE, and copy that, in virtual
7237 format, into VALBUF. */
7240 arm_extract_return_value (struct type *type, struct regcache *regs,
7243 struct gdbarch *gdbarch = get_regcache_arch (regs);
7244 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7246 if (TYPE_CODE_FLT == TYPE_CODE (type))
7248 switch (gdbarch_tdep (gdbarch)->fp_model)
7252 /* The value is in register F0 in internal format. We need to
7253 extract the raw value and then convert it to the desired
7255 bfd_byte tmpbuf[FP_REGISTER_SIZE];
7257 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
7258 convert_from_extended (floatformat_from_type (type), tmpbuf,
7259 valbuf, gdbarch_byte_order (gdbarch));
7263 case ARM_FLOAT_SOFT_FPA:
7264 case ARM_FLOAT_SOFT_VFP:
7265 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7266 not using the VFP ABI code. */
7268 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
7269 if (TYPE_LENGTH (type) > 4)
7270 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
7271 valbuf + INT_REGISTER_SIZE);
7275 internal_error (__FILE__, __LINE__,
7276 _("arm_extract_return_value: "
7277 "Floating point model not supported"));
7281 else if (TYPE_CODE (type) == TYPE_CODE_INT
7282 || TYPE_CODE (type) == TYPE_CODE_CHAR
7283 || TYPE_CODE (type) == TYPE_CODE_BOOL
7284 || TYPE_CODE (type) == TYPE_CODE_PTR
7285 || TYPE_CODE (type) == TYPE_CODE_REF
7286 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7288 /* If the type is a plain integer, then the access is
7289 straight-forward. Otherwise we have to play around a bit
7291 int len = TYPE_LENGTH (type);
7292 int regno = ARM_A1_REGNUM;
7297 /* By using store_unsigned_integer we avoid having to do
7298 anything special for small big-endian values. */
7299 regcache_cooked_read_unsigned (regs, regno++, &tmp);
7300 store_unsigned_integer (valbuf,
7301 (len > INT_REGISTER_SIZE
7302 ? INT_REGISTER_SIZE : len),
7304 len -= INT_REGISTER_SIZE;
7305 valbuf += INT_REGISTER_SIZE;
7310 /* For a structure or union the behaviour is as if the value had
7311 been stored to word-aligned memory and then loaded into
7312 registers with 32-bit load instruction(s). */
7313 int len = TYPE_LENGTH (type);
7314 int regno = ARM_A1_REGNUM;
7315 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7319 regcache_cooked_read (regs, regno++, tmpbuf);
7320 memcpy (valbuf, tmpbuf,
7321 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7322 len -= INT_REGISTER_SIZE;
7323 valbuf += INT_REGISTER_SIZE;
7329 /* Will a function return an aggregate type in memory or in a
7330 register? Return 0 if an aggregate type can be returned in a
7331 register, 1 if it must be returned in memory. */
7334 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
7337 enum type_code code;
7339 CHECK_TYPEDEF (type);
7341 /* In the ARM ABI, "integer" like aggregate types are returned in
7342 registers. For an aggregate type to be integer like, its size
7343 must be less than or equal to INT_REGISTER_SIZE and the
7344 offset of each addressable subfield must be zero. Note that bit
7345 fields are not addressable, and all addressable subfields of
7346 unions always start at offset zero.
7348 This function is based on the behaviour of GCC 2.95.1.
7349 See: gcc/arm.c: arm_return_in_memory() for details.
7351 Note: All versions of GCC before GCC 2.95.2 do not set up the
7352 parameters correctly for a function returning the following
7353 structure: struct { float f;}; This should be returned in memory,
7354 not a register. Richard Earnshaw sent me a patch, but I do not
7355 know of any way to detect if a function like the above has been
7356 compiled with the correct calling convention. */
7358 /* All aggregate types that won't fit in a register must be returned
7360 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
7365 /* The AAPCS says all aggregates not larger than a word are returned
7367 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
7370 /* The only aggregate types that can be returned in a register are
7371 structs and unions. Arrays must be returned in memory. */
7372 code = TYPE_CODE (type);
7373 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
7378 /* Assume all other aggregate types can be returned in a register.
7379 Run a check for structures, unions and arrays. */
7382 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
7385 /* Need to check if this struct/union is "integer" like. For
7386 this to be true, its size must be less than or equal to
7387 INT_REGISTER_SIZE and the offset of each addressable
7388 subfield must be zero. Note that bit fields are not
7389 addressable, and unions always start at offset zero. If any
7390 of the subfields is a floating point type, the struct/union
7391 cannot be an integer type. */
7393 /* For each field in the object, check:
7394 1) Is it FP? --> yes, nRc = 1;
7395 2) Is it addressable (bitpos != 0) and
7396 not packed (bitsize == 0)?
7400 for (i = 0; i < TYPE_NFIELDS (type); i++)
7402 enum type_code field_type_code;
7403 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
7406 /* Is it a floating point type field? */
7407 if (field_type_code == TYPE_CODE_FLT)
7413 /* If bitpos != 0, then we have to care about it. */
7414 if (TYPE_FIELD_BITPOS (type, i) != 0)
7416 /* Bitfields are not addressable. If the field bitsize is
7417 zero, then the field is not packed. Hence it cannot be
7418 a bitfield or any other packed type. */
7419 if (TYPE_FIELD_BITSIZE (type, i) == 0)
7431 /* Write into appropriate registers a function return value of type
7432 TYPE, given in virtual format. */
7435 arm_store_return_value (struct type *type, struct regcache *regs,
7436 const gdb_byte *valbuf)
7438 struct gdbarch *gdbarch = get_regcache_arch (regs);
7439 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7441 if (TYPE_CODE (type) == TYPE_CODE_FLT)
7443 char buf[MAX_REGISTER_SIZE];
7445 switch (gdbarch_tdep (gdbarch)->fp_model)
7449 convert_to_extended (floatformat_from_type (type), buf, valbuf,
7450 gdbarch_byte_order (gdbarch));
7451 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
7454 case ARM_FLOAT_SOFT_FPA:
7455 case ARM_FLOAT_SOFT_VFP:
7456 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7457 not using the VFP ABI code. */
7459 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
7460 if (TYPE_LENGTH (type) > 4)
7461 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
7462 valbuf + INT_REGISTER_SIZE);
7466 internal_error (__FILE__, __LINE__,
7467 _("arm_store_return_value: Floating "
7468 "point model not supported"));
7472 else if (TYPE_CODE (type) == TYPE_CODE_INT
7473 || TYPE_CODE (type) == TYPE_CODE_CHAR
7474 || TYPE_CODE (type) == TYPE_CODE_BOOL
7475 || TYPE_CODE (type) == TYPE_CODE_PTR
7476 || TYPE_CODE (type) == TYPE_CODE_REF
7477 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7479 if (TYPE_LENGTH (type) <= 4)
7481 /* Values of one word or less are zero/sign-extended and
7483 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7484 LONGEST val = unpack_long (type, valbuf);
7486 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
7487 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
7491 /* Integral values greater than one word are stored in consecutive
7492 registers starting with r0. This will always be a multiple of
7493 the register size. */
7494 int len = TYPE_LENGTH (type);
7495 int regno = ARM_A1_REGNUM;
7499 regcache_cooked_write (regs, regno++, valbuf);
7500 len -= INT_REGISTER_SIZE;
7501 valbuf += INT_REGISTER_SIZE;
7507 /* For a structure or union the behaviour is as if the value had
7508 been stored to word-aligned memory and then loaded into
7509 registers with 32-bit load instruction(s). */
7510 int len = TYPE_LENGTH (type);
7511 int regno = ARM_A1_REGNUM;
7512 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7516 memcpy (tmpbuf, valbuf,
7517 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7518 regcache_cooked_write (regs, regno++, tmpbuf);
7519 len -= INT_REGISTER_SIZE;
7520 valbuf += INT_REGISTER_SIZE;
7526 /* Handle function return values. */
7528 static enum return_value_convention
7529 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
7530 struct type *valtype, struct regcache *regcache,
7531 gdb_byte *readbuf, const gdb_byte *writebuf)
7533 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7534 enum arm_vfp_cprc_base_type vfp_base_type;
7537 if (arm_vfp_abi_for_function (gdbarch, func_type)
7538 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
7540 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
7541 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
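/* A VFP co-processor register candidate (a homogeneous aggregate of at most
   four elements of one floating-point or vector base type) is returned in
   consecutive s, d or q registers, one element per register.  */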
7543 for (i = 0; i < vfp_base_count; i++)
7545 if (reg_char == 'q')
7548 arm_neon_quad_write (gdbarch, regcache, i,
7549 writebuf + i * unit_length);
7552 arm_neon_quad_read (gdbarch, regcache, i,
7553 readbuf + i * unit_length);
7560 sprintf (name_buf, "%c%d", reg_char, i);
7561 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7564 regcache_cooked_write (regcache, regnum,
7565 writebuf + i * unit_length);
7567 regcache_cooked_read (regcache, regnum,
7568 readbuf + i * unit_length);
7571 return RETURN_VALUE_REGISTER_CONVENTION;
7574 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
7575 || TYPE_CODE (valtype) == TYPE_CODE_UNION
7576 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
7578 if (tdep->struct_return == pcc_struct_return
7579 || arm_return_in_memory (gdbarch, valtype))
7580 return RETURN_VALUE_STRUCT_CONVENTION;
7584 arm_store_return_value (valtype, regcache, writebuf);
7587 arm_extract_return_value (valtype, regcache, readbuf);
7589 return RETURN_VALUE_REGISTER_CONVENTION;
7594 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
7596 struct gdbarch *gdbarch = get_frame_arch (frame);
7597 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7598 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7600 char buf[INT_REGISTER_SIZE];
7602 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
7604 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
7608 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
7612 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
7613 return the target PC. Otherwise return 0. */
7616 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
7620 CORE_ADDR start_addr;
7622 /* Find the starting address and name of the function containing the PC. */
7623 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
7626 /* If PC is in a Thumb call or return stub, return the address of the
7627 target PC, which is in a register. The thunk functions are called
7628 _call_via_xx, where xx is the register name. The possible names
7629 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
7630 functions, named __ARM_call_via_r[0-7]. */
7631 if (strncmp (name, "_call_via_", 10) == 0
7632 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
7634 /* Use the name suffix to determine which register contains the
7636 static char *table[15] =
7637 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
7638 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
7641 int offset = strlen (name) - 2;
7643 for (regno = 0; regno <= 14; regno++)
7644 if (strcmp (&name[offset], table[regno]) == 0)
7645 return get_frame_register_unsigned (frame, regno);
7648 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
7649 non-interworking calls to foo. We could decode the stubs
7650 to find the target but it's easier to use the symbol table. */
7651 namelen = strlen (name);
7652 if (name[0] == '_' && name[1] == '_'
7653 && ((namelen > 2 + strlen ("_from_thumb")
7654 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
7655 strlen ("_from_thumb")) == 0)
7656 || (namelen > 2 + strlen ("_from_arm")
7657 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
7658 strlen ("_from_arm")) == 0)))
7661 int target_len = namelen - 2;
7662 struct minimal_symbol *minsym;
7663 struct objfile *objfile;
7664 struct obj_section *sec;
7666 if (name[namelen - 1] == 'b')
7667 target_len -= strlen ("_from_thumb");
7669 target_len -= strlen ("_from_arm");
7671 target_name = alloca (target_len + 1);
7672 memcpy (target_name, name + 2, target_len);
7673 target_name[target_len] = '\0';
7675 sec = find_pc_section (pc);
7676 objfile = (sec == NULL) ? NULL : sec->objfile;
7677 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
7679 return SYMBOL_VALUE_ADDRESS (minsym);
7684 return 0; /* not a stub */
7688 set_arm_command (char *args, int from_tty)
7690 printf_unfiltered (_("\
7691 \"set arm\" must be followed by an apporpriate subcommand.\n"));
7692 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
7696 show_arm_command (char *args, int from_tty)
7698 cmd_show_list (showarmcmdlist, from_tty, "");
7702 arm_update_current_architecture (void)
7704 struct gdbarch_info info;
7706 /* If the current architecture is not ARM, we have nothing to do. */
7707 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
7710 /* Update the architecture. */
7711 gdbarch_info_init (&info);
7713 if (!gdbarch_update_p (info))
7714 internal_error (__FILE__, __LINE__, _("could not update architecture"));
7718 set_fp_model_sfunc (char *args, int from_tty,
7719 struct cmd_list_element *c)
7721 enum arm_float_model fp_model;
7723 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
7724 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
7726 arm_fp_model = fp_model;
7730 if (fp_model == ARM_FLOAT_LAST)
7731 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
7734 arm_update_current_architecture ();
7738 show_fp_model (struct ui_file *file, int from_tty,
7739 struct cmd_list_element *c, const char *value)
7741 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7743 if (arm_fp_model == ARM_FLOAT_AUTO
7744 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7745 fprintf_filtered (file, _("\
7746 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
7747 fp_model_strings[tdep->fp_model]);
7749 fprintf_filtered (file, _("\
7750 The current ARM floating point model is \"%s\".\n"),
7751 fp_model_strings[arm_fp_model]);
7755 arm_set_abi (char *args, int from_tty,
7756 struct cmd_list_element *c)
7758 enum arm_abi_kind arm_abi;
7760 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
7761 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
7763 arm_abi_global = arm_abi;
7767 if (arm_abi == ARM_ABI_LAST)
7768 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
7771 arm_update_current_architecture ();
7775 arm_show_abi (struct ui_file *file, int from_tty,
7776 struct cmd_list_element *c, const char *value)
7778 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7780 if (arm_abi_global == ARM_ABI_AUTO
7781 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7782 fprintf_filtered (file, _("\
7783 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
7784 arm_abi_strings[tdep->arm_abi]);
7786 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
7791 arm_show_fallback_mode (struct ui_file *file, int from_tty,
7792 struct cmd_list_element *c, const char *value)
7794 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7796 fprintf_filtered (file,
7797 _("The current execution mode assumed "
7798 "(when symbols are unavailable) is \"%s\".\n"),
7799 arm_fallback_mode_string);
7803 arm_show_force_mode (struct ui_file *file, int from_tty,
7804 struct cmd_list_element *c, const char *value)
7806 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7808 fprintf_filtered (file,
7809 _("The current execution mode assumed "
7810 "(even when symbols are available) is \"%s\".\n"),
7811 arm_force_mode_string);
7814 /* If the user changes the register disassembly style used for info
7815 register and other commands, we have to also switch the style used
7816 in opcodes for disassembly output. This function is run in the "set
7817 arm disassembly" command, and does that. */
7820 set_disassembly_style_sfunc (char *args, int from_tty,
7821 struct cmd_list_element *c)
7823 set_disassembly_style ();
7826 /* Return the ARM register name corresponding to register I. */
7828 arm_register_name (struct gdbarch *gdbarch, int i)
7830 const int num_regs = gdbarch_num_regs (gdbarch);
7832 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
7833 && i >= num_regs && i < num_regs + 32)
7835 static const char *const vfp_pseudo_names[] = {
7836 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
7837 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
7838 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
7839 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
7842 return vfp_pseudo_names[i - num_regs];
7845 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
7846 && i >= num_regs + 32 && i < num_regs + 32 + 16)
7848 static const char *const neon_pseudo_names[] = {
7849 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
7850 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
7853 return neon_pseudo_names[i - num_regs - 32];
7856 if (i >= ARRAY_SIZE (arm_register_names))
7857 /* These registers are only supported on targets which supply
7858 an XML description. */
7861 return arm_register_names[i];
7865 set_disassembly_style (void)
7869 /* Find the style that the user wants. */
7870 for (current = 0; current < num_disassembly_options; current++)
7871 if (disassembly_style == valid_disassembly_styles[current])
7873 gdb_assert (current < num_disassembly_options);
7875 /* Synchronize the disassembler. */
7876 set_arm_regname_option (current);
7879 /* Test whether the coff symbol specific value corresponds to a Thumb
7883 coff_sym_is_thumb (int val)
7885 return (val == C_THUMBEXT
7886 || val == C_THUMBSTAT
7887 || val == C_THUMBEXTFUNC
7888 || val == C_THUMBSTATFUNC
7889 || val == C_THUMBLABEL);
7892 /* arm_coff_make_msymbol_special()
7893 arm_elf_make_msymbol_special()
7895 These functions test whether the COFF or ELF symbol corresponds to
7896 an address in thumb code, and set a "special" bit in a minimal
7897 symbol to indicate that it does. */
7900 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
7902 if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
7903 == ST_BRANCH_TO_THUMB)
7904 MSYMBOL_SET_SPECIAL (msym);
7908 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
7910 if (coff_sym_is_thumb (val))
7911 MSYMBOL_SET_SPECIAL (msym);
7915 arm_objfile_data_free (struct objfile *objfile, void *arg)
7917 struct arm_per_objfile *data = arg;
7920 for (i = 0; i < objfile->obfd->section_count; i++)
7921 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
7925 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
7928 const char *name = bfd_asymbol_name (sym);
7929 struct arm_per_objfile *data;
7930 VEC(arm_mapping_symbol_s) **map_p;
7931 struct arm_mapping_symbol new_map_sym;
7933 gdb_assert (name[0] == '$');
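/* $a, $t and $d are the standard ARM ELF mapping symbols, marking the start
   of a run of ARM code, Thumb code and data respectively.  */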
7934 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
7937 data = objfile_data (objfile, arm_objfile_data_key);
7940 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
7941 struct arm_per_objfile);
7942 set_objfile_data (objfile, arm_objfile_data_key, data);
7943 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
7944 objfile->obfd->section_count,
7945 VEC(arm_mapping_symbol_s) *);
7947 map_p = &data->section_maps[bfd_get_section (sym)->index];
7949 new_map_sym.value = sym->value;
7950 new_map_sym.type = name[1];
7952 /* Assume that most mapping symbols appear in order of increasing
7953 value. If they were randomly distributed, it would be faster to
7954 always push here and then sort at first use. */
7955 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
7957 struct arm_mapping_symbol *prev_map_sym;
7959 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
7960 if (prev_map_sym->value >= sym->value)
7963 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
7964 arm_compare_mapping_symbols);
7965 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
7970 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
7974 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
7976 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7977 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
7979 /* If necessary, set the T bit. */
7982 ULONGEST val, t_bit;
7983 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
7984 t_bit = arm_psr_thumb_bit (gdbarch);
7985 if (arm_pc_is_thumb (gdbarch, pc))
7986 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7989 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7994 /* Read the contents of a NEON quad register, by reading from two
7995 double registers. This is used to implement the quad pseudo
7996 registers, and for argument passing in case the quad registers are
7997 missing; vectors are passed in quad registers when using the VFP
7998 ABI, even if a NEON unit is not present. REGNUM is the index of
7999 the quad register, in [0, 15]. */
8001 static enum register_status
8002 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
8003 int regnum, gdb_byte *buf)
8006 gdb_byte reg_buf[8];
8007 int offset, double_regnum;
8008 enum register_status status;
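/* Quad register q<N> overlays double registers d<2N> and d<2N+1>; look up
   the raw register number of d<2N>.  */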
8010 sprintf (name_buf, "d%d", regnum << 1);
8011 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8014 /* d0 is always the least significant half of q0. */
8015 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8020 status = regcache_raw_read (regcache, double_regnum, reg_buf);
8021 if (status != REG_VALID)
8023 memcpy (buf + offset, reg_buf, 8);
8025 offset = 8 - offset;
8026 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
8027 if (status != REG_VALID)
8029 memcpy (buf + offset, reg_buf, 8);
8034 static enum register_status
8035 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
8036 int regnum, gdb_byte *buf)
8038 const int num_regs = gdbarch_num_regs (gdbarch);
8040 gdb_byte reg_buf[8];
8041 int offset, double_regnum;
8043 gdb_assert (regnum >= num_regs);
8046 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
8047 /* Quad-precision register. */
8048 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
8051 enum register_status status;
8053 /* Single-precision register. */
8054 gdb_assert (regnum < 32);
8056 /* s0 is always the least significant half of d0. */
8057 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8058 offset = (regnum & 1) ? 0 : 4;
8060 offset = (regnum & 1) ? 4 : 0;
8062 sprintf (name_buf, "d%d", regnum >> 1);
8063 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8066 status = regcache_raw_read (regcache, double_regnum, reg_buf);
8067 if (status == REG_VALID)
8068 memcpy (buf, reg_buf + offset, 4);
8073 /* Store the contents of BUF to a NEON quad register, by writing to
8074 two double registers. This is used to implement the quad pseudo
8075 registers, and for argument passing in case the quad registers are
8076 missing; vectors are passed in quad registers when using the VFP
8077 ABI, even if a NEON unit is not present. REGNUM is the index
8078 of the quad register, in [0, 15]. */
8081 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
8082 int regnum, const gdb_byte *buf)
8085 gdb_byte reg_buf[8];
8086 int offset, double_regnum;
8088 sprintf (name_buf, "d%d", regnum << 1);
8089 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8092 /* d0 is always the least significant half of q0. */
8093 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8098 regcache_raw_write (regcache, double_regnum, buf + offset);
8099 offset = 8 - offset;
8100 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
8104 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
8105 int regnum, const gdb_byte *buf)
8107 const int num_regs = gdbarch_num_regs (gdbarch);
8109 gdb_byte reg_buf[8];
8110 int offset, double_regnum;
8112 gdb_assert (regnum >= num_regs);
8115 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
8116 /* Quad-precision register. */
8117 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
8120 /* Single-precision register. */
8121 gdb_assert (regnum < 32);
8123 /* s0 is always the least significant half of d0. */
8124 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8125 offset = (regnum & 1) ? 0 : 4;
8127 offset = (regnum & 1) ? 4 : 0;
8129 sprintf (name_buf, "d%d", regnum >> 1);
8130 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8133 regcache_raw_read (regcache, double_regnum, reg_buf);
8134 memcpy (reg_buf + offset, buf, 4);
8135 regcache_raw_write (regcache, double_regnum, reg_buf);
8139 static struct value *
8140 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
8142 const int *reg_p = baton;
8143 return value_of_register (*reg_p, frame);
8146 static enum gdb_osabi
8147 arm_elf_osabi_sniffer (bfd *abfd)
8149 unsigned int elfosabi;
8150 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
8152 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
8154 if (elfosabi == ELFOSABI_ARM)
8155 /* GNU tools use this value. Check note sections in this case,
8157 bfd_map_over_sections (abfd,
8158 generic_elf_osabi_sniff_abi_tag_sections,
8161 /* Anything else will be handled by the generic ELF sniffer. */
8166 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
8167 struct reggroup *group)
8169 /* The FPS register's type is INT, but it belongs to float_reggroup.
8170 Besides this, the FPS register belongs to save_reggroup,
8171 restore_reggroup, and all_reggroup, of course. */
8172 if (regnum == ARM_FPS_REGNUM)
8173 return (group == float_reggroup
8174 || group == save_reggroup
8175 || group == restore_reggroup
8176 || group == all_reggroup);
8178 return default_register_reggroup_p (gdbarch, regnum, group);
8182 /* Initialize the current architecture based on INFO. If possible,
8183 re-use an architecture from ARCHES, which is a list of
8184 architectures already created during this debugging session.
8186 Called e.g. at program startup, when reading a core file, and when
8187 reading a binary file. */
8189 static struct gdbarch *
8190 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
8192 struct gdbarch_tdep *tdep;
8193 struct gdbarch *gdbarch;
8194 struct gdbarch_list *best_arch;
8195 enum arm_abi_kind arm_abi = arm_abi_global;
8196 enum arm_float_model fp_model = arm_fp_model;
8197 struct tdesc_arch_data *tdesc_data = NULL;
8199 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
8201 int have_fpa_registers = 1;
8202 const struct target_desc *tdesc = info.target_desc;
8204 /* If we have an object to base this architecture on, try to determine
8207 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
8209 int ei_osabi, e_flags;
8211 switch (bfd_get_flavour (info.abfd))
8213 case bfd_target_aout_flavour:
8214 /* Assume it's an old APCS-style ABI. */
8215 arm_abi = ARM_ABI_APCS;
8218 case bfd_target_coff_flavour:
8219 /* Assume it's an old APCS-style ABI. */
8221 arm_abi = ARM_ABI_APCS;
8224 case bfd_target_elf_flavour:
8225 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
8226 e_flags = elf_elfheader (info.abfd)->e_flags;
8228 if (ei_osabi == ELFOSABI_ARM)
8230 /* GNU tools used to use this value, but do not for EABI
8231 objects. There's nowhere to tag an EABI version
8232 anyway, so assume APCS. */
8233 arm_abi = ARM_ABI_APCS;
8235 else if (ei_osabi == ELFOSABI_NONE)
8237 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
8238 int attr_arch, attr_profile;
8242 case EF_ARM_EABI_UNKNOWN:
8243 /* Assume GNU tools. */
8244 arm_abi = ARM_ABI_APCS;
8247 case EF_ARM_EABI_VER4:
8248 case EF_ARM_EABI_VER5:
8249 arm_abi = ARM_ABI_AAPCS;
8250 /* EABI binaries default to VFP float ordering.
8251 They may also contain build attributes that can
8252 be used to identify if the VFP argument-passing
8254 if (fp_model == ARM_FLOAT_AUTO)
8257 switch (bfd_elf_get_obj_attr_int (info.abfd,
8262 /* "The user intended FP parameter/result
8263 passing to conform to AAPCS, base
8265 fp_model = ARM_FLOAT_SOFT_VFP;
8268 /* "The user intended FP parameter/result
8269 passing to conform to AAPCS, VFP
8271 fp_model = ARM_FLOAT_VFP;
8274 /* "The user intended FP parameter/result
8275 passing to conform to tool chain-specific
8276 conventions" - we don't know any such
8277 conventions, so leave it as "auto". */
8280 /* Attribute value not mentioned in the
8281 October 2008 ABI, so leave it as
8286 fp_model = ARM_FLOAT_SOFT_VFP;
8292 /* Leave it as "auto". */
8293 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
8298 /* Detect M-profile programs. This only works if the
8299 executable file includes build attributes; GCC does
8300 copy them to the executable, but e.g. RealView does
8302 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
8304 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
8306 Tag_CPU_arch_profile);
8307 /* GCC specifies the profile for v6-M; RealView only
8308 specifies the profile for architectures starting with
8309 V7 (as opposed to architectures with a tag
8310 numerically greater than TAG_CPU_ARCH_V7). */
8311 if (!tdesc_has_registers (tdesc)
8312 && (attr_arch == TAG_CPU_ARCH_V6_M
8313 || attr_arch == TAG_CPU_ARCH_V6S_M
8314 || attr_profile == 'M'))
8315 tdesc = tdesc_arm_with_m;
8319 if (fp_model == ARM_FLOAT_AUTO)
8321 int e_flags = elf_elfheader (info.abfd)->e_flags;
8323 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
8326 /* Leave it as "auto". Strictly speaking this case
8327 means FPA, but almost nobody uses that now, and
8328 many toolchains fail to set the appropriate bits
8329 for the floating-point model they use. */
8331 case EF_ARM_SOFT_FLOAT:
8332 fp_model = ARM_FLOAT_SOFT_FPA;
8334 case EF_ARM_VFP_FLOAT:
8335 fp_model = ARM_FLOAT_VFP;
8337 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
8338 fp_model = ARM_FLOAT_SOFT_VFP;
8343 if (e_flags & EF_ARM_BE8)
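/* BE8 images use big-endian data but little-endian instructions, so read
   and disassemble code as little-endian.  */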
8344 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
8349 /* Leave it as "auto". */
8354 /* Check any target description for validity. */
8355 if (tdesc_has_registers (tdesc))
8357 /* For most registers we require GDB's default names; but also allow
8358 the numeric names for sp / lr / pc, as a convenience. */
8359 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
8360 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
8361 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
8363 const struct tdesc_feature *feature;
8366 feature = tdesc_find_feature (tdesc,
8367 "org.gnu.gdb.arm.core");
8368 if (feature == NULL)
8370 feature = tdesc_find_feature (tdesc,
8371 "org.gnu.gdb.arm.m-profile");
8372 if (feature == NULL)
8378 tdesc_data = tdesc_data_alloc ();
8381 for (i = 0; i < ARM_SP_REGNUM; i++)
8382 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
8383 arm_register_names[i]);
8384 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8387 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8390 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8394 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8395 ARM_PS_REGNUM, "xpsr");
8397 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8398 ARM_PS_REGNUM, "cpsr");
8402 tdesc_data_cleanup (tdesc_data);
8406 feature = tdesc_find_feature (tdesc,
8407 "org.gnu.gdb.arm.fpa");
8408 if (feature != NULL)
8411 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
8412 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
8413 arm_register_names[i]);
8416 tdesc_data_cleanup (tdesc_data);
8421 have_fpa_registers = 0;
8423 feature = tdesc_find_feature (tdesc,
8424 "org.gnu.gdb.xscale.iwmmxt");
8425 if (feature != NULL)
8427 static const char *const iwmmxt_names[] = {
8428 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
8429 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
8430 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
8431 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
8435 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
8437 &= tdesc_numbered_register (feature, tdesc_data, i,
8438 iwmmxt_names[i - ARM_WR0_REGNUM]);
8440 /* Check for the control registers, but do not fail if they
8442 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
8443 tdesc_numbered_register (feature, tdesc_data, i,
8444 iwmmxt_names[i - ARM_WR0_REGNUM]);
8446 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
8448 &= tdesc_numbered_register (feature, tdesc_data, i,
8449 iwmmxt_names[i - ARM_WR0_REGNUM]);
8453 tdesc_data_cleanup (tdesc_data);
8458 /* If we have a VFP unit, check whether the single precision registers
8459 are present. If not, then we will synthesize them as pseudo
8461 feature = tdesc_find_feature (tdesc,
8462 "org.gnu.gdb.arm.vfp");
8463 if (feature != NULL)
8465 static const char *const vfp_double_names[] = {
8466 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
8467 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
8468 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
8469 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
8472 /* Require the double precision registers. There must be either
8475 for (i = 0; i < 32; i++)
8477 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8479 vfp_double_names[i]);
8483 if (!valid_p && i == 16)
8486 /* Also require FPSCR. */
8487 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8488 ARM_FPSCR_REGNUM, "fpscr");
8491 tdesc_data_cleanup (tdesc_data);
8495 if (tdesc_unnumbered_register (feature, "s0") == 0)
8496 have_vfp_pseudos = 1;
8498 have_vfp_registers = 1;
8500 /* If we have VFP, also check for NEON. The architecture allows
8501 NEON without VFP (integer vector operations only), but GDB
8502 does not support that. */
8503 feature = tdesc_find_feature (tdesc,
8504 "org.gnu.gdb.arm.neon");
8505 if (feature != NULL)
8507 /* NEON requires 32 double-precision registers. */
8510 tdesc_data_cleanup (tdesc_data);
8514 /* If there are quad registers defined by the stub, use
8515 their type; otherwise (normally) provide them with
8516 the default type. */
8517 if (tdesc_unnumbered_register (feature, "q0") == 0)
8518 have_neon_pseudos = 1;
8525 /* If there is already a candidate, use it. */
8526 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
8528 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
8530 if (arm_abi != ARM_ABI_AUTO
8531 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
8534 if (fp_model != ARM_FLOAT_AUTO
8535 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
8538 /* There are various other properties in tdep that we do not
8539 need to check here: those derived from a target description,
8540 since gdbarches with a different target description are
8541 automatically disqualified. */
8543 /* Do check is_m, though, since it might come from the binary. */
8544 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
8547 /* Found a match. */
8551 if (best_arch != NULL)
8553 if (tdesc_data != NULL)
8554 tdesc_data_cleanup (tdesc_data);
8555 return best_arch->gdbarch;
8558 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
8559 gdbarch = gdbarch_alloc (&info, tdep);
8561 /* Record additional information about the architecture we are defining.
8562 These are gdbarch discriminators, like the OSABI. */
8563 tdep->arm_abi = arm_abi;
8564 tdep->fp_model = fp_model;
8566 tdep->have_fpa_registers = have_fpa_registers;
8567 tdep->have_vfp_registers = have_vfp_registers;
8568 tdep->have_vfp_pseudos = have_vfp_pseudos;
8569 tdep->have_neon_pseudos = have_neon_pseudos;
8570 tdep->have_neon = have_neon;
8573 switch (info.byte_order_for_code)
8575 case BFD_ENDIAN_BIG:
8576 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
8577 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
8578 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
8579 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
8583 case BFD_ENDIAN_LITTLE:
8584 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
8585 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
8586 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
8587 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
8592 internal_error (__FILE__, __LINE__,
8593 _("arm_gdbarch_init: bad byte order for float format"));
8596 /* On ARM targets char defaults to unsigned. */
8597 set_gdbarch_char_signed (gdbarch, 0);
8599 /* Note: for displaced stepping, this includes the breakpoint, and one word
8600 of additional scratch space. This setting isn't used for anything beside
8601 displaced stepping at present. */
8602 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
8604 /* This should be low enough for everything. */
8605 tdep->lowest_pc = 0x20;
8606 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
8608 /* The default, for both APCS and AAPCS, is to return small
8609 structures in registers. */
8610 tdep->struct_return = reg_struct_return;
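/* Illustrative example of what reg_struct_return means here: a one-word
   aggregate such as "struct { int x; }" comes back in r0, while larger
   structures are returned through a memory buffer; the exact
   classification is done later by arm_return_value.  */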
8612 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
8613 set_gdbarch_frame_align (gdbarch, arm_frame_align);
8615 set_gdbarch_write_pc (gdbarch, arm_write_pc);
8617 /* Frame handling. */
8618 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
8619 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
8620 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
8622 frame_base_set_default (gdbarch, &arm_normal_base);
8624 /* Address manipulation. */
8625 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
8626 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
8628 /* Advance PC across function entry code. */
8629 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
8631 /* Detect whether PC is in function epilogue. */
8632 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
8634 /* Skip trampolines. */
8635 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
8637 /* The stack grows downward. */
8638 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
8640 /* Breakpoint manipulation. */
8641 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
8642 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
8643 arm_remote_breakpoint_from_pc);
8645 /* Information about registers, etc. */
8646 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
8647 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
8648 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
8649 set_gdbarch_register_type (gdbarch, arm_register_type);
8650 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
8652 /* This "info float" is FPA-specific. Use the generic version if we
8654 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
8655 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
8657 /* Internal <-> external register number maps. */
8658 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
8659 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
8661 set_gdbarch_register_name (gdbarch, arm_register_name);
8663 /* Returning results. */
8664 set_gdbarch_return_value (gdbarch, arm_return_value);
8667 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
8669 /* Minsymbol frobbing. */
8670 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
8671 set_gdbarch_coff_make_msymbol_special (gdbarch,
8672 arm_coff_make_msymbol_special);
8673 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
8675 /* Thumb-2 IT block support. */
8676 set_gdbarch_adjust_breakpoint_address (gdbarch,
8677 arm_adjust_breakpoint_address);
8679 /* Virtual tables. */
8680 set_gdbarch_vbit_in_delta (gdbarch, 1);
8682 /* Hook in the ABI-specific overrides, if they have been registered. */
8683 gdbarch_init_osabi (info, gdbarch);
8685 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
8687 /* Add some default predicates. */
8688 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
8689 dwarf2_append_unwinders (gdbarch);
8690 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
8691 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
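/* The unwinders registered above are consulted in the order they were
   appended: the stub (trampoline) unwinder first, then DWARF CFI, then the
   ARM.exidx exception-table unwinder, and finally the prologue analyzer as
   the fallback of last resort.  */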
8693 /* Now we have tuned the configuration, set a few final things,
8694 based on what the OS ABI has told us. */
8696 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
8697 binaries are always marked. */
8698 if (tdep->arm_abi == ARM_ABI_AUTO)
8699 tdep->arm_abi = ARM_ABI_APCS;
8701 /* Watchpoints are not steppable. */
8702 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
8704 /* We used to default to FPA for generic ARM, but almost nobody
8705 uses that now, and we now provide a way for the user to force
8706 the model. So default to the most useful variant. */
8707 if (tdep->fp_model == ARM_FLOAT_AUTO)
8708 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
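/* ARM_FLOAT_SOFT_FPA is software floating point using the FPA word order,
   i.e. doubles are stored with their two words swapped relative to the
   byte order ("mixed-endian" on little-endian targets); this is what
   drives the float-format selection just below.  */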
8710 if (tdep->jb_pc >= 0)
8711 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
8713 /* Floating point sizes and format. */
8714 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
8715 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
8717 set_gdbarch_double_format
8718 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
8719 set_gdbarch_long_double_format
8720 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
8724 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
8725 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
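/* A worked example of the mixed-endian ("littlebyte bigword") double
   layout selected for the FPA models above: the IEEE value 1.0 has the
   64-bit pattern 0x3ff0000000000000; on a little-endian FPA target it is
   stored most-significant word first, each word little-endian, giving the
   byte sequence 00 00 f0 3f 00 00 00 00.  */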
8728 if (have_vfp_pseudos)
8730 /* NOTE: These are the only pseudo registers used by
8731 the ARM target at the moment. If more are added, a
8732 little more care in numbering will be needed. */
8734 int num_pseudos = 32;
8735 if (have_neon_pseudos)
8737 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
8738 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
8739 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
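/* Presumably (see arm_pseudo_read and arm_pseudo_write elsewhere in this
   file) the first 32 pseudo registers are the single-precision views
   s0-s31, with the additional 16 NEON pseudos q0-q15 following them when
   have_neon_pseudos is set.  */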
8744 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
8746 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
8748 /* Override tdesc_register_type to adjust the types of VFP
8749 registers for NEON. */
8750 set_gdbarch_register_type (gdbarch, arm_register_type);
8753 /* Add standard register aliases. We add aliases even for those
8754 names which are used by the current architecture - it's simpler,
8755 and does no harm, since nothing ever lists user registers. */
8756 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
8757 user_reg_add (gdbarch, arm_register_aliases[i].name,
8758 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
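/* The alias table referenced above typically provides the APCS names as
   user registers, e.g. a1-a4 for r0-r3, v1-v8 for r4-r11, and sb, sl, fp,
   ip for r9-r12, so commands like "print $a1" keep working whatever names
   the target description chose.  (The exact set lives in
   arm_register_aliases earlier in this file.)  */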
8764 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
8766 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8771 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
8772 (unsigned long) tdep->lowest_pc);
8775 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
8778 _initialize_arm_tdep (void)
8780 struct ui_file *stb;
8782 struct cmd_list_element *new_set, *new_show;
8783 const char *setname;
8784 const char *setdesc;
8785 const char *const *regnames;
8787 static char *helptext;
8788 char regdesc[1024], *rdptr = regdesc;
8789 size_t rest = sizeof (regdesc);
8791 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
8793 arm_objfile_data_key
8794 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
8796 /* Add ourselves to objfile event chain. */
8797 observer_attach_new_objfile (arm_exidx_new_objfile);
8799 arm_exidx_data_key = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
8801 /* Register an ELF OS ABI sniffer for ARM binaries. */
8802 gdbarch_register_osabi_sniffer (bfd_arch_arm,
8803 bfd_target_elf_flavour,
8804 arm_elf_osabi_sniffer);
8806 /* Initialize the standard target descriptions. */
8807 initialize_tdesc_arm_with_m ();
8808 initialize_tdesc_arm_with_iwmmxt ();
8809 initialize_tdesc_arm_with_vfpv2 ();
8810 initialize_tdesc_arm_with_vfpv3 ();
8811 initialize_tdesc_arm_with_neon ();
8813 /* Get the number of possible sets of register names defined in opcodes. */
8814 num_disassembly_options = get_arm_regname_num_options ();
8816 /* Add root prefix command for all "set arm"/"show arm" commands. */
8817 add_prefix_cmd ("arm", no_class, set_arm_command,
8818 _("Various ARM-specific commands."),
8819 &setarmcmdlist, "set arm ", 0, &setlist);
8821 add_prefix_cmd ("arm", no_class, show_arm_command,
8822 _("Various ARM-specific commands."),
8823 &showarmcmdlist, "show arm ", 0, &showlist);
8825 /* Sync the opcode insn printer with our register viewer. */
8826 parse_arm_disassembler_option ("reg-names-std");
8828 /* Initialize the array that will be passed to
8829 add_setshow_enum_cmd(). */
8830 valid_disassembly_styles
8831 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
8832 for (i = 0; i < num_disassembly_options; i++)
8834 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
8835 valid_disassembly_styles[i] = setname;
8836 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
8839 /* When we find the default names, tell the disassembler to use them.  */
8841 if (!strcmp (setname, "std"))
8843 disassembly_style = setname;
8844 set_arm_regname_option (i);
8847 /* Mark the end of valid options. */
8848 valid_disassembly_styles[num_disassembly_options] = NULL;
8850 /* Create the help text. */
8851 stb = mem_fileopen ();
8852 fprintf_unfiltered (stb, "%s%s%s",
8853 _("The valid values are:\n"),
8855 _("The default is \"std\"."));
8856 helptext = ui_file_xstrdup (stb, NULL);
8857 ui_file_delete (stb);
8859 add_setshow_enum_cmd("disassembler", no_class,
8860 valid_disassembly_styles, &disassembly_style,
8861 _("Set the disassembly style."),
8862 _("Show the disassembly style."),
8864 set_disassembly_style_sfunc,
8865 NULL, /* FIXME: i18n: The disassembly style is %s.  */
8867 &setarmcmdlist, &showarmcmdlist);
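/* Typical use of the command registered above (session sketch):
       (gdb) set arm disassembler std
       (gdb) show arm disassembler
   The available style names are exactly the entries collected into
   valid_disassembly_styles from opcodes.  */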
8869 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
8870 _("Set usage of ARM 32-bit mode."),
8871 _("Show usage of ARM 32-bit mode."),
8872 _("When off, a 26-bit PC will be used."),
8874 NULL, /* FIXME: i18n: Usage of ARM 32-bit mode is %s.  */
8876 &setarmcmdlist, &showarmcmdlist);
8878 /* Add a command to allow the user to force the FPU model. */
8879 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
8880 _("Set the floating point type."),
8881 _("Show the floating point type."),
8882 _("auto - Determine the FP typefrom the OS-ABI.\n\
8883 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
8884 fpa - FPA co-processor (GCC compiled).\n\
8885 softvfp - Software FP with pure-endian doubles.\n\
8886 vfp - VFP co-processor."),
8887 set_fp_model_sfunc, show_fp_model,
8888 &setarmcmdlist, &showarmcmdlist);
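/* Session sketch for the FPU override (values taken from
   fp_model_strings):
       (gdb) set arm fpu softfpa
       (gdb) set arm fpu auto
   "auto" defers to the OS/ABI logic in arm_gdbarch_init.  */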
8890 /* Add a command to allow the user to force the ABI. */
8891 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
8894 NULL, arm_set_abi, arm_show_abi,
8895 &setarmcmdlist, &showarmcmdlist);
8897 /* Add two commands to allow the user to force the assumed
8899 add_setshow_enum_cmd ("fallback-mode", class_support,
8900 arm_mode_strings, &arm_fallback_mode_string,
8901 _("Set the mode assumed when symbols are unavailable."),
8902 _("Show the mode assumed when symbols are unavailable."),
8903 NULL, NULL, arm_show_fallback_mode,
8904 &setarmcmdlist, &showarmcmdlist);
8905 add_setshow_enum_cmd ("force-mode", class_support,
8906 arm_mode_strings, &arm_force_mode_string,
8907 _("Set the mode assumed even when symbols are available."),
8908 _("Show the mode assumed even when symbols are available."),
8909 NULL, NULL, arm_show_force_mode,
8910 &setarmcmdlist, &showarmcmdlist);
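/* Sketch of how the two mode overrides are used (assuming "arm" and
   "thumb" are among arm_mode_strings):
       (gdb) set arm fallback-mode thumb
       (gdb) set arm force-mode arm
   fallback-mode only matters when no symbol information is available;
   force-mode takes precedence even when symbols say otherwise.  */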
8912 /* Debugging flag. */
8913 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
8914 _("Set ARM debugging."),
8915 _("Show ARM debugging."),
8916 _("When on, arm-specific debugging is enabled."),
8918 NULL, /* FIXME: i18n: "ARM debugging is %s."  */
8919 &setdebuglist, &showdebuglist);
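/* Because this flag is registered on the "set debug"/"show debug" lists
   rather than "set arm", it is driven with, e.g.:
       (gdb) set debug arm on
       (gdb) show debug arm  */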