1 /* Common target dependent code for GDB on ARM systems.
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include <ctype.h> /* XXX for isupper (). */
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
32 #include "reggroups.h"
35 #include "arch-utils.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
41 #include "dwarf2-frame.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
49 #include "gdb/sim-arm.h"
52 #include "coff/internal.h"
55 #include "gdb_assert.h"
58 #include "features/arm-with-m.c"
62 /* Macros for setting and testing a bit in a minimal symbol that marks
63 it as a Thumb function. The MSB of the minimal symbol's "info" field
64 is used for this purpose.
66 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
67 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
69 #define MSYMBOL_SET_SPECIAL(msym) \
70 MSYMBOL_TARGET_FLAG_1 (msym) = 1
72 #define MSYMBOL_IS_SPECIAL(msym) \
73 MSYMBOL_TARGET_FLAG_1 (msym)
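/* Illustrative sketch, not from the original file: how the two macros
   above compose.  MSYM stands for any minimal symbol obtained elsewhere
   (for example from a symbol reader); it is only a placeholder here.  */
#if 0	/* Example only.  */
static int
example_mark_and_test_thumb_symbol (struct minimal_symbol *msym)
{
  MSYMBOL_SET_SPECIAL (msym);		/* Remember: this symbol is Thumb.  */
  return MSYMBOL_IS_SPECIAL (msym);	/* Later queries will see the flag.  */
}
#endif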
75 /* Per-objfile data used for mapping symbols. */
76 static const struct objfile_data *arm_objfile_data_key;
78 struct arm_mapping_symbol
83 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
84 DEF_VEC_O(arm_mapping_symbol_s);
86 struct arm_per_objfile
88 VEC(arm_mapping_symbol_s) **section_maps;
91 /* The list of available "set arm ..." and "show arm ..." commands. */
92 static struct cmd_list_element *setarmcmdlist = NULL;
93 static struct cmd_list_element *showarmcmdlist = NULL;
95 /* The type of floating-point to use. Keep this in sync with enum
96 arm_float_model, and the help string in _initialize_arm_tdep. */
97 static const char *fp_model_strings[] =
107 /* A variable that can be configured by the user. */
108 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
109 static const char *current_fp_model = "auto";
111 /* The ABI to use. Keep this in sync with arm_abi_kind. */
112 static const char *arm_abi_strings[] =
120 /* A variable that can be configured by the user. */
121 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
122 static const char *arm_abi_string = "auto";
124 /* The execution mode to assume. */
125 static const char *arm_mode_strings[] =
133 static const char *arm_fallback_mode_string = "auto";
134 static const char *arm_force_mode_string = "auto";
136 /* Number of different reg name sets (options). */
137 static int num_disassembly_options;
139 /* The standard register names, and all the valid aliases for them. Note
140 that `fp', `sp' and `pc' are not added in this alias list, because they
141 have been added as builtin user registers in
142 std-regs.c:_initialize_frame_reg. */
147 } arm_register_aliases[] = {
148 /* Basic register numbers. */
165 /* Synonyms (argument and variable registers). */
178 /* Other platform-specific names for r9. */
184 /* Names used by GCC (not listed in the ARM EABI). */
186 /* A special name from the older ATPCS. */
190 static const char *const arm_register_names[] =
191 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
192 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
193 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
194 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
195 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
196 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
197 "fps", "cpsr" }; /* 24 25 */
199 /* Valid register name styles. */
200 static const char **valid_disassembly_styles;
202 /* Disassembly style to use. Default to "std" register names. */
203 static const char *disassembly_style;
205 /* This is used to keep the bfd arch_info in sync with the disassembly
207 static void set_disassembly_style_sfunc(char *, int,
208 struct cmd_list_element *);
209 static void set_disassembly_style (void);
211 static void convert_from_extended (const struct floatformat *, const void *,
213 static void convert_to_extended (const struct floatformat *, void *,
216 static void arm_neon_quad_read (struct gdbarch *gdbarch,
217 struct regcache *regcache,
218 int regnum, gdb_byte *buf);
219 static void arm_neon_quad_write (struct gdbarch *gdbarch,
220 struct regcache *regcache,
221 int regnum, const gdb_byte *buf);
223 struct arm_prologue_cache
225 /* The stack pointer at the time this frame was created; i.e. the
226 caller's stack pointer when this function was called. It is used
227 to identify this frame. */
230 /* The frame base for this frame is just prev_sp - frame size.
231 FRAMESIZE is the distance from the frame pointer to the
232 initial stack pointer. */
236 /* The register used to hold the frame pointer for this frame. */
239 /* Saved register offsets. */
240 struct trad_frame_saved_reg *saved_regs;
243 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
244 CORE_ADDR prologue_start,
245 CORE_ADDR prologue_end,
246 struct arm_prologue_cache *cache);
248 /* Architecture version for displaced stepping. This affects the behaviour of
249 certain instructions, and really should not be hard-wired. */
251 #define DISPLACED_STEPPING_ARCH_VERSION 5
253 /* Addresses for calling Thumb functions have bit 0 set.
254 Here are some macros to test, set, or clear bit 0 of addresses. */
255 #define IS_THUMB_ADDR(addr) ((addr) & 1)
256 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
257 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
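/* Illustrative sketch, not from the original file: round-tripping an
   address through the Thumb-bit macros above.  For example, a Thumb
   function at 0x8000 is called through address 0x8001.  */
#if 0	/* Example only.  */
static CORE_ADDR
example_thumb_addr_round_trip (CORE_ADDR addr)
{
  int is_thumb = IS_THUMB_ADDR (addr);		/* 1 if bit 0 is set.  */
  CORE_ADDR real = UNMAKE_THUMB_ADDR (addr);	/* Code address, bit 0 clear.  */

  /* Rebuild the "call" address: set bit 0 again only for Thumb code.  */
  return is_thumb ? MAKE_THUMB_ADDR (real) : real;
}
#endif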
259 /* Set to true if the 32-bit mode is in use. */
263 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
266 arm_psr_thumb_bit (struct gdbarch *gdbarch)
268 if (gdbarch_tdep (gdbarch)->is_m)
274 /* Determine if FRAME is executing in Thumb mode. */
277 arm_frame_is_thumb (struct frame_info *frame)
280 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
282 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
283 directly (from a signal frame or dummy frame) or by interpreting
284 the saved LR (from a prologue or DWARF frame). So consult it and
285 trust the unwinders. */
286 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
288 return (cpsr & t_bit) != 0;
291 /* Callback for VEC_lower_bound. */
294 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
295 const struct arm_mapping_symbol *rhs)
297 return lhs->value < rhs->value;
300 /* Search for the mapping symbol covering MEMADDR. If one is found,
301 return its type. Otherwise, return 0. If START is non-NULL,
302 set *START to the location of the mapping symbol. */
305 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
307 struct obj_section *sec;
309 /* If there are mapping symbols, consult them. */
310 sec = find_pc_section (memaddr);
313 struct arm_per_objfile *data;
314 VEC(arm_mapping_symbol_s) *map;
315 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
319 data = objfile_data (sec->objfile, arm_objfile_data_key);
322 map = data->section_maps[sec->the_bfd_section->index];
323 if (!VEC_empty (arm_mapping_symbol_s, map))
325 struct arm_mapping_symbol *map_sym;
327 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
328 arm_compare_mapping_symbols);
330 /* VEC_lower_bound finds the earliest ordered insertion
331 point. If the following symbol starts at this exact
332 address, we use that; otherwise, the preceding
333 mapping symbol covers this address. */
334 if (idx < VEC_length (arm_mapping_symbol_s, map))
336 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
337 if (map_sym->value == map_key.value)
340 *start = map_sym->value + obj_section_addr (sec);
341 return map_sym->type;
347 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
349 *start = map_sym->value + obj_section_addr (sec);
350 return map_sym->type;
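/* Illustrative sketch, not from the original file: interpreting the
   result of arm_find_mapping_symbol.  ELF for the ARM architecture
   defines the mapping symbols $a, $t and $d; the cached type is the
   letter after the '$', so 't' marks Thumb code, 'a' ARM code and
   'd' literal data.  */
#if 0	/* Example only.  */
static int
example_addr_is_thumb_by_mapping (CORE_ADDR memaddr)
{
  CORE_ADDR start;
  char type = arm_find_mapping_symbol (memaddr, &start);

  if (type == 0)
    return -1;			/* No mapping symbol covers MEMADDR.  */
  return type == 't';		/* Thumb code begins at START.  */
}
#endif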
359 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
360 CORE_ADDR pc, int insert_bkpt);
362 /* Determine if the program counter specified in MEMADDR is in a Thumb
363 function. This function should be called for addresses unrelated to
364 any executing frame; otherwise, prefer arm_frame_is_thumb. */
367 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
369 struct obj_section *sec;
370 struct minimal_symbol *sym;
372 struct displaced_step_closure* dsc
373 = get_displaced_step_closure_by_addr(memaddr);
375 /* If we are checking the mode of a displaced instruction in the copy area,
376 the mode should be determined by the instruction at the original address. */
380 fprintf_unfiltered (gdb_stdlog,
381 "displaced: check mode of %.8lx instead of %.8lx\n",
382 (unsigned long) dsc->insn_addr,
383 (unsigned long) memaddr);
384 memaddr = dsc->insn_addr;
387 /* If bit 0 of the address is set, assume this is a Thumb address. */
388 if (IS_THUMB_ADDR (memaddr))
391 /* If the user wants to override the symbol table, let them. */
392 if (strcmp (arm_force_mode_string, "arm") == 0)
394 if (strcmp (arm_force_mode_string, "thumb") == 0)
397 /* ARM v6-M and v7-M are always in Thumb mode. */
398 if (gdbarch_tdep (gdbarch)->is_m)
401 /* If there are mapping symbols, consult them. */
402 type = arm_find_mapping_symbol (memaddr, NULL);
406 /* Thumb functions have a "special" bit set in minimal symbols. */
407 sym = lookup_minimal_symbol_by_pc (memaddr);
409 return (MSYMBOL_IS_SPECIAL (sym));
411 /* If the user wants to override the fallback mode, let them. */
412 if (strcmp (arm_fallback_mode_string, "arm") == 0)
414 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
417 /* If we couldn't find any symbol, but we're talking to a running
418 target, then trust the current value of $cpsr. This lets
419 "display/i $pc" always show the correct mode (though if there is
420 a symbol table we will not reach here, so it still may not be
421 displayed in the mode it will be executed).
423 As a further heuristic, if we detect that we are doing a single-step we
424 see what state executing the current instruction ends up with us being
426 if (target_has_registers)
428 struct frame_info *current_frame = get_current_frame ();
429 CORE_ADDR current_pc = get_frame_pc (current_frame);
430 int is_thumb = arm_frame_is_thumb (current_frame);
432 if (memaddr == current_pc)
436 struct gdbarch *gdbarch = get_frame_arch (current_frame);
437 next_pc = arm_get_next_pc_raw (current_frame, current_pc, FALSE);
438 if (memaddr == gdbarch_addr_bits_remove (gdbarch, next_pc))
439 return IS_THUMB_ADDR (next_pc);
445 /* Otherwise we're out of luck; we assume ARM. */
449 /* Remove useless bits from addresses in a running program. */
451 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
454 return UNMAKE_THUMB_ADDR (val);
456 return (val & 0x03fffffc);
459 /* When reading symbols, we need to zap the low bit of the address,
460 which may be set to 1 for Thumb functions. */
462 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
467 /* Return 1 if PC is the start of a compiler helper function which
468 can be safely ignored during prologue skipping. IS_THUMB is true
469 if the function is known to be a Thumb function due to the way it
472 skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
474 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
475 struct minimal_symbol *msym;
477 msym = lookup_minimal_symbol_by_pc (pc);
479 && SYMBOL_VALUE_ADDRESS (msym) == pc
480 && SYMBOL_LINKAGE_NAME (msym) != NULL)
482 const char *name = SYMBOL_LINKAGE_NAME (msym);
484 /* The GNU linker's Thumb call stub to foo is named
486 if (strstr (name, "_from_thumb") != NULL)
489 /* On soft-float targets, __truncdfsf2 is called to convert promoted
490 arguments to their argument types in non-prototyped
492 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
494 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
497 /* Internal functions related to thread-local storage. */
498 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
500 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
505 /* If we run against a stripped glibc, we may be unable to identify
506 special functions by name. Check for one important case,
507 __aeabi_read_tp, by comparing the *code* against the default
508 implementation (this is hand-written ARM assembler in glibc). */
511 && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
512 == 0xe3e00a0f /* mov r0, #0xffff0fff */
513 && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
514 == 0xe240f01f) /* sub pc, r0, #31 */
521 /* Support routines for instruction parsing. */
522 #define submask(x) ((1L << ((x) + 1)) - 1)
523 #define bit(obj,st) (((obj) >> (st)) & 1)
524 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
525 #define sbits(obj,st,fn) \
526 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
527 #define BranchDest(addr,instr) \
528 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
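/* Illustrative sketch, not from the original file: decoding an ARM
   branch with the helpers above.  The 24-bit offset field is sign
   extended by sbits and scaled by four, with the usual 8-byte pipeline
   adjustment folded into BranchDest.  */
#if 0	/* Example only.  */
static void
example_branch_dest (void)
{
  CORE_ADDR addr = 0x8000;
  unsigned long insn = 0xeafffffe;	/* "b ." - branch to itself.  */
  long imm24 = sbits (insn, 0, 23);	/* Sign-extends 0xfffffe to -2.  */

  gdb_assert (imm24 == -2);
  /* Destination is addr + 8 + (-2 << 2), i.e. the instruction itself.  */
  gdb_assert (BranchDest (addr, insn) == addr);
}
#endif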
530 /* Extract the immediate from a movw/movt instruction of encoding T. INSN1 is the
531 first 16 bits of the instruction, and INSN2 is the second 16 bits. */
533 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
534 ((bits ((insn1), 0, 3) << 12) \
535 | (bits ((insn1), 10, 10) << 11) \
536 | (bits ((insn2), 12, 14) << 8) \
537 | bits ((insn2), 0, 7))
539 /* Extract the immediate from a movw/movt instruction of encoding A. INSN is
540 the 32-bit instruction. */
541 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
542 ((bits ((insn), 16, 19) << 12) \
543 | bits ((insn), 0, 11))
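/* Illustrative sketch, not from the original file: rebuilding a 32-bit
   constant from a movw/movt pair with the macro above, the same way
   the stack-protector scanner below combines the two halves.  */
#if 0	/* Example only.  */
static void
example_movw_movt_extraction (void)
{
  unsigned int movw = 0xe301c234;	/* movw r12, #0x1234 */
  unsigned int movt = 0xe345c678;	/* movt r12, #0x5678 */
  unsigned int low = EXTRACT_MOVW_MOVT_IMM_A (movw);
  unsigned int high = EXTRACT_MOVW_MOVT_IMM_A (movt);

  gdb_assert (low == 0x1234 && high == 0x5678);
  gdb_assert ((high << 16 | low) == 0x56781234);
}
#endif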
545 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
548 thumb_expand_immediate (unsigned int imm)
550 unsigned int count = imm >> 7;
558 return (imm & 0xff) | ((imm & 0xff) << 16);
560 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
562 return (imm & 0xff) | ((imm & 0xff) << 8)
563 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
566 return (0x80 | (imm & 0x7f)) << (32 - count);
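/* Illustrative sketch, not from the original file: a few expected
   expansions of the pseudo-op above, useful as a sanity check.  The
   low four cases replicate the 8-bit payload across the word; larger
   encodings rotate 0x80|imm7 right by the 5-bit count.  */
#if 0	/* Example only.  */
static void
example_thumb_expand_immediate_checks (void)
{
  gdb_assert (thumb_expand_immediate (0x0ab) == 0x000000ab);
  gdb_assert (thumb_expand_immediate (0x1ab) == 0x00ab00ab);
  gdb_assert (thumb_expand_immediate (0x2ab) == 0xab00ab00);
  gdb_assert (thumb_expand_immediate (0x3ab) == 0xabababab);
  gdb_assert (thumb_expand_immediate (0x4ff) == 0x7f800000);
}
#endif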
569 /* Return 1 if the 16-bit Thumb instruction INST might change
570 control flow, 0 otherwise. */
573 thumb_instruction_changes_pc (unsigned short inst)
575 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
578 if ((inst & 0xf000) == 0xd000) /* conditional branch */
581 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
584 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
587 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
590 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
596 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
597 might change control flow, 0 otherwise. */
600 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
602 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
604 /* Branches and miscellaneous control instructions. */
606 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
611 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
613 /* SUBS PC, LR, #imm8. */
616 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
618 /* Conditional branch. */
625 if ((inst1 & 0xfe50) == 0xe810)
627 /* Load multiple or RFE. */
629 if (bit (inst1, 7) && !bit (inst1, 8))
635 else if (!bit (inst1, 7) && bit (inst1, 8))
641 else if (bit (inst1, 7) && bit (inst1, 8))
646 else if (!bit (inst1, 7) && !bit (inst1, 8))
655 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
657 /* MOV PC or MOVS PC. */
661 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
664 if (bits (inst1, 0, 3) == 15)
670 if ((inst2 & 0x0fc0) == 0x0000)
676 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
682 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
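/* Illustrative sketch, not from the original file: combining the two
   predicates above while walking Thumb code.  A halfword whose top
   five bits are 0b11101, 0b11110 or 0b11111 starts a 32-bit encoding,
   so the second halfword must be fetched as well.  */
#if 0	/* Example only.  */
static int
example_thumb_insn_changes_pc (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
  unsigned short insn = read_memory_unsigned_integer (pc, 2, order);

  if ((insn & 0xe000) == 0xe000 && (insn & 0xf800) != 0xe000)
    {
      /* 32-bit Thumb-2 encoding; fetch the second halfword too.  */
      unsigned short insn2 = read_memory_unsigned_integer (pc + 2, 2, order);
      return thumb2_instruction_changes_pc (insn, insn2);
    }
  return thumb_instruction_changes_pc (insn);
}
#endif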
691 /* Analyze a Thumb prologue, looking for a recognizable stack frame
692 and frame pointer. Scan until we encounter a store that could
693 clobber the stack frame unexpectedly, or an unknown instruction.
694 Return the last address which is definitely safe to skip for an
695 initial breakpoint. */
698 thumb_analyze_prologue (struct gdbarch *gdbarch,
699 CORE_ADDR start, CORE_ADDR limit,
700 struct arm_prologue_cache *cache)
702 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
703 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
706 struct pv_area *stack;
707 struct cleanup *back_to;
709 CORE_ADDR unrecognized_pc = 0;
711 for (i = 0; i < 16; i++)
712 regs[i] = pv_register (i, 0);
713 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
714 back_to = make_cleanup_free_pv_area (stack);
716 while (start < limit)
720 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
722 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
727 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
730 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
731 whether to save LR (R14). */
732 mask = (insn & 0xff) | ((insn & 0x100) << 6);
734 /* Calculate offsets of saved R0-R7 and LR. */
735 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
736 if (mask & (1 << regno))
738 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
740 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
743 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
746 offset = (insn & 0x7f) << 2; /* get scaled offset */
747 if (insn & 0x80) /* Check for SUB. */
748 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
751 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
754 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
755 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
757 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
758 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
759 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
761 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
762 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
763 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
765 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
766 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
767 && pv_is_constant (regs[bits (insn, 3, 5)]))
768 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
769 regs[bits (insn, 6, 8)]);
770 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
771 && pv_is_constant (regs[bits (insn, 3, 6)]))
773 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
774 int rm = bits (insn, 3, 6);
775 regs[rd] = pv_add (regs[rd], regs[rm]);
777 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
779 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
780 int src_reg = (insn & 0x78) >> 3;
781 regs[dst_reg] = regs[src_reg];
783 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
785 /* Handle stores to the stack. Normally pushes are used,
786 but with GCC -mtpcs-frame, there may be other stores
787 in the prologue to create the frame. */
788 int regno = (insn >> 8) & 0x7;
791 offset = (insn & 0xff) << 2;
792 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
794 if (pv_area_store_would_trash (stack, addr))
797 pv_area_store (stack, addr, 4, regs[regno]);
799 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
801 int rd = bits (insn, 0, 2);
802 int rn = bits (insn, 3, 5);
805 offset = bits (insn, 6, 10) << 2;
806 addr = pv_add_constant (regs[rn], offset);
808 if (pv_area_store_would_trash (stack, addr))
811 pv_area_store (stack, addr, 4, regs[rd]);
813 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
814 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
815 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
816 /* Ignore stores of argument registers to the stack. */
818 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
819 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
820 /* Ignore block loads from the stack, potentially copying
821 parameters from memory. */
823 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
824 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
825 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
826 /* Similarly ignore single loads from the stack. */
828 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
829 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
830 /* Skip register copies, i.e. saves to another register
831 instead of the stack. */
833 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
834 /* Recognize constant loads; even with small stacks these are necessary
836 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
837 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
839 /* Constant pool loads, for the same reason. */
840 unsigned int constant;
843 loc = start + 4 + bits (insn, 0, 7) * 4;
844 constant = read_memory_unsigned_integer (loc, 4, byte_order);
845 regs[bits (insn, 8, 10)] = pv_constant (constant);
847 else if ((insn & 0xe000) == 0xe000)
849 unsigned short inst2;
851 inst2 = read_memory_unsigned_integer (start + 2, 2,
852 byte_order_for_code);
854 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
856 /* BL, BLX. Allow some special function calls when
857 skipping the prologue; GCC generates these before
858 storing arguments to the stack. */
860 int j1, j2, imm1, imm2;
862 imm1 = sbits (insn, 0, 10);
863 imm2 = bits (inst2, 0, 10);
864 j1 = bit (inst2, 13);
865 j2 = bit (inst2, 11);
867 offset = ((imm1 << 12) + (imm2 << 1));
868 offset ^= ((!j2) << 22) | ((!j1) << 23);
870 nextpc = start + 4 + offset;
871 /* For BLX make sure to clear the low bits. */
872 if (bit (inst2, 12) == 0)
873 nextpc = nextpc & 0xfffffffc;
875 if (!skip_prologue_function (gdbarch, nextpc,
876 bit (inst2, 12) != 0))
880 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
882 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
884 pv_t addr = regs[bits (insn, 0, 3)];
887 if (pv_area_store_would_trash (stack, addr))
890 /* Calculate offsets of saved registers. */
891 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
892 if (inst2 & (1 << regno))
894 addr = pv_add_constant (addr, -4);
895 pv_area_store (stack, addr, 4, regs[regno]);
899 regs[bits (insn, 0, 3)] = addr;
902 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
904 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
906 int regno1 = bits (inst2, 12, 15);
907 int regno2 = bits (inst2, 8, 11);
908 pv_t addr = regs[bits (insn, 0, 3)];
910 offset = inst2 & 0xff;
912 addr = pv_add_constant (addr, offset);
914 addr = pv_add_constant (addr, -offset);
916 if (pv_area_store_would_trash (stack, addr))
919 pv_area_store (stack, addr, 4, regs[regno1]);
920 pv_area_store (stack, pv_add_constant (addr, 4),
924 regs[bits (insn, 0, 3)] = addr;
927 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
928 && (inst2 & 0x0c00) == 0x0c00
929 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
931 int regno = bits (inst2, 12, 15);
932 pv_t addr = regs[bits (insn, 0, 3)];
934 offset = inst2 & 0xff;
936 addr = pv_add_constant (addr, offset);
938 addr = pv_add_constant (addr, -offset);
940 if (pv_area_store_would_trash (stack, addr))
943 pv_area_store (stack, addr, 4, regs[regno]);
946 regs[bits (insn, 0, 3)] = addr;
949 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
950 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
952 int regno = bits (inst2, 12, 15);
955 offset = inst2 & 0xfff;
956 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
958 if (pv_area_store_would_trash (stack, addr))
961 pv_area_store (stack, addr, 4, regs[regno]);
964 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
965 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
966 /* Ignore stores of argument registers to the stack. */
969 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
970 && (inst2 & 0x0d00) == 0x0c00
971 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
972 /* Ignore stores of argument registers to the stack. */
975 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
977 && (inst2 & 0x8000) == 0x0000
978 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
979 /* Ignore block loads from the stack, potentially copying
980 parameters from memory. */
983 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
985 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
986 /* Similarly ignore dual loads from the stack. */
989 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
990 && (inst2 & 0x0d00) == 0x0c00
991 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
992 /* Similarly ignore single loads from the stack. */
995 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
996 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
997 /* Similarly ignore single loads from the stack. */
1000 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
1001 && (inst2 & 0x8000) == 0x0000)
1003 unsigned int imm = ((bits (insn, 10, 10) << 11)
1004 | (bits (inst2, 12, 14) << 8)
1005 | bits (inst2, 0, 7));
1007 regs[bits (inst2, 8, 11)]
1008 = pv_add_constant (regs[bits (insn, 0, 3)],
1009 thumb_expand_immediate (imm));
1012 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1013 && (inst2 & 0x8000) == 0x0000)
1015 unsigned int imm = ((bits (insn, 10, 10) << 11)
1016 | (bits (inst2, 12, 14) << 8)
1017 | bits (inst2, 0, 7));
1019 regs[bits (inst2, 8, 11)]
1020 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1023 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1024 && (inst2 & 0x8000) == 0x0000)
1026 unsigned int imm = ((bits (insn, 10, 10) << 11)
1027 | (bits (inst2, 12, 14) << 8)
1028 | bits (inst2, 0, 7));
1030 regs[bits (inst2, 8, 11)]
1031 = pv_add_constant (regs[bits (insn, 0, 3)],
1032 - (CORE_ADDR) thumb_expand_immediate (imm));
1035 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1036 && (inst2 & 0x8000) == 0x0000)
1038 unsigned int imm = ((bits (insn, 10, 10) << 11)
1039 | (bits (inst2, 12, 14) << 8)
1040 | bits (inst2, 0, 7));
1042 regs[bits (inst2, 8, 11)]
1043 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1046 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1048 unsigned int imm = ((bits (insn, 10, 10) << 11)
1049 | (bits (inst2, 12, 14) << 8)
1050 | bits (inst2, 0, 7));
1052 regs[bits (inst2, 8, 11)]
1053 = pv_constant (thumb_expand_immediate (imm));
1056 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1059 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1061 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1064 else if (insn == 0xea5f /* mov.w Rd,Rm */
1065 && (inst2 & 0xf0f0) == 0)
1067 int dst_reg = (inst2 & 0x0f00) >> 8;
1068 int src_reg = inst2 & 0xf;
1069 regs[dst_reg] = regs[src_reg];
1072 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1074 /* Constant pool loads. */
1075 unsigned int constant;
1078 offset = bits (insn, 0, 11);
1080 loc = start + 4 + offset;
1082 loc = start + 4 - offset;
1084 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1085 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1088 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1090 /* Constant pool loads. */
1091 unsigned int constant;
1094 offset = bits (insn, 0, 7) << 2;
1096 loc = start + 4 + offset;
1098 loc = start + 4 - offset;
1100 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1101 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1103 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1104 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1107 else if (thumb2_instruction_changes_pc (insn, inst2))
1109 /* Don't scan past anything that might change control flow. */
1114 /* The optimizer might shove anything into the prologue,
1115 so we just skip what we don't recognize. */
1116 unrecognized_pc = start;
1121 else if (thumb_instruction_changes_pc (insn))
1123 /* Don't scan past anything that might change control flow. */
1128 /* The optimizer might shove anything into the prologue,
1129 so we just skip what we don't recognize. */
1130 unrecognized_pc = start;
1137 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1138 paddress (gdbarch, start));
1140 if (unrecognized_pc == 0)
1141 unrecognized_pc = start;
1145 do_cleanups (back_to);
1146 return unrecognized_pc;
1149 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1151 /* Frame pointer is fp. Frame size is constant. */
1152 cache->framereg = ARM_FP_REGNUM;
1153 cache->framesize = -regs[ARM_FP_REGNUM].k;
1155 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1157 /* Frame pointer is r7. Frame size is constant. */
1158 cache->framereg = THUMB_FP_REGNUM;
1159 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1161 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1163 /* Try the stack pointer... this is a bit desperate. */
1164 cache->framereg = ARM_SP_REGNUM;
1165 cache->framesize = -regs[ARM_SP_REGNUM].k;
1169 /* We're just out of luck. We don't know where the frame is. */
1170 cache->framereg = -1;
1171 cache->framesize = 0;
1174 for (i = 0; i < 16; i++)
1175 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1176 cache->saved_regs[i].addr = offset;
1178 do_cleanups (back_to);
1179 return unrecognized_pc;
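/* Illustrative sketch, not from the original file: what the scanner
   above records for a single "push {r4, r5, lr}" (opcode 0xb530).
   Walking the derived mask from LR downwards places lr at [sp0-4],
   r5 at [sp0-8] and r4 at [sp0-12], leaving SP at sp0 - 12; those
   offsets are what later end up in CACHE->saved_regs.  */
#if 0	/* Example only.  */
static void
example_thumb_push_mask (void)
{
  unsigned short insn = 0xb530;		/* push {r4, r5, lr} */
  int mask = (insn & 0xff) | ((insn & 0x100) << 6);

  gdb_assert (mask == ((1 << 4) | (1 << 5) | (1 << ARM_LR_REGNUM)));
}
#endif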
1183 /* Try to analyze the instructions starting from PC, which load the symbol
1184 __stack_chk_guard. Return the address of the instruction after that
1185 sequence, set the destination register number to *DESTREG, and set the size
1186 of the sequence to *OFFSET; return 0 if the instructions are not recognized. */
1190 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1191 unsigned int *destreg, int *offset)
1193 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1194 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1195 unsigned int low, high, address;
1200 unsigned short insn1
1201 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1203 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1205 *destreg = bits (insn1, 8, 10);
1207 address = bits (insn1, 0, 7);
1209 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1211 unsigned short insn2
1212 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1214 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1217 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1219 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1221 /* movt Rd, #const */
1222 if ((insn1 & 0xfbc0) == 0xf2c0)
1224 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1225 *destreg = bits (insn2, 8, 11);
1227 address = (high << 16 | low);
1234 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1236 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1238 address = bits (insn, 0, 11);
1239 *destreg = bits (insn, 12, 15);
1242 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1244 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1247 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1249 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1251 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1252 *destreg = bits (insn, 12, 15);
1254 address = (high << 16 | low);
1262 /* Try to skip a sequence of instructions used for stack protector. If PC
1263 points to the first instruction of this sequence, return the address of
1264 first instruction after this sequence; otherwise, return the original PC.
1266 On ARM, this sequence of instructions is mainly composed of three steps:
1267 Step 1: load the symbol __stack_chk_guard,
1268 Step 2: load from the address of __stack_chk_guard,
1269 Step 3: store it somewhere else.
1271 Usually, the instructions in steps 2 and 3 are the same across ARM
1272 architectures: step 2 is the single instruction 'ldr Rx, [Rn, #0]', and
1273 step 3 is the single instruction 'str Rx, [r7, #immd]'. However, the
1274 instructions in step 1 vary between ARM architectures. On ARMv7,
1277 movw Rn, #:lower16:__stack_chk_guard
1278 movt Rn, #:upper16:__stack_chk_guard
1285 .word __stack_chk_guard
1287 Since ldr/str are very common instructions, we can't use them alone as the
1288 'fingerprint' or 'signature' of a stack protector sequence. Instead we use
1289 the sequence {movw/movt, ldr}/ldr/str plus the symbol __stack_chk_guard (if
1290 not stripped) as the 'fingerprint' of a stack protector code sequence. */
1293 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1295 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1296 unsigned int address, basereg;
1297 struct minimal_symbol *stack_chk_guard;
1299 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1302 /* Try to parse the instructions in Step 1. */
1303 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1308 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1309 /* If the symbol's name doesn't start with '__stack_chk_guard', this
1310 instruction sequence is not a stack protector. If the symbol has been
1311 stripped, we conservatively assume the sequence is a stack protector. */
1313 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
1314 strlen ("__stack_chk_guard")) != 0)
1319 unsigned int destreg;
1321 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1323 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1324 if ((insn & 0xf800) != 0x6800)
1326 if (bits (insn, 3, 5) != basereg)
1328 destreg = bits (insn, 0, 2);
1330 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1331 byte_order_for_code);
1332 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1333 if ((insn & 0xf800) != 0x6000)
1335 if (destreg != bits (insn, 0, 2))
1340 unsigned int destreg;
1342 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1344 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1345 if ((insn & 0x0e500000) != 0x04100000)
1347 if (bits (insn, 16, 19) != basereg)
1349 destreg = bits (insn, 12, 15);
1350 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1351 insn = read_memory_unsigned_integer (pc + offset + 4,
1352 4, byte_order_for_code);
1353 if ((insn & 0x0e500000) != 0x04000000)
1355 if (bits (insn, 12, 15) != destreg)
1358 /* The total size of the two ldr/str instructions is 4 on Thumb-2, and 8
1361 return pc + offset + 4;
1363 return pc + offset + 8;
1366 /* Advance the PC across any function entry prologue instructions to
1367 reach some "real" code.
1369 The APCS (ARM Procedure Call Standard) defines the following
1373 [stmfd sp!, {a1,a2,a3,a4}]
1374 stmfd sp!, {...,fp,ip,lr,pc}
1375 [stfe f7, [sp, #-12]!]
1376 [stfe f6, [sp, #-12]!]
1377 [stfe f5, [sp, #-12]!]
1378 [stfe f4, [sp, #-12]!]
1379 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1382 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1384 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1387 CORE_ADDR func_addr, limit_pc;
1388 struct symtab_and_line sal;
1390 /* See if we can determine the end of the prologue via the symbol table.
1391 If so, then return either PC, or the PC after the prologue, whichever
1393 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1395 CORE_ADDR post_prologue_pc
1396 = skip_prologue_using_sal (gdbarch, func_addr);
1397 struct symtab *s = find_pc_symtab (func_addr);
1399 if (post_prologue_pc)
1401 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1404 /* GCC always emits a line note before the prologue and another
1405 one after, even if the two are at the same address or on the
1406 same line. Take advantage of this so that we do not need to
1407 know every instruction that might appear in the prologue. We
1408 will have producer information for most binaries; if it is
1409 missing (e.g. for -gstabs), assume the GNU tools. */
1410 if (post_prologue_pc
1412 || s->producer == NULL
1413 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1414 return post_prologue_pc;
1416 if (post_prologue_pc != 0)
1418 CORE_ADDR analyzed_limit;
1420 /* For non-GCC compilers, make sure the entire line is an
1421 acceptable prologue; GDB will round this function's
1422 return value up to the end of the following line so we
1423 can not skip just part of a line (and we do not want to).
1425 RealView does not treat the prologue specially, but does
1426 associate prologue code with the opening brace; so this
1427 lets us skip the first line if we think it is the opening
1429 if (arm_pc_is_thumb (gdbarch, func_addr))
1430 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1431 post_prologue_pc, NULL);
1433 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1434 post_prologue_pc, NULL);
1436 if (analyzed_limit != post_prologue_pc)
1439 return post_prologue_pc;
1443 /* Can't determine prologue from the symbol table, need to examine
1446 /* Find an upper limit on the function prologue using the debug
1447 information. If the debug information could not be used to provide
1448 that bound, then use an arbitrary large number as the upper bound. */
1449 /* Like arm_scan_prologue, stop no later than pc + 64. */
1450 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1452 limit_pc = pc + 64; /* Magic. */
1455 /* Check if this is Thumb code. */
1456 if (arm_pc_is_thumb (gdbarch, pc))
1457 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1459 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1461 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1463 /* "mov ip, sp" is no longer a required part of the prologue. */
1464 if (inst == 0xe1a0c00d) /* mov ip, sp */
1467 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1470 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1473 /* Some prologues begin with "str lr, [sp, #-4]!". */
1474 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1477 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1480 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1483 /* Any insns after this point may float into the code, if it makes
1484 for better instruction scheduling, so we skip them only if we
1485 find them, but still consider the function to be frame-ful. */
1487 /* We may have either one sfmfd instruction here, or several stfe
1488 insns, depending on the version of floating point code we
1490 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1493 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1496 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1499 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1502 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1503 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1504 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1507 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1508 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1509 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1512 /* Unrecognized instruction; stop scanning. */
1516 return skip_pc; /* End of prologue. */
1520 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1521 This function decodes a Thumb function prologue to determine:
1522 1) the size of the stack frame
1523 2) which registers are saved on it
1524 3) the offsets of saved regs
1525 4) the offset from the stack pointer to the frame pointer
1527 A typical Thumb function prologue would create this stack frame
1528 (offsets relative to FP)
1529 old SP -> 24 stack parameters
1532 R7 -> 0 local variables (16 bytes)
1533 SP -> -12 additional stack space (12 bytes)
1534 The frame size would thus be 36 bytes, and the frame offset would be
1535 12 bytes. The frame register is R7.
1537 The comments for thumb_analyze_prologue() describe the algorithm we use
1538 to detect the end of the prologue. */
1542 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1543 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1545 CORE_ADDR prologue_start;
1546 CORE_ADDR prologue_end;
1547 CORE_ADDR current_pc;
1549 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1552 /* See comment in arm_scan_prologue for an explanation of
1554 if (prologue_end > prologue_start + 64)
1556 prologue_end = prologue_start + 64;
1560 /* We're in the boondocks: we have no idea where the start of the
1564 prologue_end = min (prologue_end, prev_pc);
1566 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1569 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1572 arm_instruction_changes_pc (uint32_t this_instr)
1574 if (bits (this_instr, 28, 31) == INST_NV)
1575 /* Unconditional instructions. */
1576 switch (bits (this_instr, 24, 27))
1580 /* Branch with Link and change to Thumb. */
1585 /* Coprocessor register transfer. */
1586 if (bits (this_instr, 12, 15) == 15)
1587 error (_("Invalid update to pc in instruction"));
1593 switch (bits (this_instr, 25, 27))
1596 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1598 /* Multiplies and extra load/stores. */
1599 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1600 /* Neither multiplies nor extension load/stores are allowed
1604 /* Otherwise, miscellaneous instructions. */
1606 /* BX <reg>, BXJ <reg>, BLX <reg> */
1607 if (bits (this_instr, 4, 27) == 0x12fff1
1608 || bits (this_instr, 4, 27) == 0x12fff2
1609 || bits (this_instr, 4, 27) == 0x12fff3)
1612 /* Other miscellaneous instructions are unpredictable if they
1616 /* Data processing instruction. Fall through. */
1619 if (bits (this_instr, 12, 15) == 15)
1626 /* Media instructions and architecturally undefined instructions. */
1627 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1631 if (bit (this_instr, 20) == 0)
1635 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1641 /* Load/store multiple. */
1642 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1648 /* Branch and branch with link. */
1653 /* Coprocessor transfers or SWIs can not affect PC. */
1657 internal_error (__FILE__, __LINE__, _("bad value in switch"));
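/* Illustrative sketch, not from the original file: a few concrete
   encodings checked against the predicate above.  A plain register
   move leaves the PC alone, while popping into the PC or a bx both
   redirect control flow.  */
#if 0	/* Example only.  */
static void
example_arm_changes_pc_checks (void)
{
  gdb_assert (!arm_instruction_changes_pc (0xe1a00001));  /* mov r0, r1 */
  gdb_assert (arm_instruction_changes_pc (0xe8bd8800));   /* pop {fp, pc} */
  gdb_assert (arm_instruction_changes_pc (0xe12fff1e));   /* bx lr */
}
#endif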
1661 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1662 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1663 fill it in. Return the first address not recognized as a prologue
1666 We recognize all the instructions typically found in ARM prologues,
1667 plus harmless instructions which can be skipped (either for analysis
1668 purposes, or a more restrictive set that can be skipped when finding
1669 the end of the prologue). */
1672 arm_analyze_prologue (struct gdbarch *gdbarch,
1673 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1674 struct arm_prologue_cache *cache)
1676 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1677 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1679 CORE_ADDR offset, current_pc;
1680 pv_t regs[ARM_FPS_REGNUM];
1681 struct pv_area *stack;
1682 struct cleanup *back_to;
1683 int framereg, framesize;
1684 CORE_ADDR unrecognized_pc = 0;
1686 /* Search the prologue looking for instructions that set up the
1687 frame pointer, adjust the stack pointer, and save registers.
1689 Be careful, however, and if it doesn't look like a prologue,
1690 don't try to scan it. If, for instance, a frameless function
1691 begins with stmfd sp!, then we will tell ourselves there is
1692 a frame, which will confuse stack traceback, as well as "finish"
1693 and other operations that rely on a knowledge of the stack
1696 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1697 regs[regno] = pv_register (regno, 0);
1698 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1699 back_to = make_cleanup_free_pv_area (stack);
1701 for (current_pc = prologue_start;
1702 current_pc < prologue_end;
1706 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1708 if (insn == 0xe1a0c00d) /* mov ip, sp */
1710 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1713 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1714 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1716 unsigned imm = insn & 0xff; /* immediate value */
1717 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1718 int rd = bits (insn, 12, 15);
1719 imm = (imm >> rot) | (imm << (32 - rot));
1720 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1723 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1724 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1726 unsigned imm = insn & 0xff; /* immediate value */
1727 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1728 int rd = bits (insn, 12, 15);
1729 imm = (imm >> rot) | (imm << (32 - rot));
1730 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1733 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1736 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1738 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1739 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1740 regs[bits (insn, 12, 15)]);
1743 else if ((insn & 0xffff0000) == 0xe92d0000)
1744 /* stmfd sp!, {..., fp, ip, lr, pc}
1746 stmfd sp!, {a1, a2, a3, a4} */
1748 int mask = insn & 0xffff;
1750 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1753 /* Calculate offsets of saved registers. */
1754 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1755 if (mask & (1 << regno))
1758 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1759 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1762 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1763 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1764 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1766 /* No need to add this to saved_regs -- it's just an arg reg. */
1769 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1770 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1771 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1773 /* No need to add this to saved_regs -- it's just an arg reg. */
1776 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1778 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1780 /* No need to add this to saved_regs -- it's just arg regs. */
1783 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1785 unsigned imm = insn & 0xff; /* immediate value */
1786 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1787 imm = (imm >> rot) | (imm << (32 - rot));
1788 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1790 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1792 unsigned imm = insn & 0xff; /* immediate value */
1793 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1794 imm = (imm >> rot) | (imm << (32 - rot));
1795 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1797 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1799 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1801 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1804 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1805 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1806 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1808 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1810 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1812 int n_saved_fp_regs;
1813 unsigned int fp_start_reg, fp_bound_reg;
1815 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1818 if ((insn & 0x800) == 0x800) /* N0 is set */
1820 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1821 n_saved_fp_regs = 3;
1823 n_saved_fp_regs = 1;
1827 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1828 n_saved_fp_regs = 2;
1830 n_saved_fp_regs = 4;
1833 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1834 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1835 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1837 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1838 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1839 regs[fp_start_reg++]);
1842 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1844 /* Allow some special function calls when skipping the
1845 prologue; GCC generates these before storing arguments to
1847 CORE_ADDR dest = BranchDest (current_pc, insn);
1849 if (skip_prologue_function (gdbarch, dest, 0))
1854 else if ((insn & 0xf0000000) != 0xe0000000)
1855 break; /* Condition not true, exit early. */
1856 else if (arm_instruction_changes_pc (insn))
1857 /* Don't scan past anything that might change control flow. */
1859 else if ((insn & 0xfe500000) == 0xe8100000) /* ldm */
1861 /* Ignore block loads from the stack, potentially copying
1862 parameters from memory. */
1863 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1868 else if ((insn & 0xfc500000) == 0xe4100000)
1870 /* Similarly ignore single loads from the stack. */
1871 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1876 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1877 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1878 register instead of the stack. */
1882 /* The optimizer might shove anything into the prologue,
1883 so we just skip what we don't recognize. */
1884 unrecognized_pc = current_pc;
1889 if (unrecognized_pc == 0)
1890 unrecognized_pc = current_pc;
1892 /* The frame size is just the distance from the frame register
1893 to the original stack pointer. */
1894 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1896 /* Frame pointer is fp. */
1897 framereg = ARM_FP_REGNUM;
1898 framesize = -regs[ARM_FP_REGNUM].k;
1900 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1902 /* Try the stack pointer... this is a bit desperate. */
1903 framereg = ARM_SP_REGNUM;
1904 framesize = -regs[ARM_SP_REGNUM].k;
1908 /* We're just out of luck. We don't know where the frame is. */
1915 cache->framereg = framereg;
1916 cache->framesize = framesize;
1918 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1919 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1920 cache->saved_regs[regno].addr = offset;
1924 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1925 paddress (gdbarch, unrecognized_pc));
1927 do_cleanups (back_to);
1928 return unrecognized_pc;
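/* Illustrative sketch, not from the original file: the rotate-based
   immediate decoding used repeatedly above.  An ARM data-processing
   immediate is an 8-bit value rotated right by twice a 4-bit field,
   so "sub sp, sp, #1024" encodes 0x40 with a rotation of 28.  */
#if 0	/* Example only.  */
static void
example_arm_rotated_immediate (void)
{
  unsigned int insn = 0xe24dde40;	/* sub sp, sp, #1024 */
  unsigned imm = insn & 0xff;		/* 0x40 */
  unsigned rot = (insn & 0xf00) >> 7;	/* 2 * 14 == 28 */

  imm = (imm >> rot) | (imm << (32 - rot));
  gdb_assert (imm == 1024);
}
#endif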
1932 arm_scan_prologue (struct frame_info *this_frame,
1933 struct arm_prologue_cache *cache)
1935 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1936 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1938 CORE_ADDR prologue_start, prologue_end, current_pc;
1939 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1940 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1941 pv_t regs[ARM_FPS_REGNUM];
1942 struct pv_area *stack;
1943 struct cleanup *back_to;
1946 /* Assume there is no frame until proven otherwise. */
1947 cache->framereg = ARM_SP_REGNUM;
1948 cache->framesize = 0;
1950 /* Check for Thumb prologue. */
1951 if (arm_frame_is_thumb (this_frame))
1953 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1957 /* Find the function prologue. If we can't find the function in
1958 the symbol table, peek in the stack frame to find the PC. */
1959 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1962 /* One way to find the end of the prologue (which works well
1963 for unoptimized code) is to do the following:
1965 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1968 prologue_end = prev_pc;
1969 else if (sal.end < prologue_end)
1970 prologue_end = sal.end;
1972 This mechanism is very accurate so long as the optimizer
1973 doesn't move any instructions from the function body into the
1974 prologue. If this happens, sal.end will be the last
1975 instruction in the first hunk of prologue code just before
1976 the first instruction that the scheduler has moved from
1977 the body to the prologue.
1979 In order to make sure that we scan all of the prologue
1980 instructions, we use a slightly less accurate mechanism which
1981 may scan more than necessary. To help compensate for this
1982 lack of accuracy, the prologue scanning loop below contains
1983 several clauses which will cause the loop to terminate early if
1984 an implausible prologue instruction is encountered.
1990 is a suitable endpoint since it accounts for the largest
1991 possible prologue plus up to five instructions inserted by
1994 if (prologue_end > prologue_start + 64)
1996 prologue_end = prologue_start + 64; /* See above. */
2001 /* We have no symbol information. Our only option is to assume this
2002 function has a standard stack frame and the normal frame register.
2003 Then, we can find the value of our frame pointer on entrance to
2004 the callee (or at the present moment if this is the innermost frame).
2005 The value stored there should be the address of the stmfd + 8. */
2006 CORE_ADDR frame_loc;
2007 LONGEST return_value;
2009 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
2010 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
2014 prologue_start = gdbarch_addr_bits_remove
2015 (gdbarch, return_value) - 8;
2016 prologue_end = prologue_start + 64; /* See above. */
2020 if (prev_pc < prologue_end)
2021 prologue_end = prev_pc;
2023 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2026 static struct arm_prologue_cache *
2027 arm_make_prologue_cache (struct frame_info *this_frame)
2030 struct arm_prologue_cache *cache;
2031 CORE_ADDR unwound_fp;
2033 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2034 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2036 arm_scan_prologue (this_frame, cache);
2038 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2039 if (unwound_fp == 0)
2042 cache->prev_sp = unwound_fp + cache->framesize;
2044 /* Calculate actual addresses of saved registers using offsets
2045 determined by arm_scan_prologue. */
2046 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2047 if (trad_frame_addr_p (cache->saved_regs, reg))
2048 cache->saved_regs[reg].addr += cache->prev_sp;
2053 /* Our frame ID for a normal frame is the current function's starting PC
2054 and the caller's SP when we were called. */
2057 arm_prologue_this_id (struct frame_info *this_frame,
2059 struct frame_id *this_id)
2061 struct arm_prologue_cache *cache;
2065 if (*this_cache == NULL)
2066 *this_cache = arm_make_prologue_cache (this_frame);
2067 cache = *this_cache;
2069 /* This is meant to halt the backtrace at "_start". */
2070 pc = get_frame_pc (this_frame);
2071 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2074 /* If we've hit a wall, stop. */
2075 if (cache->prev_sp == 0)
2078 /* Use function start address as part of the frame ID. If we cannot
2079 identify the start address (due to missing symbol information),
2080 fall back to just using the current PC. */
2081 func = get_frame_func (this_frame);
2085 id = frame_id_build (cache->prev_sp, func);
2089 static struct value *
2090 arm_prologue_prev_register (struct frame_info *this_frame,
2094 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2095 struct arm_prologue_cache *cache;
2097 if (*this_cache == NULL)
2098 *this_cache = arm_make_prologue_cache (this_frame);
2099 cache = *this_cache;
2101 /* If we are asked to unwind the PC, then we need to return the LR
2102 instead. The prologue may save PC, but it will point into this
2103 frame's prologue, not the next frame's resume location. Also
2104 strip the saved T bit. A valid LR may have the low bit set, but
2105 a valid PC never does. */
2106 if (prev_regnum == ARM_PC_REGNUM)
2110 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2111 return frame_unwind_got_constant (this_frame, prev_regnum,
2112 arm_addr_bits_remove (gdbarch, lr));
2115 /* SP is generally not saved to the stack, but this frame is
2116 identified by the next frame's stack pointer at the time of the call.
2117 The value was already reconstructed into PREV_SP. */
2118 if (prev_regnum == ARM_SP_REGNUM)
2119 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2121 /* The CPSR may have been changed by the call instruction and by the
2122 called function. The only bit we can reconstruct is the T bit,
2123 by checking the low bit of LR as of the call. This is a reliable
2124 indicator of Thumb-ness except for some ARM v4T pre-interworking
2125 Thumb code, which could get away with a clear low bit as long as
2126 the called function did not use bx. Guess that all other
2127 bits are unchanged; the condition flags are presumably lost,
2128 but the processor status is likely valid. */
2129 if (prev_regnum == ARM_PS_REGNUM)
2132 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2134 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2135 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2136 if (IS_THUMB_ADDR (lr))
2140 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2143 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2147 struct frame_unwind arm_prologue_unwind = {
2149 arm_prologue_this_id,
2150 arm_prologue_prev_register,
2152 default_frame_sniffer
2155 /* Maintain a list of ARM exception table entries per objfile, similar to the
2156 list of mapping symbols. We only cache entries for standard ARM-defined
2157 personality routines; the cache will contain only the frame unwinding
2158 instructions associated with the entry (not the descriptors). */
2160 static const struct objfile_data *arm_exidx_data_key;
2162 struct arm_exidx_entry
2167 typedef struct arm_exidx_entry arm_exidx_entry_s;
2168 DEF_VEC_O(arm_exidx_entry_s);
2170 struct arm_exidx_data
2172 VEC(arm_exidx_entry_s) **section_maps;
2176 arm_exidx_data_free (struct objfile *objfile, void *arg)
2178 struct arm_exidx_data *data = arg;
2181 for (i = 0; i < objfile->obfd->section_count; i++)
2182 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2186 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2187 const struct arm_exidx_entry *rhs)
2189 return lhs->addr < rhs->addr;
2192 static struct obj_section *
2193 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2195 struct obj_section *osect;
2197 ALL_OBJFILE_OSECTIONS (objfile, osect)
2198 if (bfd_get_section_flags (objfile->obfd,
2199 osect->the_bfd_section) & SEC_ALLOC)
2201 bfd_vma start, size;
2202 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2203 size = bfd_get_section_size (osect->the_bfd_section);
2205 if (start <= vma && vma < start + size)
2212 /* Parse contents of exception table and exception index sections
2213 of OBJFILE, and fill in the exception table entry cache.
2215 For each entry that refers to a standard ARM-defined personality
2216 routine, extract the frame unwinding instructions (from either
2217 the index or the table section). The unwinding instructions
2219 - extracting them from the rest of the table data
2220 - converting to host endianness
2221 - appending the implicit 0xb0 ("Finish") code
2223 The extracted and normalized instructions are stored for later
2224 retrieval by the arm_find_exidx_entry routine. */
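/* As an illustration of the prel31 encoding decoded below (values made up):
   each .ARM.exidx entry is a pair of 32-bit words, and the first word holds
   a 31-bit place-relative offset to the function it covers. A raw first
   word of 0x7ffffff8 sign-extends as
   ((0x7ffffff8 & 0x7fffffff) ^ 0x40000000) - 0x40000000 == -8,
   so the covered function starts 8 bytes before the entry's own address. */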
2227 arm_exidx_new_objfile (struct objfile *objfile)
2229 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2230 struct arm_exidx_data *data;
2231 asection *exidx, *extab;
2232 bfd_vma exidx_vma = 0, extab_vma = 0;
2233 bfd_size_type exidx_size = 0, extab_size = 0;
2234 gdb_byte *exidx_data = NULL, *extab_data = NULL;
2237 /* If we've already touched this file, do nothing. */
2238 if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
2241 /* Read contents of exception table and index. */
2242 exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
2245 exidx_vma = bfd_section_vma (objfile->obfd, exidx);
2246 exidx_size = bfd_get_section_size (exidx);
2247 exidx_data = xmalloc (exidx_size);
2248 make_cleanup (xfree, exidx_data);
2250 if (!bfd_get_section_contents (objfile->obfd, exidx,
2251 exidx_data, 0, exidx_size))
2253 do_cleanups (cleanups);
2258 extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
2261 extab_vma = bfd_section_vma (objfile->obfd, extab);
2262 extab_size = bfd_get_section_size (extab);
2263 extab_data = xmalloc (extab_size);
2264 make_cleanup (xfree, extab_data);
2266 if (!bfd_get_section_contents (objfile->obfd, extab,
2267 extab_data, 0, extab_size))
2269 do_cleanups (cleanups);
2274 /* Allocate exception table data structure. */
2275 data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
2276 set_objfile_data (objfile, arm_exidx_data_key, data);
2277 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
2278 objfile->obfd->section_count,
2279 VEC(arm_exidx_entry_s) *);
2281 /* Fill in exception table. */
2282 for (i = 0; i < exidx_size / 8; i++)
2284 struct arm_exidx_entry new_exidx_entry;
2285 bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
2286 bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
2287 bfd_vma addr = 0, word = 0;
2288 int n_bytes = 0, n_words = 0;
2289 struct obj_section *sec;
2290 gdb_byte *entry = NULL;
2292 /* Extract address of start of function. */
2293 idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2294 idx += exidx_vma + i * 8;
2296 /* Find section containing function and compute section offset. */
2297 sec = arm_obj_section_from_vma (objfile, idx);
2300 idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);
2302 /* Determine address of exception table entry. */
2305 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2307 else if ((val & 0xff000000) == 0x80000000)
2309 /* Exception table entry embedded in .ARM.exidx
2310 -- must be short form. */
2314 else if (!(val & 0x80000000))
2316 /* Exception table entry in .ARM.extab. */
2317 addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2318 addr += exidx_vma + i * 8 + 4;
2320 if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
2322 word = bfd_h_get_32 (objfile->obfd,
2323 extab_data + addr - extab_vma);
2326 if ((word & 0xff000000) == 0x80000000)
2331 else if ((word & 0xff000000) == 0x81000000
2332 || (word & 0xff000000) == 0x82000000)
2336 n_words = ((word >> 16) & 0xff);
2338 else if (!(word & 0x80000000))
2341 struct obj_section *pers_sec;
2342 int gnu_personality = 0;
2344 /* Custom personality routine. */
2345 pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2346 pers = UNMAKE_THUMB_ADDR (pers + addr - 4);
2348 /* Check whether we've got one of the variants of the
2349 GNU personality routines. */
2350 pers_sec = arm_obj_section_from_vma (objfile, pers);
2353 static const char *personality[] =
2355 "__gcc_personality_v0",
2356 "__gxx_personality_v0",
2357 "__gcj_personality_v0",
2358 "__gnu_objc_personality_v0",
2362 CORE_ADDR pc = pers + obj_section_offset (pers_sec);
2365 for (k = 0; personality[k]; k++)
2366 if (lookup_minimal_symbol_by_pc_name
2367 (pc, personality[k], objfile))
2369 gnu_personality = 1;
2374 /* If so, the next word contains a word count in the high
2375 byte, followed by the same unwind instructions as the
2376 pre-defined forms. */
2378 && addr + 4 <= extab_vma + extab_size)
2380 word = bfd_h_get_32 (objfile->obfd,
2381 extab_data + addr - extab_vma);
2384 n_words = ((word >> 24) & 0xff);
2390 /* Sanity check address. */
2392 if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
2393 n_words = n_bytes = 0;
2395 /* The unwind instructions reside in WORD (only the N_BYTES least
2396 significant bytes are valid), followed by N_WORDS words in the
2397 extab section starting at ADDR. */
2398 if (n_bytes || n_words)
2400 gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
2401 n_bytes + n_words * 4 + 1);
2404 *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);
2408 word = bfd_h_get_32 (objfile->obfd,
2409 extab_data + addr - extab_vma);
2412 *p++ = (gdb_byte) ((word >> 24) & 0xff);
2413 *p++ = (gdb_byte) ((word >> 16) & 0xff);
2414 *p++ = (gdb_byte) ((word >> 8) & 0xff);
2415 *p++ = (gdb_byte) (word & 0xff);
2418 /* Implied "Finish" to terminate the list. */
2422 /* Push the entry onto the vector. Entries are guaranteed to
2423 appear in order of increasing addresses. */
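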
2424 new_exidx_entry.addr = idx;
2425 new_exidx_entry.entry = entry;
2426 VEC_safe_push (arm_exidx_entry_s,
2427 data->section_maps[sec->the_bfd_section->index],
2431 do_cleanups (cleanups);
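/* As an illustration of the normalization above (values made up): a
   short-form index entry whose second word is 0x80a8b0b0 provides three
   instruction bytes, so the cached entry becomes 0xa8 0xb0 0xb0
   (pop {r4, lr}; finish; finish) followed by the implicit 0xb0
   terminator. */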
2434 /* Search for the exception table entry covering MEMADDR. If one is found,
2435 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2436 set *START to the start of the region covered by this entry. */
2439 arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
2441 struct obj_section *sec;
2443 sec = find_pc_section (memaddr);
2446 struct arm_exidx_data *data;
2447 VEC(arm_exidx_entry_s) *map;
2448 struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
2451 data = objfile_data (sec->objfile, arm_exidx_data_key);
2454 map = data->section_maps[sec->the_bfd_section->index];
2455 if (!VEC_empty (arm_exidx_entry_s, map))
2457 struct arm_exidx_entry *map_sym;
2459 idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
2460 arm_compare_exidx_entries);
2462 /* VEC_lower_bound finds the earliest ordered insertion
2463 point. If the following symbol starts at this exact
2464 address, we use that; otherwise, the preceding
2465 exception table entry covers this address. */
2466 if (idx < VEC_length (arm_exidx_entry_s, map))
2468 map_sym = VEC_index (arm_exidx_entry_s, map, idx);
2469 if (map_sym->addr == map_key.addr)
2472 *start = map_sym->addr + obj_section_addr (sec);
2473 return map_sym->entry;
2479 map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
2481 *start = map_sym->addr + obj_section_addr (sec);
2482 return map_sym->entry;
2491 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2492 instruction list from the ARM exception table entry ENTRY, allocate and
2493 return a prologue cache structure describing how to unwind this frame.
2495 Return NULL if the unwinding instruction list contains a "spare",
2496 "reserved" or "refuse to unwind" instruction as defined in section
2497 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2498 for the ARM Architecture" document. */
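/* As an illustration (values made up): given the normalized sequence
   0xa8 0xb0 0xb0 0xb0 and an initial VSP of 0x7ffff000, the code below
   records r4 as saved at 0x7ffff000 and LR at 0x7ffff004, advances VSP to
   0x7ffff008, and the first 0xb0 copies the saved LR slot into PC; the
   caller's SP is therefore unwound as 0x7ffff008. */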
2500 static struct arm_prologue_cache *
2501 arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
2506 struct arm_prologue_cache *cache;
2507 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2508 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2514 /* Whenever we reload SP, we have to retrieve its actual
2515 value in the current frame. */
2518 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2520 int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2521 vsp = get_frame_register_unsigned (this_frame, reg);
2525 CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
2526 vsp = get_frame_memory_unsigned (this_frame, addr, 4);
2532 /* Decode next unwind instruction. */
2535 if ((insn & 0xc0) == 0)
2537 int offset = insn & 0x3f;
2538 vsp += (offset << 2) + 4;
2540 else if ((insn & 0xc0) == 0x40)
2542 int offset = insn & 0x3f;
2543 vsp -= (offset << 2) + 4;
2545 else if ((insn & 0xf0) == 0x80)
2547 int mask = ((insn & 0xf) << 8) | *entry++;
2550 /* The special case of an all-zero mask identifies
2551 "Refuse to unwind". We return NULL to fall back
2552 to the prologue analyzer. */
2556 /* Pop registers r4..r15 under mask. */
2557 for (i = 0; i < 12; i++)
2558 if (mask & (1 << i))
2560 cache->saved_regs[4 + i].addr = vsp;
2564 /* Special-case popping SP -- we need to reload vsp. */
2565 if (mask & (1 << (ARM_SP_REGNUM - 4)))
2568 else if ((insn & 0xf0) == 0x90)
2570 int reg = insn & 0xf;
2572 /* Reserved cases. */
2573 if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
2576 /* Set SP from another register and mark VSP for reload. */
2577 cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
2580 else if ((insn & 0xf0) == 0xa0)
2582 int count = insn & 0x7;
2583 int pop_lr = (insn & 0x8) != 0;
2586 /* Pop r4..r[4+count]. */
2587 for (i = 0; i <= count; i++)
2589 cache->saved_regs[4 + i].addr = vsp;
2593 /* If indicated by flag, pop LR as well. */
2596 cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
2600 else if (insn == 0xb0)
2602 /* We could only have updated PC by popping into it; if so, it
2603 will show up as an address. Otherwise, copy LR into PC. */
2604 if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
2605 cache->saved_regs[ARM_PC_REGNUM]
2606 = cache->saved_regs[ARM_LR_REGNUM];
2611 else if (insn == 0xb1)
2613 int mask = *entry++;
2616 /* An all-zero mask, or any mask >= 16, is "spare". */
2617 if (mask == 0 || mask >= 16)
2620 /* Pop r0..r3 under mask. */
2621 for (i = 0; i < 4; i++)
2622 if (mask & (1 << i))
2624 cache->saved_regs[i].addr = vsp;
2628 else if (insn == 0xb2)
2630 ULONGEST offset = 0;
2635 offset |= (*entry & 0x7f) << shift;
2638 while (*entry++ & 0x80);
2640 vsp += 0x204 + (offset << 2);
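/* For example (values made up): the operand bytes 0x81 0x01 decode to the
   ULEB128 value 1 + (1 << 7) == 129, so VSP advances by
   0x204 + (129 << 2) == 0x408 bytes. */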
2642 else if (insn == 0xb3)
2644 int start = *entry >> 4;
2645 int count = (*entry++) & 0xf;
2648 /* Only registers D0..D15 are valid here. */
2649 if (start + count >= 16)
2652 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2653 for (i = 0; i <= count; i++)
2655 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2659 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2662 else if ((insn & 0xf8) == 0xb8)
2664 int count = insn & 0x7;
2667 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2668 for (i = 0; i <= count; i++)
2670 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2674 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2677 else if (insn == 0xc6)
2679 int start = *entry >> 4;
2680 int count = (*entry++) & 0xf;
2683 /* Only registers WR0..WR15 are valid. */
2684 if (start + count >= 16)
2687 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2688 for (i = 0; i <= count; i++)
2690 cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
2694 else if (insn == 0xc7)
2696 int mask = *entry++;
2699 /* An all-zero mask, or any mask >= 16, is "spare". */
2700 if (mask == 0 || mask >= 16)
2703 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2704 for (i = 0; i < 4; i++)
2705 if (mask & (1 << i))
2707 cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
2711 else if ((insn & 0xf8) == 0xc0)
2713 int count = insn & 0x7;
2716 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2717 for (i = 0; i <= count; i++)
2719 cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
2723 else if (insn == 0xc8)
2725 int start = *entry >> 4;
2726 int count = (*entry++) & 0xf;
2729 /* Only registers D0..D31 are valid. */
2730 if (start + count >= 16)
2733 /* Pop VFP double-precision registers
2734 D[16+start]..D[16+start+count]. */
2735 for (i = 0; i <= count; i++)
2737 cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
2741 else if (insn == 0xc9)
2743 int start = *entry >> 4;
2744 int count = (*entry++) & 0xf;
2747 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2748 for (i = 0; i <= count; i++)
2750 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2754 else if ((insn & 0xf8) == 0xd0)
2756 int count = insn & 0x7;
2759 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2760 for (i = 0; i <= count; i++)
2762 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2768 /* Everything else is "spare". */
2773 /* If we restore SP from a register, assume this was the frame register.
2774 Otherwise just fall back to SP as frame register. */
2775 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2776 cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2778 cache->framereg = ARM_SP_REGNUM;
2780 /* Determine offset to previous frame. */
2782 = vsp - get_frame_register_unsigned (this_frame, cache->framereg);
2784 /* We already got the previous SP. */
2785 cache->prev_sp = vsp;
2790 /* Unwinding via ARM exception table entries. Note that the sniffer
2791 already computes a filled-in prologue cache, which is then used
2792 with the same arm_prologue_this_id and arm_prologue_prev_register
2793 routines also used for prologue-parsing based unwinding. */
2796 arm_exidx_unwind_sniffer (const struct frame_unwind *self,
2797 struct frame_info *this_frame,
2798 void **this_prologue_cache)
2800 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2801 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2802 CORE_ADDR addr_in_block, exidx_region, func_start;
2803 struct arm_prologue_cache *cache;
2806 /* See if we have an ARM exception table entry covering this address. */
2807 addr_in_block = get_frame_address_in_block (this_frame);
2808 entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
2812 /* The ARM exception table does not describe unwind information
2813 for arbitrary PC values, but is guaranteed to be correct only
2814 at call sites. We have to decide here whether we want to use
2815 ARM exception table information for this frame, or fall back
2816 to using prologue parsing. (Note that if we have DWARF CFI,
2817 this sniffer isn't even called -- CFI is always preferred.)
2819 Before we make this decision, however, we check whether we
2820 actually have *symbol* information for the current frame.
2821 If not, prologue parsing would not work anyway, so we might
2822 as well use the exception table and hope for the best. */
2823 if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
2827 /* If the next frame is "normal", we are at a call site in this
2828 frame, so exception information is guaranteed to be valid. */
2829 if (get_next_frame (this_frame)
2830 && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
2833 /* We also assume exception information is valid if we're currently
2834 blocked in a system call. The system library is supposed to
2835 ensure this, so that e.g. pthread cancellation works. */
2836 if (arm_frame_is_thumb (this_frame))
2840 if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
2841 byte_order_for_code, &insn)
2842 && (insn & 0xff00) == 0xdf00 /* svc */)
2849 if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
2850 byte_order_for_code, &insn)
2851 && (insn & 0x0f000000) == 0x0f000000 /* svc */)
2855 /* Bail out if we don't know that exception information is valid. */
2859 /* The ARM exception index does not mark the *end* of the region
2860 covered by the entry, and some functions will not have any entry.
2861 To correctly recognize the end of the covered region, the linker
2862 should have inserted dummy records with a CANTUNWIND marker.
2864 Unfortunately, current versions of GNU ld do not reliably do
2865 this, and thus we may have found an incorrect entry above.
2866 As a (temporary) sanity check, we only use the entry if it
2867 lies *within* the bounds of the function. Note that this check
2868 might reject perfectly valid entries that just happen to cover
2869 multiple functions; therefore this check ought to be removed
2870 once the linker is fixed. */
2871 if (func_start > exidx_region)
2875 /* Decode the list of unwinding instructions into a prologue cache.
2876 Note that this may fail due to e.g. a "refuse to unwind" code. */
2877 cache = arm_exidx_fill_cache (this_frame, entry);
2881 *this_prologue_cache = cache;
2885 struct frame_unwind arm_exidx_unwind = {
2887 arm_prologue_this_id,
2888 arm_prologue_prev_register,
2890 arm_exidx_unwind_sniffer
2893 static struct arm_prologue_cache *
2894 arm_make_stub_cache (struct frame_info *this_frame)
2896 struct arm_prologue_cache *cache;
2898 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2899 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2901 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2906 /* Our frame ID for a stub frame is the current SP and LR. */
2909 arm_stub_this_id (struct frame_info *this_frame,
2911 struct frame_id *this_id)
2913 struct arm_prologue_cache *cache;
2915 if (*this_cache == NULL)
2916 *this_cache = arm_make_stub_cache (this_frame);
2917 cache = *this_cache;
2919 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2923 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2924 struct frame_info *this_frame,
2925 void **this_prologue_cache)
2927 CORE_ADDR addr_in_block;
2930 addr_in_block = get_frame_address_in_block (this_frame);
2931 if (in_plt_section (addr_in_block, NULL)
2932 /* We also use the stub unwinder if the target memory is unreadable,
2933 to avoid having the prologue unwinder try to read it. */
2934 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2940 struct frame_unwind arm_stub_unwind = {
2943 arm_prologue_prev_register,
2945 arm_stub_unwind_sniffer
2949 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2951 struct arm_prologue_cache *cache;
2953 if (*this_cache == NULL)
2954 *this_cache = arm_make_prologue_cache (this_frame);
2955 cache = *this_cache;
2957 return cache->prev_sp - cache->framesize;
2960 struct frame_base arm_normal_base = {
2961 &arm_prologue_unwind,
2962 arm_normal_frame_base,
2963 arm_normal_frame_base,
2964 arm_normal_frame_base
2967 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2968 dummy frame. The frame ID's base needs to match the TOS value
2969 saved by save_dummy_frame_tos() and returned from
2970 arm_push_dummy_call, and the PC needs to match the dummy frame's
2973 static struct frame_id
2974 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2976 return frame_id_build (get_frame_register_unsigned (this_frame,
2978 get_frame_pc (this_frame));
2981 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2982 be used to construct the previous frame's ID, after looking up the
2983 containing function). */
2986 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2989 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2990 return arm_addr_bits_remove (gdbarch, pc);
2994 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2996 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2999 static struct value *
3000 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
3003 struct gdbarch * gdbarch = get_frame_arch (this_frame);
3005 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
3010 /* The PC is normally copied from the return column, which
3011 describes saves of LR. However, that version may have an
3012 extra bit set to indicate Thumb state. The bit is not
3014 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3015 return frame_unwind_got_constant (this_frame, regnum,
3016 arm_addr_bits_remove (gdbarch, lr));
3019 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3020 cpsr = get_frame_register_unsigned (this_frame, regnum);
3021 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3022 if (IS_THUMB_ADDR (lr))
3026 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3029 internal_error (__FILE__, __LINE__,
3030 _("Unexpected register %d"), regnum);
3035 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3036 struct dwarf2_frame_state_reg *reg,
3037 struct frame_info *this_frame)
3043 reg->how = DWARF2_FRAME_REG_FN;
3044 reg->loc.fn = arm_dwarf2_prev_register;
3047 reg->how = DWARF2_FRAME_REG_CFA;
3052 /* Return true if we are in the function's epilogue, i.e. after the
3053 instruction that destroyed the function's stack frame. */
3056 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3058 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3059 unsigned int insn, insn2;
3060 int found_return = 0, found_stack_adjust = 0;
3061 CORE_ADDR func_start, func_end;
3065 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3068 /* The epilogue is a sequence of instructions along the following lines:
3070 - add stack frame size to SP or FP
3071 - [if frame pointer used] restore SP from FP
3072 - restore registers from SP [may include PC]
3073 - a return-type instruction [if PC wasn't already restored]
3075 In a first pass, we scan forward from the current PC and verify the
3076 instructions we find as compatible with this sequence, ending in a
3079 However, this is not sufficient to distinguish indirect function calls
3080 within a function from indirect tail calls in the epilogue in some cases.
3081 Therefore, if we didn't already find any SP-changing instruction during
3082 forward scan, we add a backward scanning heuristic to ensure we actually
3083 are in the epilogue. */
3086 while (scan_pc < func_end && !found_return)
3088 if (target_read_memory (scan_pc, buf, 2))
3092 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3094 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
3096 else if (insn == 0x46f7) /* mov pc, lr */
3098 else if (insn == 0x46bd) /* mov sp, r7 */
3099 found_stack_adjust = 1;
3100 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3101 found_stack_adjust = 1;
3102 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
3104 found_stack_adjust = 1;
3105 if (insn & 0x0100) /* <registers> include PC. */
3108 else if ((insn & 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
3110 if (target_read_memory (scan_pc, buf, 2))
3114 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
3116 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3118 found_stack_adjust = 1;
3119 if (insn2 & 0x8000) /* <registers> include PC. */
3122 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3123 && (insn2 & 0x0fff) == 0x0b04)
3125 found_stack_adjust = 1;
3126 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
3129 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3130 && (insn2 & 0x0e00) == 0x0a00)
3131 found_stack_adjust = 1;
3142 /* Since any instruction in the epilogue sequence, with the possible
3143 exception of return itself, updates the stack pointer, we need to
3144 scan backwards for at most one instruction. Try either a 16-bit or
3145 a 32-bit instruction. This is just a heuristic, so we do not worry
3146 too much about false positives. */
3148 if (!found_stack_adjust)
3150 if (pc - 4 < func_start)
3152 if (target_read_memory (pc - 4, buf, 4))
3155 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3156 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
3158 if (insn2 == 0x46bd) /* mov sp, r7 */
3159 found_stack_adjust = 1;
3160 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3161 found_stack_adjust = 1;
3162 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
3163 found_stack_adjust = 1;
3164 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3165 found_stack_adjust = 1;
3166 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3167 && (insn2 & 0x0fff) == 0x0b04)
3168 found_stack_adjust = 1;
3169 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3170 && (insn2 & 0x0e00) == 0x0a00)
3171 found_stack_adjust = 1;
3174 return found_stack_adjust;
3177 /* Return true if we are in the function's epilogue, i.e. after the
3178 instruction that destroyed the function's stack frame. */
3181 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3183 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3185 int found_return, found_stack_adjust;
3186 CORE_ADDR func_start, func_end;
3188 if (arm_pc_is_thumb (gdbarch, pc))
3189 return thumb_in_function_epilogue_p (gdbarch, pc);
3191 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3194 /* We are in the epilogue if the previous instruction was a stack
3195 adjustment and the next instruction is a possible return (bx, mov
3196 pc, or pop). We could have to scan backwards to find the stack
3197 adjustment, or forwards to find the return, but this is a decent
3198 approximation. First scan forwards. */
3201 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3202 if (bits (insn, 28, 31) != INST_NV)
3204 if ((insn & 0x0ffffff0) == 0x012fff10)
3207 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3210 else if ((insn & 0x0fff0000) == 0x08bd0000
3211 && (insn & 0x0000c000) != 0)
3212 /* POP (LDMIA), including PC or LR. */
3219 /* Scan backwards. This is just a heuristic, so do not worry about
3220 false positives from mode changes. */
3222 if (pc < func_start + 4)
3225 found_stack_adjust = 0;
3226 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3227 if (bits (insn, 28, 31) != INST_NV)
3229 if ((insn & 0x0df0f000) == 0x0080d000)
3230 /* ADD SP (register or immediate). */
3231 found_stack_adjust = 1;
3232 else if ((insn & 0x0df0f000) == 0x0040d000)
3233 /* SUB SP (register or immediate). */
3234 found_stack_adjust = 1;
3235 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3237 found_stack_adjust = 1;
3238 else if ((insn & 0x0fff0000) == 0x08bd0000)
3240 found_stack_adjust = 1;
3243 if (found_stack_adjust)
3250 /* When arguments must be pushed onto the stack, they go on in reverse
3251 order. The code below implements a FILO (stack) to do this. */
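/* A usage sketch with hypothetical arguments ARG0 and ARG1 (not part of
   the call sequence below): items pushed first are popped last, so pushing
   arguments left to right and then popping while decrementing SP leaves
   the leftmost stack argument at the lowest address, i.e. at the final SP:

   si = push_stack_item (NULL, arg0, 4);  popped last, written at SP
   si = push_stack_item (si, arg1, 4);    popped first, written at SP + 4  */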
3256 struct stack_item *prev;
3260 static struct stack_item *
3261 push_stack_item (struct stack_item *prev, const void *contents, int len)
3263 struct stack_item *si;
3264 si = xmalloc (sizeof (struct stack_item));
3265 si->data = xmalloc (len);
3268 memcpy (si->data, contents, len);
3272 static struct stack_item *
3273 pop_stack_item (struct stack_item *si)
3275 struct stack_item *dead = si;
3283 /* Return the alignment (in bytes) of the given type. */
3286 arm_type_align (struct type *t)
3292 t = check_typedef (t);
3293 switch (TYPE_CODE (t))
3296 /* Should never happen. */
3297 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3301 case TYPE_CODE_ENUM:
3305 case TYPE_CODE_RANGE:
3306 case TYPE_CODE_BITSTRING:
3308 case TYPE_CODE_CHAR:
3309 case TYPE_CODE_BOOL:
3310 return TYPE_LENGTH (t);
3312 case TYPE_CODE_ARRAY:
3313 case TYPE_CODE_COMPLEX:
3314 /* TODO: What about vector types? */
3315 return arm_type_align (TYPE_TARGET_TYPE (t));
3317 case TYPE_CODE_STRUCT:
3318 case TYPE_CODE_UNION:
3320 for (n = 0; n < TYPE_NFIELDS (t); n++)
3322 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3330 /* Possible base types for a candidate for passing and returning in
3333 enum arm_vfp_cprc_base_type
3342 /* The length of one element of base type B. */
3345 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3349 case VFP_CPRC_SINGLE:
3351 case VFP_CPRC_DOUBLE:
3353 case VFP_CPRC_VEC64:
3355 case VFP_CPRC_VEC128:
3358 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3363 /* The character ('s', 'd' or 'q') for the type of VFP register used
3364 for passing base type B. */
3367 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3371 case VFP_CPRC_SINGLE:
3373 case VFP_CPRC_DOUBLE:
3375 case VFP_CPRC_VEC64:
3377 case VFP_CPRC_VEC128:
3380 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3385 /* Determine whether T may be part of a candidate for passing and
3386 returning in VFP registers, ignoring the limit on the total number
3387 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3388 classification of the first valid component found; if it is not
3389 VFP_CPRC_UNKNOWN, all components must have the same classification
3390 as *BASE_TYPE. If it is found that T contains a type not permitted
3391 for passing and returning in VFP registers, a type differently
3392 classified from *BASE_TYPE, or two types differently classified
3393 from each other, return -1, otherwise return the total number of
3394 base-type elements found (possibly 0 in an empty structure or
3395 array). Vectors and complex types are not currently supported,
3396 matching the generic AAPCS support. */
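/* For example: a structure containing two doubles classifies as base type
   VFP_CPRC_DOUBLE with a count of 2; a structure containing a float and a
   double mixes classifications and yields -1 (not a candidate); an array
   of four floats yields VFP_CPRC_SINGLE with a count of 4. */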
3399 arm_vfp_cprc_sub_candidate (struct type *t,
3400 enum arm_vfp_cprc_base_type *base_type)
3402 t = check_typedef (t);
3403 switch (TYPE_CODE (t))
3406 switch (TYPE_LENGTH (t))
3409 if (*base_type == VFP_CPRC_UNKNOWN)
3410 *base_type = VFP_CPRC_SINGLE;
3411 else if (*base_type != VFP_CPRC_SINGLE)
3416 if (*base_type == VFP_CPRC_UNKNOWN)
3417 *base_type = VFP_CPRC_DOUBLE;
3418 else if (*base_type != VFP_CPRC_DOUBLE)
3427 case TYPE_CODE_ARRAY:
3431 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
3434 if (TYPE_LENGTH (t) == 0)
3436 gdb_assert (count == 0);
3439 else if (count == 0)
3441 unitlen = arm_vfp_cprc_unit_length (*base_type);
3442 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
3443 return TYPE_LENGTH (t) / unitlen;
3447 case TYPE_CODE_STRUCT:
3452 for (i = 0; i < TYPE_NFIELDS (t); i++)
3454 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3456 if (sub_count == -1)
3460 if (TYPE_LENGTH (t) == 0)
3462 gdb_assert (count == 0);
3465 else if (count == 0)
3467 unitlen = arm_vfp_cprc_unit_length (*base_type);
3468 if (TYPE_LENGTH (t) != unitlen * count)
3473 case TYPE_CODE_UNION:
3478 for (i = 0; i < TYPE_NFIELDS (t); i++)
3480 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3482 if (sub_count == -1)
3484 count = (count > sub_count ? count : sub_count);
3486 if (TYPE_LENGTH (t) == 0)
3488 gdb_assert (count == 0);
3491 else if (count == 0)
3493 unitlen = arm_vfp_cprc_unit_length (*base_type);
3494 if (TYPE_LENGTH (t) != unitlen * count)
3506 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3507 if passed to or returned from a non-variadic function with the VFP
3508 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3509 *BASE_TYPE to the base type for T and *COUNT to the number of
3510 elements of that base type before returning. */
3513 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3516 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3517 int c = arm_vfp_cprc_sub_candidate (t, &b);
3518 if (c <= 0 || c > 4)
3525 /* Return 1 if the VFP ABI should be used for passing arguments to and
3526 returning values from a function of type FUNC_TYPE, 0
3530 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3532 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3533 /* Variadic functions always use the base ABI. Assume that functions
3534 without debug info are not variadic. */
3535 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3537 /* The VFP ABI is only supported as a variant of AAPCS. */
3538 if (tdep->arm_abi != ARM_ABI_AAPCS)
3540 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3543 /* We currently only support passing parameters in integer registers, which
3544 conforms with GCC's default model, and VFP argument passing following
3545 the VFP variant of AAPCS. Several other variants exist and
3546 we should probably support some of them based on the selected ABI. */
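/* For example, with the base AAPCS and an illustrative prototype
   int f (int a, double d, int b): A goes in r0, r1 is skipped so that the
   doubleword-aligned D occupies the even register pair r2/r3, and B goes
   on the stack. With the VFP variant in effect, D instead goes in d0 and
   leaves r1 free for B. */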
3549 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3550 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3551 struct value **args, CORE_ADDR sp, int struct_return,
3552 CORE_ADDR struct_addr)
3554 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3558 struct stack_item *si = NULL;
3561 unsigned vfp_regs_free = (1 << 16) - 1;
3563 /* Determine the type of this function and whether the VFP ABI
3565 ftype = check_typedef (value_type (function));
3566 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3567 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3568 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3570 /* Set the return address. For the ARM, the return breakpoint is
3571 always at BP_ADDR. */
3572 if (arm_pc_is_thumb (gdbarch, bp_addr))
3574 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3576 /* Walk through the list of args and determine how large a temporary
3577 stack is required. Need to take care here as structs may be
3578 passed on the stack, and we have to push them. */
3581 argreg = ARM_A1_REGNUM;
3584 /* The struct_return pointer occupies the first parameter
3585 passing register. */
3589 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3590 gdbarch_register_name (gdbarch, argreg),
3591 paddress (gdbarch, struct_addr));
3592 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3596 for (argnum = 0; argnum < nargs; argnum++)
3599 struct type *arg_type;
3600 struct type *target_type;
3601 enum type_code typecode;
3602 const bfd_byte *val;
3604 enum arm_vfp_cprc_base_type vfp_base_type;
3606 int may_use_core_reg = 1;
3608 arg_type = check_typedef (value_type (args[argnum]));
3609 len = TYPE_LENGTH (arg_type);
3610 target_type = TYPE_TARGET_TYPE (arg_type);
3611 typecode = TYPE_CODE (arg_type);
3612 val = value_contents (args[argnum]);
3614 align = arm_type_align (arg_type);
3615 /* Round alignment up to a whole number of words. */
3616 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3617 /* Different ABIs have different maximum alignments. */
3618 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3620 /* The APCS ABI only requires word alignment. */
3621 align = INT_REGISTER_SIZE;
3625 /* The AAPCS requires at most doubleword alignment. */
3626 if (align > INT_REGISTER_SIZE * 2)
3627 align = INT_REGISTER_SIZE * 2;
3631 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3639 /* Because this is a CPRC it cannot go in a core register or
3640 cause a core register to be skipped for alignment.
3641 Either it goes in VFP registers and the rest of this loop
3642 iteration is skipped for this argument, or it goes on the
3643 stack (and the stack alignment code is correct for this
3645 may_use_core_reg = 0;
3647 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3648 shift = unit_length / 4;
3649 mask = (1 << (shift * vfp_base_count)) - 1;
3650 for (regno = 0; regno < 16; regno += shift)
3651 if (((vfp_regs_free >> regno) & mask) == mask)
3660 vfp_regs_free &= ~(mask << regno);
3661 reg_scaled = regno / shift;
3662 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3663 for (i = 0; i < vfp_base_count; i++)
3667 if (reg_char == 'q')
3668 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3669 val + i * unit_length);
3672 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
3673 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3675 regcache_cooked_write (regcache, regnum,
3676 val + i * unit_length);
3683 /* This CPRC could not go in VFP registers, so all VFP
3684 registers are now marked as used. */
3689 /* Push stack padding for doubleword alignment. */
3690 if (nstack & (align - 1))
3692 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3693 nstack += INT_REGISTER_SIZE;
3696 /* Doubleword aligned quantities must go in even register pairs. */
3697 if (may_use_core_reg
3698 && argreg <= ARM_LAST_ARG_REGNUM
3699 && align > INT_REGISTER_SIZE
3703 /* If the argument is a pointer to a function, and it is a
3704 Thumb function, create a LOCAL copy of the value and set
3705 the THUMB bit in it. */
3706 if (TYPE_CODE_PTR == typecode
3707 && target_type != NULL
3708 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3710 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3711 if (arm_pc_is_thumb (gdbarch, regval))
3713 bfd_byte *copy = alloca (len);
3714 store_unsigned_integer (copy, len, byte_order,
3715 MAKE_THUMB_ADDR (regval));
3720 /* Copy the argument to general registers or the stack in
3721 register-sized pieces. Large arguments are split between
3722 registers and stack. */
3725 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3727 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3729 /* The argument is being passed in a general purpose
3732 = extract_unsigned_integer (val, partial_len, byte_order);
3733 if (byte_order == BFD_ENDIAN_BIG)
3734 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3736 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3738 gdbarch_register_name
3740 phex (regval, INT_REGISTER_SIZE));
3741 regcache_cooked_write_unsigned (regcache, argreg, regval);
3746 /* Push the arguments onto the stack. */
3748 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3750 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3751 nstack += INT_REGISTER_SIZE;
3758 /* If we have an odd number of words to push, then decrement the stack
3759 by one word now, so first stack argument will be dword aligned. */
3766 write_memory (sp, si->data, si->len);
3767 si = pop_stack_item (si);
3770 /* Finally, update the SP register. */
3771 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3777 /* Always align the frame to an 8-byte boundary. This is required on
3778 some platforms and harmless on the rest. */
3781 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3783 /* Align the stack to eight bytes. */
3784 return sp & ~ (CORE_ADDR) 7;
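/* For example, a candidate SP of 0x7ffff00c is rounded down to
   0x7ffff008. */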
3788 print_fpu_flags (int flags)
3790 if (flags & (1 << 0))
3791 fputs ("IVO ", stdout);
3792 if (flags & (1 << 1))
3793 fputs ("DVZ ", stdout);
3794 if (flags & (1 << 2))
3795 fputs ("OFL ", stdout);
3796 if (flags & (1 << 3))
3797 fputs ("UFL ", stdout);
3798 if (flags & (1 << 4))
3799 fputs ("INX ", stdout);
3803 /* Print interesting information about the floating point processor
3804 (if present) or emulator. */
3806 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3807 struct frame_info *frame, const char *args)
3809 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3812 type = (status >> 24) & 127;
3813 if (status & (1 << 31))
3814 printf (_("Hardware FPU type %d\n"), type);
3816 printf (_("Software FPU type %d\n"), type);
3817 /* i18n: [floating point unit] mask */
3818 fputs (_("mask: "), stdout);
3819 print_fpu_flags (status >> 16);
3820 /* i18n: [floating point unit] flags */
3821 fputs (_("flags: "), stdout);
3822 print_fpu_flags (status);
3825 /* Construct the ARM extended floating point type. */
3826 static struct type *
3827 arm_ext_type (struct gdbarch *gdbarch)
3829 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3831 if (!tdep->arm_ext_type)
3833 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3834 floatformats_arm_ext);
3836 return tdep->arm_ext_type;
3839 static struct type *
3840 arm_neon_double_type (struct gdbarch *gdbarch)
3842 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3844 if (tdep->neon_double_type == NULL)
3846 struct type *t, *elem;
3848 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3850 elem = builtin_type (gdbarch)->builtin_uint8;
3851 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3852 elem = builtin_type (gdbarch)->builtin_uint16;
3853 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3854 elem = builtin_type (gdbarch)->builtin_uint32;
3855 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3856 elem = builtin_type (gdbarch)->builtin_uint64;
3857 append_composite_type_field (t, "u64", elem);
3858 elem = builtin_type (gdbarch)->builtin_float;
3859 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3860 elem = builtin_type (gdbarch)->builtin_double;
3861 append_composite_type_field (t, "f64", elem);
3863 TYPE_VECTOR (t) = 1;
3864 TYPE_NAME (t) = "neon_d";
3865 tdep->neon_double_type = t;
3868 return tdep->neon_double_type;
3871 /* FIXME: The vector types are not correctly ordered on big-endian
3872 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3873 bits of d0 - regardless of what unit size is being held in d0. So
3874 the offset of the first uint8 in d0 is 7, but the offset of the
3875 first float is 4. This code works as-is for little-endian
3878 static struct type *
3879 arm_neon_quad_type (struct gdbarch *gdbarch)
3881 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3883 if (tdep->neon_quad_type == NULL)
3885 struct type *t, *elem;
3887 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3889 elem = builtin_type (gdbarch)->builtin_uint8;
3890 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3891 elem = builtin_type (gdbarch)->builtin_uint16;
3892 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3893 elem = builtin_type (gdbarch)->builtin_uint32;
3894 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3895 elem = builtin_type (gdbarch)->builtin_uint64;
3896 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3897 elem = builtin_type (gdbarch)->builtin_float;
3898 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3899 elem = builtin_type (gdbarch)->builtin_double;
3900 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3902 TYPE_VECTOR (t) = 1;
3903 TYPE_NAME (t) = "neon_q";
3904 tdep->neon_quad_type = t;
3907 return tdep->neon_quad_type;
3910 /* Return the GDB type object for the "standard" data type of data in
3913 static struct type *
3914 arm_register_type (struct gdbarch *gdbarch, int regnum)
3916 int num_regs = gdbarch_num_regs (gdbarch);
3918 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
3919 && regnum >= num_regs && regnum < num_regs + 32)
3920 return builtin_type (gdbarch)->builtin_float;
3922 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
3923 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
3924 return arm_neon_quad_type (gdbarch);
3926 /* If the target description has register information, we are only
3927 in this function so that we can override the types of
3928 double-precision registers for NEON. */
3929 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
3931 struct type *t = tdesc_register_type (gdbarch, regnum);
3933 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
3934 && TYPE_CODE (t) == TYPE_CODE_FLT
3935 && gdbarch_tdep (gdbarch)->have_neon)
3936 return arm_neon_double_type (gdbarch);
3941 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
3943 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
3944 return builtin_type (gdbarch)->builtin_void;
3946 return arm_ext_type (gdbarch);
3948 else if (regnum == ARM_SP_REGNUM)
3949 return builtin_type (gdbarch)->builtin_data_ptr;
3950 else if (regnum == ARM_PC_REGNUM)
3951 return builtin_type (gdbarch)->builtin_func_ptr;
3952 else if (regnum >= ARRAY_SIZE (arm_register_names))
3953 /* These registers are only supported on targets which supply
3954 an XML description. */
3955 return builtin_type (gdbarch)->builtin_int0;
3957 return builtin_type (gdbarch)->builtin_uint32;
3960 /* Map a DWARF register REGNUM onto the appropriate GDB register
3964 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3966 /* Core integer regs. */
3967 if (reg >= 0 && reg <= 15)
3970 /* Legacy FPA encoding. These were once used in a way which
3971 overlapped with VFP register numbering, so their use is
3972 discouraged, but GDB doesn't support the ARM toolchain
3973 which used them for VFP. */
3974 if (reg >= 16 && reg <= 23)
3975 return ARM_F0_REGNUM + reg - 16;
3977 /* New assignments for the FPA registers. */
3978 if (reg >= 96 && reg <= 103)
3979 return ARM_F0_REGNUM + reg - 96;
3981 /* WMMX register assignments. */
3982 if (reg >= 104 && reg <= 111)
3983 return ARM_WCGR0_REGNUM + reg - 104;
3985 if (reg >= 112 && reg <= 127)
3986 return ARM_WR0_REGNUM + reg - 112;
3988 if (reg >= 192 && reg <= 199)
3989 return ARM_WC0_REGNUM + reg - 192;
3991 /* VFP v2 registers. A double precision value is actually
3992 in d1 rather than s2, but the ABI only defines numbering
3993 for the single precision registers. This will "just work"
3994 in GDB for little endian targets (we'll read eight bytes,
3995 starting in s0 and then progressing to s1), but will be
3996 reversed on big endian targets with VFP. This won't
3997 be a problem for the new Neon quad registers; you're supposed
3998 to use DW_OP_piece for those. */
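/* For example, DWARF register 66 maps to the user register "s2" and
   DWARF register 257 maps to "d1"; both lookups go through
   user_reg_map_name_to_regnum. */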
3999 if (reg >= 64 && reg <= 95)
4003 sprintf (name_buf, "s%d", reg - 64);
4004 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4008 /* VFP v3 / Neon registers. This range is also used for VFP v2
4009 registers, except that it now describes d0 instead of s0. */
4010 if (reg >= 256 && reg <= 287)
4014 sprintf (name_buf, "d%d", reg - 256);
4015 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4022 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4024 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4027 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4029 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4030 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4032 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4033 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4035 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4036 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4038 if (reg < NUM_GREGS)
4039 return SIM_ARM_R0_REGNUM + reg;
4042 if (reg < NUM_FREGS)
4043 return SIM_ARM_FP0_REGNUM + reg;
4046 if (reg < NUM_SREGS)
4047 return SIM_ARM_FPS_REGNUM + reg;
4050 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4053 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4054 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4055 It is thought that this is the floating-point register format on
4056 little-endian systems. */
4059 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4060 void *dbl, int endianess)
4064 if (endianess == BFD_ENDIAN_BIG)
4065 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4067 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4069 floatformat_from_doublest (fmt, &d, dbl);
4073 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4078 floatformat_to_doublest (fmt, ptr, &d);
4079 if (endianess == BFD_ENDIAN_BIG)
4080 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4082 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4087 condition_true (unsigned long cond, unsigned long status_reg)
4089 if (cond == INST_AL || cond == INST_NV)
4095 return ((status_reg & FLAG_Z) != 0);
4097 return ((status_reg & FLAG_Z) == 0);
4099 return ((status_reg & FLAG_C) != 0);
4101 return ((status_reg & FLAG_C) == 0);
4103 return ((status_reg & FLAG_N) != 0);
4105 return ((status_reg & FLAG_N) == 0);
4107 return ((status_reg & FLAG_V) != 0);
4109 return ((status_reg & FLAG_V) == 0);
4111 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4113 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4115 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4117 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4119 return (((status_reg & FLAG_Z) == 0)
4120 && (((status_reg & FLAG_N) == 0)
4121 == ((status_reg & FLAG_V) == 0)));
4123 return (((status_reg & FLAG_Z) != 0)
4124 || (((status_reg & FLAG_N) == 0)
4125 != ((status_reg & FLAG_V) == 0)));
4130 static unsigned long
4131 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
4132 unsigned long pc_val, unsigned long status_reg)
4134 unsigned long res, shift;
4135 int rm = bits (inst, 0, 3);
4136 unsigned long shifttype = bits (inst, 5, 6);
4140 int rs = bits (inst, 8, 11);
4141 shift = (rs == 15 ? pc_val + 8
4142 : get_frame_register_unsigned (frame, rs)) & 0xFF;
4145 shift = bits (inst, 7, 11);
4148 ? (pc_val + (bit (inst, 4) ? 12 : 8))
4149 : get_frame_register_unsigned (frame, rm));
4154 res = shift >= 32 ? 0 : res << shift;
4158 res = shift >= 32 ? 0 : res >> shift;
4164 res = ((res & 0x80000000L)
4165 ? ~((~res) >> shift) : res >> shift);
4168 case 3: /* ROR/RRX */
4171 res = (res >> 1) | (carry ? 0x80000000L : 0);
4173 res = (res >> shift) | (res << (32 - shift));
4177 return res & 0xffffffff;
4180 /* Return number of 1-bits in VAL. */
4183 bitcount (unsigned long val)
4186 for (nbits = 0; val != 0; nbits++)
4187 val &= val - 1; /* Delete rightmost 1-bit in val. */
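/* For example, bitcount (0x2c) reduces VAL to 0x28, then 0x20, then 0,
   and returns 3. */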
4191 /* Return the size in bytes of the complete Thumb instruction whose
4192 first halfword is INST1. */
4195 thumb_insn_size (unsigned short inst1)
4197 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
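/* For example, the halfword 0xe92d (the first half of a 32-bit push.w)
   passes both tests and yields 4, while 0xb580 (a 16-bit push) yields
   2. */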
4204 thumb_advance_itstate (unsigned int itstate)
4206 /* Preserve IT[7:5], the first three bits of the condition. Shift
4207 the upcoming condition flags left by one bit. */
4208 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
4210 /* If we have finished the IT block, clear the state. */
4211 if ((itstate & 0x0f) == 0)
4217 /* Find the next PC after the current instruction executes. In some
4218 cases we can not statically determine the answer (see the IT state
4219 handling in this function); in that case, a breakpoint may be
4220 inserted in addition to the returned PC, which will be used to set
4221 another breakpoint by our caller. */
4224 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
4226 struct gdbarch *gdbarch = get_frame_arch (frame);
4227 struct address_space *aspace = get_frame_address_space (frame);
4228 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4229 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4230 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
4231 unsigned short inst1;
4232 CORE_ADDR nextpc = pc + 2; /* Default is next instruction. */
4233 unsigned long offset;
4234 ULONGEST status, itstate;
4236 nextpc = MAKE_THUMB_ADDR (nextpc);
4237 pc_val = MAKE_THUMB_ADDR (pc_val);
4239 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
4241 /* Thumb-2 conditional execution support. There are eight bits in
4242 the CPSR which describe conditional execution state. Once
4243 reconstructed (they're in a funny order), the low five bits
4244 describe the low bit of the condition for each instruction and
4245 how many instructions remain. The high three bits describe the
4246 base condition. One of the low four bits will be set if an IT
4247 block is active. These bits read as zero on earlier
4249 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4250 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
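/* For example (illustrative value): a CPSR whose bits 15:10 are 0b000011
   and whose bits 26:25 are clear reconstructs to ITSTATE == 0x0c, which
   thumb_advance_itstate steps to 0x18 and then to zero at the end of the
   IT block. */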
4252 /* If-Then handling. On GNU/Linux, where this routine is used, we
4253 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4254 can disable execution of the undefined instruction. So we might
4255 miss the breakpoint if we set it on a skipped conditional
4256 instruction. Because conditional instructions can change the
4257 flags, affecting the execution of further instructions, we may
4258 need to set two breakpoints. */
4260 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
4262 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4264 /* An IT instruction. Because this instruction does not
4265 modify the flags, we can accurately predict the next
4266 executed instruction. */
4267 itstate = inst1 & 0x00ff;
4268 pc += thumb_insn_size (inst1);
4270 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4272 inst1 = read_memory_unsigned_integer (pc, 2,
4273 byte_order_for_code);
4274 pc += thumb_insn_size (inst1);
4275 itstate = thumb_advance_itstate (itstate);
4278 return MAKE_THUMB_ADDR (pc);
4280 else if (itstate != 0)
4282 /* We are in a conditional block. Check the condition. */
4283 if (! condition_true (itstate >> 4, status))
4285 /* Advance to the next executed instruction. */
4286 pc += thumb_insn_size (inst1);
4287 itstate = thumb_advance_itstate (itstate);
4289 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4291 inst1 = read_memory_unsigned_integer (pc, 2,
4292 byte_order_for_code);
4293 pc += thumb_insn_size (inst1);
4294 itstate = thumb_advance_itstate (itstate);
4297 return MAKE_THUMB_ADDR (pc);
4299 else if ((itstate & 0x0f) == 0x08)
4301 /* This is the last instruction of the conditional
4302 block, and it is executed. We can handle it normally
4303 because the following instruction is not conditional,
4304 and we must handle it normally because it is
4305 permitted to branch. Fall through. */
4311 /* There are conditional instructions after this one.
4312 If this instruction modifies the flags, then we can
4313 not predict what the next executed instruction will
4314 be. Fortunately, this instruction is architecturally
4315 forbidden to branch; we know it will fall through.
4316 Start by skipping past it. */
4317 pc += thumb_insn_size (inst1);
4318 itstate = thumb_advance_itstate (itstate);
4320 /* Set a breakpoint on the following instruction. */
4321 gdb_assert ((itstate & 0x0f) != 0);
4323 insert_single_step_breakpoint (gdbarch, aspace, pc);
4324 cond_negated = (itstate >> 4) & 1;
4326 /* Skip all following instructions with the same
4327 condition. If there is a later instruction in the IT
4328 block with the opposite condition, set the other
4329 breakpoint there. If not, then set a breakpoint on
4330 the instruction after the IT block. */
4333 inst1 = read_memory_unsigned_integer (pc, 2,
4334 byte_order_for_code);
4335 pc += thumb_insn_size (inst1);
4336 itstate = thumb_advance_itstate (itstate);
4338 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
4340 return MAKE_THUMB_ADDR (pc);
4344 else if (itstate & 0x0f)
4346 /* We are in a conditional block. Check the condition. */
4347 int cond = itstate >> 4;
4349 if (! condition_true (cond, status))
4351 /* Advance to the next instruction. All the 32-bit
4352 instructions share a common prefix. */
4353 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4354 return MAKE_THUMB_ADDR (pc + 4);
4356 return MAKE_THUMB_ADDR (pc + 2);
4359 /* Otherwise, handle the instruction normally. */
4362 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
4366 /* Fetch the saved PC from the stack. It's stored above
4367 all of the other registers. */
4368 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
4369 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
4370 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
4372 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
4374 unsigned long cond = bits (inst1, 8, 11);
4375 if (cond == 0x0f) /* 0x0f = SWI */
4377 struct gdbarch_tdep *tdep;
4378 tdep = gdbarch_tdep (gdbarch);
4380 if (tdep->syscall_next_pc != NULL)
4381 nextpc = tdep->syscall_next_pc (frame);
4384 else if (cond != 0x0f && condition_true (cond, status))
4385 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
4387 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
4389 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
4391 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
4393 unsigned short inst2;
4394 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
4396 /* Default to the next instruction. */
4398 nextpc = MAKE_THUMB_ADDR (nextpc);
4400 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
4402 /* Branches and miscellaneous control instructions. */
4404 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
4407 int j1, j2, imm1, imm2;
4409 imm1 = sbits (inst1, 0, 10);
4410 imm2 = bits (inst2, 0, 10);
4411 j1 = bit (inst2, 13);
4412 j2 = bit (inst2, 11);
4414 offset = ((imm1 << 12) + (imm2 << 1));
4415 offset ^= ((!j2) << 22) | ((!j1) << 23);
4417 nextpc = pc_val + offset;
4418 /* For BLX make sure to clear the low bits. */
4419 if (bit (inst2, 12) == 0)
4420 nextpc = nextpc & 0xfffffffc;
4422 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
4424 /* SUBS PC, LR, #imm8. */
4425 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
4426 nextpc -= inst2 & 0x00ff;
4428 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
4430 /* Conditional branch. */
4431 if (condition_true (bits (inst1, 6, 9), status))
4433 int sign, j1, j2, imm1, imm2;
4435 sign = sbits (inst1, 10, 10);
4436 imm1 = bits (inst1, 0, 5);
4437 imm2 = bits (inst2, 0, 10);
4438 j1 = bit (inst2, 13);
4439 j2 = bit (inst2, 11);
4441 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
4442 offset += (imm1 << 12) + (imm2 << 1);
4444 nextpc = pc_val + offset;
4448 else if ((inst1 & 0xfe50) == 0xe810)
4450 /* Load multiple or RFE. */
4451 int rn, offset, load_pc = 1;
4453 rn = bits (inst1, 0, 3);
4454 if (bit (inst1, 7) && !bit (inst1, 8))
4457 if (!bit (inst2, 15))
4459 offset = bitcount (inst2) * 4 - 4;
4461 else if (!bit (inst1, 7) && bit (inst1, 8))
4464 if (!bit (inst2, 15))
4468 else if (bit (inst1, 7) && bit (inst1, 8))
4473 else if (!bit (inst1, 7) && !bit (inst1, 8))
4483 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
4484 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
4487 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
4489 /* MOV PC or MOVS PC. */
4490 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4491 nextpc = MAKE_THUMB_ADDR (nextpc);
4493 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
4497 int rn, load_pc = 1;
4499 rn = bits (inst1, 0, 3);
4500 base = get_frame_register_unsigned (frame, rn);
4503 base = (base + 4) & ~(CORE_ADDR) 0x3;
4505 base += bits (inst2, 0, 11);
4507 base -= bits (inst2, 0, 11);
4509 else if (bit (inst1, 7))
4510 base += bits (inst2, 0, 11);
4511 else if (bit (inst2, 11))
4513 if (bit (inst2, 10))
4516 base += bits (inst2, 0, 7);
4518 base -= bits (inst2, 0, 7);
4521 else if ((inst2 & 0x0fc0) == 0x0000)
4523 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
4524 base += get_frame_register_unsigned (frame, rm) << shift;
4531 nextpc = get_frame_memory_unsigned (frame, base, 4);
4533 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
4536 CORE_ADDR tbl_reg, table, offset, length;
4538 tbl_reg = bits (inst1, 0, 3);
4539 if (tbl_reg == 0x0f)
4540 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4542 table = get_frame_register_unsigned (frame, tbl_reg);
4544 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4545 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
4546 nextpc = pc_val + length;
4548 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
4551 CORE_ADDR tbl_reg, table, offset, length;
4553 tbl_reg = bits (inst1, 0, 3);
4554 if (tbl_reg == 0x0f)
4555 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4557 table = get_frame_register_unsigned (frame, tbl_reg);
4559 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4560 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
4561 nextpc = pc_val + length;
4564 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
4566 if (bits (inst1, 3, 6) == 0x0f)
4569 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4571 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
4573 if (bits (inst1, 3, 6) == 0x0f)
4576 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4578 nextpc = MAKE_THUMB_ADDR (nextpc);
4580 else if ((inst1 & 0xf500) == 0xb100)
4583 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
4584 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
4586 if (bit (inst1, 11) && reg != 0)
4587 nextpc = pc_val + imm;
4588 else if (!bit (inst1, 11) && reg == 0)
4589 nextpc = pc_val + imm;
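
/* Illustrative sketch, not part of GDB: the BL/BLX (and unconditional
   B.W) offset decoding used above, written out with the ARM ARM field
   names S, J1, J2, imm10 and imm11 rather than the sbits/XOR trick.
   The function name is hypothetical and only a 32-bit (or wider)
   unsigned int is assumed.  */

static long
example_thumb2_bl_offset (unsigned int inst1, unsigned int inst2)
{
  unsigned int s = (inst1 >> 10) & 1;
  unsigned int imm10 = inst1 & 0x3ff;
  unsigned int j1 = (inst2 >> 13) & 1;
  unsigned int j2 = (inst2 >> 11) & 1;
  unsigned int imm11 = inst2 & 0x7ff;
  unsigned int i1 = !(j1 ^ s);
  unsigned int i2 = !(j2 ^ s);
  unsigned int imm = ((s << 24) | (i1 << 23) | (i2 << 22)
                      | (imm10 << 12) | (imm11 << 1));

  /* Sign-extend the 25-bit result; the destination is this offset plus
     the PC value (the instruction's address + 4 in Thumb state).  */
  if (s)
    return (long) imm - (1L << 25);
  return (long) imm;
}
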
4594 /* Get the raw next address. PC is the current program counter, in
   FRAME.  INSERT_BKPT should be TRUE if we want a breakpoint set on
   the alternative next instruction when there are two options.
4598 The value returned has the execution state of the next instruction
4599 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4600 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4604 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
4606 struct gdbarch *gdbarch = get_frame_arch (frame);
4607 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4608 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4609 unsigned long pc_val;
4610 unsigned long this_instr;
4611 unsigned long status;
4614 if (arm_frame_is_thumb (frame))
4615 return thumb_get_next_pc_raw (frame, pc, insert_bkpt);
4617 pc_val = (unsigned long) pc;
4618 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
4620 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4621 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
4623 if (bits (this_instr, 28, 31) == INST_NV)
4624 switch (bits (this_instr, 24, 27))
4629 /* Branch with Link and change to Thumb. */
4630 nextpc = BranchDest (pc, this_instr);
4631 nextpc |= bit (this_instr, 24) << 1;
4632 nextpc = MAKE_THUMB_ADDR (nextpc);
4638 /* Coprocessor register transfer. */
4639 if (bits (this_instr, 12, 15) == 15)
4640 error (_("Invalid update to pc in instruction"));
4643 else if (condition_true (bits (this_instr, 28, 31), status))
4645 switch (bits (this_instr, 24, 27))
4648 case 0x1: /* data processing */
4652 unsigned long operand1, operand2, result = 0;
4656 if (bits (this_instr, 12, 15) != 15)
4659 if (bits (this_instr, 22, 25) == 0
4660 && bits (this_instr, 4, 7) == 9) /* multiply */
4661 error (_("Invalid update to pc in instruction"));
4663 /* BX <reg>, BLX <reg> */
4664 if (bits (this_instr, 4, 27) == 0x12fff1
4665 || bits (this_instr, 4, 27) == 0x12fff3)
4667 rn = bits (this_instr, 0, 3);
4668 nextpc = (rn == 15) ? pc_val + 8
4669 : get_frame_register_unsigned (frame, rn);
4673 /* Multiply into PC. */
4674 c = (status & FLAG_C) ? 1 : 0;
4675 rn = bits (this_instr, 16, 19);
4676 operand1 = (rn == 15) ? pc_val + 8
4677 : get_frame_register_unsigned (frame, rn);
4679 if (bit (this_instr, 25))
4681 unsigned long immval = bits (this_instr, 0, 7);
4682 unsigned long rotate = 2 * bits (this_instr, 8, 11);
4683 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
4686 else /* operand 2 is a shifted register. */
4687 operand2 = shifted_reg_val (frame, this_instr, c,
4690 switch (bits (this_instr, 21, 24))
4693 result = operand1 & operand2;
4697 result = operand1 ^ operand2;
4701 result = operand1 - operand2;
4705 result = operand2 - operand1;
4709 result = operand1 + operand2;
4713 result = operand1 + operand2 + c;
4717 result = operand1 - operand2 + c;
4721 result = operand2 - operand1 + c;
4727 case 0xb: /* tst, teq, cmp, cmn */
4728 result = (unsigned long) nextpc;
4732 result = operand1 | operand2;
4736 /* Always step into a function. */
4741 result = operand1 & ~operand2;
4749 /* In 26-bit APCS the bottom two bits of the result are
4750 ignored, and we always end up in ARM state. */
4752 nextpc = arm_addr_bits_remove (gdbarch, result);
4760 case 0x5: /* data transfer */
4763 if (bit (this_instr, 20))
4766 if (bits (this_instr, 12, 15) == 15)
4772 if (bit (this_instr, 22))
4773 error (_("Invalid update to pc in instruction"));
4775 /* byte write to PC */
4776 rn = bits (this_instr, 16, 19);
4777 base = (rn == 15) ? pc_val + 8
4778 : get_frame_register_unsigned (frame, rn);
4779 if (bit (this_instr, 24))
4782 int c = (status & FLAG_C) ? 1 : 0;
4783 unsigned long offset =
4784 (bit (this_instr, 25)
4785 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
4786 : bits (this_instr, 0, 11));
4788 if (bit (this_instr, 23))
4793 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
4800 case 0x9: /* block transfer */
4801 if (bit (this_instr, 20))
4804 if (bit (this_instr, 15))
4809 if (bit (this_instr, 23))
4812 unsigned long reglist = bits (this_instr, 0, 14);
4813 offset = bitcount (reglist) * 4;
4814 if (bit (this_instr, 24)) /* pre */
4817 else if (bit (this_instr, 24))
4821 unsigned long rn_val =
4822 get_frame_register_unsigned (frame,
4823 bits (this_instr, 16, 19));
4825 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
4833 case 0xb: /* branch & link */
4834 case 0xa: /* branch */
4836 nextpc = BranchDest (pc, this_instr);
4842 case 0xe: /* coproc ops */
4846 struct gdbarch_tdep *tdep;
4847 tdep = gdbarch_tdep (gdbarch);
4849 if (tdep->syscall_next_pc != NULL)
4850 nextpc = tdep->syscall_next_pc (frame);
4856 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
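
/* Illustrative sketch, not part of GDB: the "rotated 8-bit immediate"
   operand decoded inline in the data-processing case above.  Bits 0-7
   of the instruction give the value and bits 8-11 give half of the
   right-rotation amount; e.g. imm8 = 0x3f with a rotation field of 0xf
   (rotate right by 30) yields 0xfc.  The function name is
   hypothetical.  */

static unsigned long
example_arm_immediate_operand (unsigned long insn)
{
  unsigned long imm8 = insn & 0xff;
  unsigned long rotate = 2 * ((insn >> 8) & 0xf);

  if (rotate == 0)
    return imm8;
  /* Rotate right by ROTATE bits within a 32-bit word.  */
  return ((imm8 >> rotate) | (imm8 << (32 - rotate))) & 0xffffffffUL;
}
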
4865 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4867 struct gdbarch *gdbarch = get_frame_arch (frame);
4869 gdbarch_addr_bits_remove (gdbarch,
4870 arm_get_next_pc_raw (frame, pc, TRUE));
4872 error (_("Infinite loop detected"));
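
/* Illustrative sketch, not part of GDB: the address encoding the
   comment above arm_get_next_pc_raw refers to.  The next-PC routines
   carry the execution state of the destination in bit 0 of the value
   they return (set means Thumb); the real code uses the
   MAKE_THUMB_ADDR / IS_THUMB_ADDR macros for this, and the helper
   below is only a hypothetical restatement.  */

static CORE_ADDR
example_strip_thumb_bit (CORE_ADDR addr, int *is_thumb)
{
  *is_thumb = (addr & 1) != 0;
  return addr & ~(CORE_ADDR) 1;
}
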
4876 /* single_step() is called just before we want to resume the inferior,
4877 if we want to single-step it but there is no hardware or kernel
4878 single-step support. We find the target of the coming instruction
4879 and breakpoint it. */
4882 arm_software_single_step (struct frame_info *frame)
4884 struct gdbarch *gdbarch = get_frame_arch (frame);
4885 struct address_space *aspace = get_frame_address_space (frame);
4887 /* NOTE: This may insert the wrong breakpoint instruction when
4888 single-stepping over a mode-changing instruction, if the
4889 CPSR heuristics are used. */
4891 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4892 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4897 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4898 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4899 NULL if an error occurs. BUF is freed. */
4902 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4903 int old_len, int new_len)
4905 gdb_byte *new_buf, *middle;
4906 int bytes_to_read = new_len - old_len;
4908 new_buf = xmalloc (new_len);
4909 memcpy (new_buf + bytes_to_read, buf, old_len);
4911 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
/* An IT block is at most the 2-byte IT instruction followed by
   four 4-byte instructions.  The current instruction can be at most
   the last of those four, so only the IT opcode and three earlier
   4-byte instructions can lie behind it; the furthest back we must
   search to find an IT block that affects the current instruction is
   thus 2 + 3 * 4 == 14 bytes.  */
4923 #define MAX_IT_BLOCK_PREFIX 14
4925 /* Use a quick scan if there are more than this many bytes of
4927 #define IT_SCAN_THRESHOLD 32
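
/* Illustrative sketch, not part of GDB: how the ITSTATE byte used by
   the IT handling in thumb_get_next_pc_raw (and relied on by the scan
   below) is obtained and advanced.  Bits 7:4 hold the condition that
   applies to the next instruction of the IT block (hence the
   "itstate >> 4" condition checks above); bits 3:0 track how many
   conditional instructions remain.  The CPSR keeps IT[7:2] in bits
   15:10 and IT[1:0] in bits 26:25.  Function names are hypothetical;
   the real code uses thumb_advance_itstate.  */

static unsigned int
example_itstate_from_cpsr (unsigned long cpsr)
{
  return ((cpsr >> 8) & 0xfc) | ((cpsr >> 25) & 0x03);
}

static unsigned int
example_advance_itstate (unsigned int itstate)
{
  if ((itstate & 0x07) == 0)
    return 0;                   /* That was the last instruction.  */
  /* Keep the base condition in bits 7:5; shift the mask up by one.  */
  return (itstate & 0xe0) | ((itstate << 1) & 0x1f);
}
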
4929 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4930 A breakpoint in an IT block may not be hit, depending on the
4933 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4937 CORE_ADDR boundary, func_start;
4938 int buf_len, buf2_len;
4939 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4940 int i, any, last_it, last_it_count;
4942 /* If we are using BKPT breakpoints, none of this is necessary. */
4943 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4946 /* ARM mode does not have this problem. */
4947 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4950 /* We are setting a breakpoint in Thumb code that could potentially
4951 contain an IT block. The first step is to find how much Thumb
4952 code there is; we do not need to read outside of known Thumb
4954 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4956 /* Thumb-2 code must have mapping symbols to have a chance. */
4959 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4961 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4962 && func_start > boundary)
4963 boundary = func_start;
4965 /* Search for a candidate IT instruction. We have to do some fancy
4966 footwork to distinguish a real IT instruction from the second
4967 half of a 32-bit instruction, but there is no need for that if
4968 there's no candidate. */
4969 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4971 /* No room for an IT instruction. */
4974 buf = xmalloc (buf_len);
4975 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4978 for (i = 0; i < buf_len; i += 2)
4980 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4981 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4993 /* OK, the code bytes before this instruction contain at least one
4994 halfword which resembles an IT instruction. We know that it's
4995 Thumb code, but there are still two possibilities. Either the
4996 halfword really is an IT instruction, or it is the second half of
4997 a 32-bit Thumb instruction. The only way we can tell is to
4998 scan forwards from a known instruction boundary. */
4999 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5003 /* There's a lot of code before this instruction. Start with an
5004 optimistic search; it's easy to recognize halfwords that can
5005 not be the start of a 32-bit instruction, and use that to
5006 lock on to the instruction boundaries. */
5007 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5010 buf_len = IT_SCAN_THRESHOLD;
5013 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5015 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5016 if (thumb_insn_size (inst1) == 2)
5023 /* At this point, if DEFINITE, BUF[I] is the first place we
5024 are sure that we know the instruction boundaries, and it is far
5025 enough from BPADDR that we could not miss an IT instruction
5026 affecting BPADDR. If ! DEFINITE, give up - start from a
5030 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5034 buf_len = bpaddr - boundary;
5040 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5043 buf_len = bpaddr - boundary;
5047 /* Scan forwards. Find the last IT instruction before BPADDR. */
5052 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5054 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5059 else if (inst1 & 0x0002)
5061 else if (inst1 & 0x0004)
5066 i += thumb_insn_size (inst1);
5072 /* There wasn't really an IT instruction after all. */
5075 if (last_it_count < 1)
5076 /* It was too far away. */
5079 /* This really is a trouble spot. Move the breakpoint to the IT
5081 return bpaddr - buf_len + last_it;
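
/* Illustrative sketch, not part of GDB: the halfword classification
   the scans above depend on.  A halfword whose top five bits are
   0b11101, 0b11110 or 0b11111 is the first half of a 32-bit Thumb-2
   instruction; anything else is a complete 16-bit instruction.  This
   mirrors thumb_insn_size; the name below is hypothetical.  */

static int
example_thumb_insn_size (unsigned short inst1)
{
  if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
    return 4;
  return 2;
}
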
5084 /* ARM displaced stepping support.
5086 Generally ARM displaced stepping works as follows:
5088 1. When an instruction is to be single-stepped, it is first decoded by
5089 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5090 Depending on the type of instruction, it is then copied to a scratch
5091 location, possibly in a modified form. The copy_* set of functions
5092 performs such modification, as necessary. A breakpoint is placed after
5093 the modified instruction in the scratch space to return control to GDB.
5094 Note in particular that instructions which modify the PC will no longer
5095 do so after modification.
5097 2. The instruction is single-stepped, by setting the PC to the scratch
5098 location address, and resuming. Control returns to GDB when the
5101 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5102 function used for the current instruction. This function's job is to
5103 put the CPU/memory state back to what it would have been if the
5104 instruction had been executed unmodified in its original location. */
5106 /* NOP instruction (mov r0, r0). */
5107 #define ARM_NOP 0xe1a00000
5109 /* Helper for register reads for displaced stepping. In particular, this
5110 returns the PC as it would be seen by the instruction at its original
5114 displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
5120 if (debug_displaced)
5121 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5122 (unsigned long) from + 8);
5123 return (ULONGEST) from + 8; /* Pipeline offset. */
5127 regcache_cooked_read_unsigned (regs, regno, &ret);
5128 if (debug_displaced)
5129 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5130 regno, (unsigned long) ret);
5136 displaced_in_arm_mode (struct regcache *regs)
5139 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5141 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5143 return (ps & t_bit) == 0;
5146 /* Write to the PC as from a branch instruction. */
5149 branch_write_pc (struct regcache *regs, ULONGEST val)
5151 if (displaced_in_arm_mode (regs))
5152 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5153 architecture versions < 6. */
5154 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5155 val & ~(ULONGEST) 0x3);
5157 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5158 val & ~(ULONGEST) 0x1);
5161 /* Write to the PC as from a branch-exchange instruction. */
5164 bx_write_pc (struct regcache *regs, ULONGEST val)
5167 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5169 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5173 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5174 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5176 else if ((val & 2) == 0)
5178 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5179 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5183 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5184 mode, align dest to 4 bytes). */
5185 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5186 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5187 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5191 /* Write to the PC as if from a load instruction. */
5194 load_write_pc (struct regcache *regs, ULONGEST val)
5196 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5197 bx_write_pc (regs, val);
5199 branch_write_pc (regs, val);
5202 /* Write to the PC as if from an ALU instruction. */
5205 alu_write_pc (struct regcache *regs, ULONGEST val)
5207 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && displaced_in_arm_mode (regs))
5208 bx_write_pc (regs, val);
5210 branch_write_pc (regs, val);
5213 /* Helper for writing to registers for displaced stepping. Writing to the PC
   has varying effects depending on the instruction which does the write:
5215 this is controlled by the WRITE_PC argument. */
5218 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5219 int regno, ULONGEST val, enum pc_write_style write_pc)
5223 if (debug_displaced)
5224 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5225 (unsigned long) val);
5228 case BRANCH_WRITE_PC:
5229 branch_write_pc (regs, val);
5233 bx_write_pc (regs, val);
5237 load_write_pc (regs, val);
5241 alu_write_pc (regs, val);
5244 case CANNOT_WRITE_PC:
5245 warning (_("Instruction wrote to PC in an unexpected way when "
5246 "single-stepping"));
5250 internal_error (__FILE__, __LINE__,
5251 _("Invalid argument to displaced_write_reg"));
5254 dsc->wrote_to_pc = 1;
5258 if (debug_displaced)
5259 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5260 regno, (unsigned long) val);
5261 regcache_cooked_write_unsigned (regs, regno, val);
5265 /* This function is used to concisely determine if an instruction INSN
5266 references PC. Register fields of interest in INSN should have the
5267 corresponding fields of BITMASK set to 0b1111. The function
   returns 1 if any of these fields in INSN reference the PC
5269 (also 0b1111, r15), else it returns 0. */
5272 insn_references_pc (uint32_t insn, uint32_t bitmask)
5274 uint32_t lowbit = 1;
5276 while (bitmask != 0)
5280 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
5286 mask = lowbit * 0xf;
5288 if ((insn & mask) == mask)
5297 /* The simplest copy function. Many instructions have the same effect no
5298 matter what address they are executed at: in those cases, use this. */
5301 copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5302 const char *iname, struct displaced_step_closure *dsc)
5304 if (debug_displaced)
5305 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5306 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5309 dsc->modinsn[0] = insn;
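
/* Illustrative sketch, not part of GDB: how the BITMASK argument of
   insn_references_pc above is meant to be built, shown with a compact
   equivalent of its loop.  For an ARM data-processing instruction Rn
   occupies bits 16-19 and Rd bits 12-15, so passing 0x000ff000 asks
   whether either field names the PC (0xf).  The function name is
   hypothetical.  */

static int
example_insn_references_pc (unsigned int insn, unsigned int bitmask)
{
  while (bitmask != 0)
    {
      unsigned int lowbit = bitmask & -bitmask;   /* Lowest set bit.  */
      unsigned int field = lowbit * 0xf;          /* 4-bit field there.  */

      if ((insn & field) == field)
        return 1;               /* The field holds 0xf, i.e. r15.  */
      bitmask &= ~field;
    }
  return 0;
}
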
5314 /* Preload instructions with immediate offset. */
5317 cleanup_preload (struct gdbarch *gdbarch,
5318 struct regcache *regs, struct displaced_step_closure *dsc)
5320 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5321 if (!dsc->u.preload.immed)
5322 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5326 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5327 struct displaced_step_closure *dsc)
5329 unsigned int rn = bits (insn, 16, 19);
5331 CORE_ADDR from = dsc->insn_addr;
5333 if (!insn_references_pc (insn, 0x000f0000ul))
5334 return copy_unmodified (gdbarch, insn, "preload", dsc);
5336 if (debug_displaced)
5337 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5338 (unsigned long) insn);
5340 /* Preload instructions:
5342 {pli/pld} [rn, #+/-imm]
5344 {pli/pld} [r0, #+/-imm]. */
5346 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5347 rn_val = displaced_read_reg (regs, from, rn);
5348 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5350 dsc->u.preload.immed = 1;
5352 dsc->modinsn[0] = insn & 0xfff0ffff;
5354 dsc->cleanup = &cleanup_preload;
5359 /* Preload instructions with register offset. */
5362 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5363 struct regcache *regs,
5364 struct displaced_step_closure *dsc)
5366 unsigned int rn = bits (insn, 16, 19);
5367 unsigned int rm = bits (insn, 0, 3);
5368 ULONGEST rn_val, rm_val;
5369 CORE_ADDR from = dsc->insn_addr;
5371 if (!insn_references_pc (insn, 0x000f000ful))
5372 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
5374 if (debug_displaced)
5375 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5376 (unsigned long) insn);
5378 /* Preload register-offset instructions:
5380 {pli/pld} [rn, rm {, shift}]
5382 {pli/pld} [r0, r1 {, shift}]. */
5384 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5385 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
5386 rn_val = displaced_read_reg (regs, from, rn);
5387 rm_val = displaced_read_reg (regs, from, rm);
5388 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5389 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5391 dsc->u.preload.immed = 0;
5393 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5395 dsc->cleanup = &cleanup_preload;
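
/* Illustrative sketch, not part of GDB: the register renaming the
   copy_* routines above and below apply to the instruction encoding.
   Clearing a 4-bit register field selects r0 and OR-ing in a small
   value selects another low register; for instance the preload copies
   use (insn & 0xfff0ffff) to redirect Rn to r0, and
   (insn & 0xfff0fff0) | 0x1 to redirect Rn and Rm to r0 and r1.  The
   helper name is hypothetical.  */

static unsigned int
example_rewrite_reg_field (unsigned int insn, int lowest_bit,
                           unsigned int newreg)
{
  unsigned int field_mask = (unsigned int) 0xf << lowest_bit;

  return (insn & ~field_mask) | (newreg << lowest_bit);
}
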
5400 /* Copy/cleanup coprocessor load and store instructions. */
5403 cleanup_copro_load_store (struct gdbarch *gdbarch,
5404 struct regcache *regs,
5405 struct displaced_step_closure *dsc)
5407 ULONGEST rn_val = displaced_read_reg (regs, dsc->insn_addr, 0);
5409 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5411 if (dsc->u.ldst.writeback)
5412 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5416 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5417 struct regcache *regs,
5418 struct displaced_step_closure *dsc)
5420 unsigned int rn = bits (insn, 16, 19);
5422 CORE_ADDR from = dsc->insn_addr;
5424 if (!insn_references_pc (insn, 0x000f0000ul))
5425 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5427 if (debug_displaced)
5428 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5429 "load/store insn %.8lx\n", (unsigned long) insn);
5431 /* Coprocessor load/store instructions:
5433 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5435 {stc/stc2} [r0, #+/-imm].
5437 ldc/ldc2 are handled identically. */
5439 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5440 rn_val = displaced_read_reg (regs, from, rn);
5441 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5443 dsc->u.ldst.writeback = bit (insn, 25);
5444 dsc->u.ldst.rn = rn;
5446 dsc->modinsn[0] = insn & 0xfff0ffff;
5448 dsc->cleanup = &cleanup_copro_load_store;
5453 /* Clean up branch instructions (actually perform the branch, by setting
5457 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5458 struct displaced_step_closure *dsc)
5460 ULONGEST from = dsc->insn_addr;
5461 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5462 int branch_taken = condition_true (dsc->u.branch.cond, status);
5463 enum pc_write_style write_pc = dsc->u.branch.exchange
5464 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5469 if (dsc->u.branch.link)
5471 ULONGEST pc = displaced_read_reg (regs, from, 15);
5472 displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
5475 displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
5478 /* Copy B/BL/BLX instructions with immediate destinations. */
5481 copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
5482 struct regcache *regs, struct displaced_step_closure *dsc)
5484 unsigned int cond = bits (insn, 28, 31);
5485 int exchange = (cond == 0xf);
5486 int link = exchange || bit (insn, 24);
5487 CORE_ADDR from = dsc->insn_addr;
5490 if (debug_displaced)
5491 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
5492 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
5493 (unsigned long) insn);
5495 /* Implement "BL<cond> <label>" as:
5497 Preparation: cond <- instruction condition
5498 Insn: mov r0, r0 (nop)
5499 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5501 B<cond> similar, but don't set r14 in cleanup. */
5504 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5505 then arrange the switch into Thumb mode. */
5506 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
5508 offset = bits (insn, 0, 23) << 2;
5510 if (bit (offset, 25))
5511 offset = offset | ~0x3ffffff;
5513 dsc->u.branch.cond = cond;
5514 dsc->u.branch.link = link;
5515 dsc->u.branch.exchange = exchange;
5516 dsc->u.branch.dest = from + 8 + offset;
5518 dsc->modinsn[0] = ARM_NOP;
5520 dsc->cleanup = &cleanup_branch;
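
/* Illustrative sketch, not part of GDB: the destination computation
   for an ARM-state B/BL as set up above (and by the BranchDest macro
   used earlier in this file).  The 24-bit immediate is shifted left by
   two, sign-extended to 26 bits and added to the instruction address
   plus the 8-byte pipeline offset; the extra halfword bit and mode
   switch of BLX are handled separately.  The function name is
   hypothetical.  */

static unsigned long
example_arm_branch_dest (unsigned long insn_addr, unsigned long insn)
{
  unsigned long offset = (insn & 0x00ffffff) << 2;

  if (offset & 0x02000000)      /* Sign bit of the 26-bit offset.  */
    offset |= ~(unsigned long) 0x03ffffff;
  return (insn_addr + 8 + offset) & 0xffffffffUL;
}
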
5525 /* Copy BX/BLX with register-specified destinations. */
5528 copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5529 struct regcache *regs, struct displaced_step_closure *dsc)
5531 unsigned int cond = bits (insn, 28, 31);
5534 int link = bit (insn, 5);
5535 unsigned int rm = bits (insn, 0, 3);
5536 CORE_ADDR from = dsc->insn_addr;
5538 if (debug_displaced)
5539 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
5540 "%.8lx\n", (link) ? "blx" : "bx",
5541 (unsigned long) insn);
/* Implement "{BX,BLX}<cond> <reg>" as:
5545 Preparation: cond <- instruction condition
5546 Insn: mov r0, r0 (nop)
5547 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5549 Don't set r14 in cleanup for BX. */
5551 dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
5553 dsc->u.branch.cond = cond;
5554 dsc->u.branch.link = link;
5555 dsc->u.branch.exchange = 1;
5557 dsc->modinsn[0] = ARM_NOP;
5559 dsc->cleanup = &cleanup_branch;
5564 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5567 cleanup_alu_imm (struct gdbarch *gdbarch,
5568 struct regcache *regs, struct displaced_step_closure *dsc)
5570 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
5571 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5572 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5573 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5577 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5578 struct displaced_step_closure *dsc)
5580 unsigned int rn = bits (insn, 16, 19);
5581 unsigned int rd = bits (insn, 12, 15);
5582 unsigned int op = bits (insn, 21, 24);
5583 int is_mov = (op == 0xd);
5584 ULONGEST rd_val, rn_val;
5585 CORE_ADDR from = dsc->insn_addr;
5587 if (!insn_references_pc (insn, 0x000ff000ul))
5588 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
5590 if (debug_displaced)
5591 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
5592 "%.8lx\n", is_mov ? "move" : "ALU",
5593 (unsigned long) insn);
5595 /* Instruction is of form:
5597 <op><cond> rd, [rn,] #imm
5601 Preparation: tmp1, tmp2 <- r0, r1;
5603 Insn: <op><cond> r0, r1, #imm
5604 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5607 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5608 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
5609 rn_val = displaced_read_reg (regs, from, rn);
5610 rd_val = displaced_read_reg (regs, from, rd);
5611 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5612 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5616 dsc->modinsn[0] = insn & 0xfff00fff;
5618 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
5620 dsc->cleanup = &cleanup_alu_imm;
5625 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5628 cleanup_alu_reg (struct gdbarch *gdbarch,
5629 struct regcache *regs, struct displaced_step_closure *dsc)
5634 rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
5636 for (i = 0; i < 3; i++)
5637 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5639 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5643 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5644 struct displaced_step_closure *dsc)
5646 unsigned int rn = bits (insn, 16, 19);
5647 unsigned int rm = bits (insn, 0, 3);
5648 unsigned int rd = bits (insn, 12, 15);
5649 unsigned int op = bits (insn, 21, 24);
5650 int is_mov = (op == 0xd);
5651 ULONGEST rd_val, rn_val, rm_val;
5652 CORE_ADDR from = dsc->insn_addr;
5654 if (!insn_references_pc (insn, 0x000ff00ful))
5655 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
5657 if (debug_displaced)
5658 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
5659 is_mov ? "move" : "ALU", (unsigned long) insn);
5661 /* Instruction is of form:
5663 <op><cond> rd, [rn,] rm [, <shift>]
5667 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
5668 r0, r1, r2 <- rd, rn, rm
5669 Insn: <op><cond> r0, r1, r2 [, <shift>]
5670 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
5673 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5674 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
5675 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
5676 rd_val = displaced_read_reg (regs, from, rd);
5677 rn_val = displaced_read_reg (regs, from, rn);
5678 rm_val = displaced_read_reg (regs, from, rm);
5679 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5680 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5681 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5685 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
5687 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
5689 dsc->cleanup = &cleanup_alu_reg;
5694 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
5697 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
5698 struct regcache *regs,
5699 struct displaced_step_closure *dsc)
5701 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
5704 for (i = 0; i < 4; i++)
5705 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5707 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5711 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
5712 struct regcache *regs,
5713 struct displaced_step_closure *dsc)
5715 unsigned int rn = bits (insn, 16, 19);
5716 unsigned int rm = bits (insn, 0, 3);
5717 unsigned int rd = bits (insn, 12, 15);
5718 unsigned int rs = bits (insn, 8, 11);
5719 unsigned int op = bits (insn, 21, 24);
5720 int is_mov = (op == 0xd), i;
5721 ULONGEST rd_val, rn_val, rm_val, rs_val;
5722 CORE_ADDR from = dsc->insn_addr;
5724 if (!insn_references_pc (insn, 0x000fff0ful))
5725 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
5727 if (debug_displaced)
5728 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
5729 "%.8lx\n", is_mov ? "move" : "ALU",
5730 (unsigned long) insn);
5732 /* Instruction is of form:
5734 <op><cond> rd, [rn,] rm, <shift> rs
5738 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
5739 r0, r1, r2, r3 <- rd, rn, rm, rs
5740 Insn: <op><cond> r0, r1, r2, <shift> r3
5742 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
5746 for (i = 0; i < 4; i++)
5747 dsc->tmp[i] = displaced_read_reg (regs, from, i);
5749 rd_val = displaced_read_reg (regs, from, rd);
5750 rn_val = displaced_read_reg (regs, from, rn);
5751 rm_val = displaced_read_reg (regs, from, rm);
5752 rs_val = displaced_read_reg (regs, from, rs);
5753 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5754 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5755 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5756 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
5760 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
5762 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
5764 dsc->cleanup = &cleanup_alu_shifted_reg;
5769 /* Clean up load instructions. */
5772 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
5773 struct displaced_step_closure *dsc)
5775 ULONGEST rt_val, rt_val2 = 0, rn_val;
5776 CORE_ADDR from = dsc->insn_addr;
5778 rt_val = displaced_read_reg (regs, from, 0);
5779 if (dsc->u.ldst.xfersize == 8)
5780 rt_val2 = displaced_read_reg (regs, from, 1);
5781 rn_val = displaced_read_reg (regs, from, 2);
5783 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5784 if (dsc->u.ldst.xfersize > 4)
5785 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5786 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5787 if (!dsc->u.ldst.immed)
5788 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5790 /* Handle register writeback. */
5791 if (dsc->u.ldst.writeback)
5792 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5793 /* Put result in right place. */
5794 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
5795 if (dsc->u.ldst.xfersize == 8)
5796 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
5799 /* Clean up store instructions. */
5802 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
5803 struct displaced_step_closure *dsc)
5805 CORE_ADDR from = dsc->insn_addr;
5806 ULONGEST rn_val = displaced_read_reg (regs, from, 2);
5808 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5809 if (dsc->u.ldst.xfersize > 4)
5810 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5811 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5812 if (!dsc->u.ldst.immed)
5813 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5814 if (!dsc->u.ldst.restore_r4)
5815 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
5818 if (dsc->u.ldst.writeback)
5819 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5822 /* Copy "extra" load/store instructions. These are halfword/doubleword
5823 transfers, which have a different encoding to byte/word transfers. */
copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
5827 struct regcache *regs, struct displaced_step_closure *dsc)
5829 unsigned int op1 = bits (insn, 20, 24);
5830 unsigned int op2 = bits (insn, 5, 6);
5831 unsigned int rt = bits (insn, 12, 15);
5832 unsigned int rn = bits (insn, 16, 19);
5833 unsigned int rm = bits (insn, 0, 3);
5834 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
5835 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
5836 int immed = (op1 & 0x4) != 0;
5838 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
5839 CORE_ADDR from = dsc->insn_addr;
5841 if (!insn_references_pc (insn, 0x000ff00ful))
5842 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
5844 if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
                        "insn %.8lx\n", unprivileged ? "unprivileged " : "",
5847 (unsigned long) insn);
5849 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
5852 internal_error (__FILE__, __LINE__,
5853 _("copy_extra_ld_st: instruction decode error"));
5855 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5856 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
5857 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
5859 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
5861 rt_val = displaced_read_reg (regs, from, rt);
5862 if (bytesize[opcode] == 8)
5863 rt_val2 = displaced_read_reg (regs, from, rt + 1);
5864 rn_val = displaced_read_reg (regs, from, rn);
5866 rm_val = displaced_read_reg (regs, from, rm);
5868 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5869 if (bytesize[opcode] == 8)
5870 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
5871 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5873 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5876 dsc->u.ldst.xfersize = bytesize[opcode];
5877 dsc->u.ldst.rn = rn;
5878 dsc->u.ldst.immed = immed;
5879 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5880 dsc->u.ldst.restore_r4 = 0;
5883 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
5885 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
5886 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5888 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
5890 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
5891 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5893 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
5898 /* Copy byte/word loads and stores. */
5901 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
5902 struct regcache *regs,
5903 struct displaced_step_closure *dsc, int load, int byte,
5906 int immed = !bit (insn, 25);
5907 unsigned int rt = bits (insn, 12, 15);
5908 unsigned int rn = bits (insn, 16, 19);
5909 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
5910 ULONGEST rt_val, rn_val, rm_val = 0;
5911 CORE_ADDR from = dsc->insn_addr;
5913 if (!insn_references_pc (insn, 0x000ff00ful))
5914 return copy_unmodified (gdbarch, insn, "load/store", dsc);
5916 if (debug_displaced)
5917 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
5918 load ? (byte ? "ldrb" : "ldr")
5919 : (byte ? "strb" : "str"), usermode ? "t" : "",
5920 (unsigned long) insn);
5922 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5923 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
5925 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
5927 dsc->tmp[4] = displaced_read_reg (regs, from, 4);
5929 rt_val = displaced_read_reg (regs, from, rt);
5930 rn_val = displaced_read_reg (regs, from, rn);
5932 rm_val = displaced_read_reg (regs, from, rm);
5934 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5935 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5937 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5940 dsc->u.ldst.xfersize = byte ? 1 : 4;
5941 dsc->u.ldst.rn = rn;
5942 dsc->u.ldst.immed = immed;
5943 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5945 /* To write PC we can do:
5947 Before this sequence of instructions:
     r0 is the PC value obtained from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value obtained from displaced_read_reg.
5951 Insn1: push {pc} Write address of STR instruction + offset on stack
5952 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
5953 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
5954 = addr(Insn1) + offset - addr(Insn3) - 8
5956 Insn4: add r4, r4, #8 r4 = offset - 8
5957 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
5959 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
5961 Otherwise we don't know what value to write for PC, since the offset is
5962 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
5963 of this can be found in Section "Saving from r15" in
5964 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
5966 if (load || rt != 15)
5968 dsc->u.ldst.restore_r4 = 0;
5971 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
5973 {ldr,str}[b]<cond> r0, [r2, #imm]. */
5974 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5976 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
5978 {ldr,str}[b]<cond> r0, [r2, r3]. */
5979 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5983 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
5984 dsc->u.ldst.restore_r4 = 1;
5985 dsc->modinsn[0] = 0xe92d8000; /* push {pc} */
5986 dsc->modinsn[1] = 0xe8bd0010; /* pop {r4} */
5987 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
5988 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
5989 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
5993 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
5995 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
5997 dsc->modinsn[6] = 0x0; /* breakpoint location. */
5998 dsc->modinsn[7] = 0x0; /* scratch space. */
6003 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
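
/* Illustrative sketch, not part of GDB: what the five-instruction
   scratch sequence described above computes.  If the core stores
   PC + K for a store of r15 (K is 8 or 12 and is not known in
   advance), then after insns 1-5 r0 holds FROM + K, which is exactly
   what an unmodified "str pc, [...]" at the original location would
   have stored.  Pure arithmetic; the function name is hypothetical.  */

static unsigned long
example_displaced_str_pc_value (unsigned long from, unsigned long scratch,
                                unsigned long k)
{
  unsigned long r0 = from + 8;    /* displaced_read_reg value of the PC.  */
  unsigned long r4 = scratch + k; /* Insns 1-2: push {pc}; pop {r4}.  */

  r4 -= (scratch + 8) + 8;        /* Insn 3: sub r4, r4, pc.  */
  r4 += 8;                        /* Insn 4: add r4, r4, #8.  */
  r0 += r4;                       /* Insn 5: add r0, r0, r4.  */
  return r0;                      /* == FROM + K for either value of K.  */
}
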
6008 /* Cleanup LDM instructions with fully-populated register list. This is an
6009 unfortunate corner case: it's impossible to implement correctly by modifying
6010 the instruction. The issue is as follows: we have an instruction,
6014 which we must rewrite to avoid loading PC. A possible solution would be to
6015 do the load in two halves, something like (with suitable cleanup
6019 ldm[id][ab] r8!, {r0-r7}
6021 ldm[id][ab] r8, {r7-r14}
6024 but at present there's no suitable place for <temp>, since the scratch space
6025 is overwritten before the cleanup routine is called. For now, we simply
6026 emulate the instruction. */
6029 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
6030 struct displaced_step_closure *dsc)
6032 ULONGEST from = dsc->insn_addr;
6033 int inc = dsc->u.block.increment;
6034 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
6035 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
6036 uint32_t regmask = dsc->u.block.regmask;
6037 int regno = inc ? 0 : 15;
6038 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
6039 int exception_return = dsc->u.block.load && dsc->u.block.user
6040 && (regmask & 0x8000) != 0;
6041 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
6042 int do_transfer = condition_true (dsc->u.block.cond, status);
6043 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6048 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6049 sensible we can do here. Complain loudly. */
6050 if (exception_return)
6051 error (_("Cannot single-step exception return"));
6053 /* We don't handle any stores here for now. */
6054 gdb_assert (dsc->u.block.load != 0);
6056 if (debug_displaced)
6057 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
6058 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
6059 dsc->u.block.increment ? "inc" : "dec",
6060 dsc->u.block.before ? "before" : "after");
6067 while (regno <= 15 && (regmask & (1 << regno)) == 0)
6070 while (regno >= 0 && (regmask & (1 << regno)) == 0)
6073 xfer_addr += bump_before;
6075 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
6076 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
6078 xfer_addr += bump_after;
6080 regmask &= ~(1 << regno);
6083 if (dsc->u.block.writeback)
6084 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
6088 /* Clean up an STM which included the PC in the register list. */
6091 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
6092 struct displaced_step_closure *dsc)
6094 ULONGEST from = dsc->insn_addr;
6095 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
6096 int store_executed = condition_true (dsc->u.block.cond, status);
6097 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
6098 CORE_ADDR stm_insn_addr;
6101 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6103 /* If condition code fails, there's nothing else to do. */
6104 if (!store_executed)
6107 if (dsc->u.block.increment)
6109 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
6111 if (dsc->u.block.before)
6116 pc_stored_at = dsc->u.block.xfer_addr;
6118 if (dsc->u.block.before)
6122 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
6123 stm_insn_addr = dsc->scratch_base;
6124 offset = pc_val - stm_insn_addr;
6126 if (debug_displaced)
6127 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
6128 "STM instruction\n", offset);
6130 /* Rewrite the stored PC to the proper value for the non-displaced original
6132 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
6133 dsc->insn_addr + offset);
6136 /* Clean up an LDM which includes the PC in the register list. We clumped all
6137 the registers in the transferred list into a contiguous range r0...rX (to
6138 avoid loading PC directly and losing control of the debugged program), so we
6139 must undo that here. */
6142 cleanup_block_load_pc (struct gdbarch *gdbarch,
6143 struct regcache *regs,
6144 struct displaced_step_closure *dsc)
6146 ULONGEST from = dsc->insn_addr;
6147 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
6148 int load_executed = condition_true (dsc->u.block.cond, status), i;
6149 unsigned int mask = dsc->u.block.regmask, write_reg = 15;
6150 unsigned int regs_loaded = bitcount (mask);
6151 unsigned int num_to_shuffle = regs_loaded, clobbered;
6153 /* The method employed here will fail if the register list is fully populated
6154 (we need to avoid loading PC directly). */
6155 gdb_assert (num_to_shuffle < 16);
6160 clobbered = (1 << num_to_shuffle) - 1;
6162 while (num_to_shuffle > 0)
6164 if ((mask & (1 << write_reg)) != 0)
6166 unsigned int read_reg = num_to_shuffle - 1;
6168 if (read_reg != write_reg)
6170 ULONGEST rval = displaced_read_reg (regs, from, read_reg);
6171 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
6172 if (debug_displaced)
6173 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
6174 "loaded register r%d to r%d\n"), read_reg,
6177 else if (debug_displaced)
6178 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
6179 "r%d already in the right place\n"),
6182 clobbered &= ~(1 << write_reg);
6190 /* Restore any registers we scribbled over. */
6191 for (write_reg = 0; clobbered != 0; write_reg++)
6193 if ((clobbered & (1 << write_reg)) != 0)
6195 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
6197 if (debug_displaced)
6198 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
6199 "clobbered register r%d\n"), write_reg);
6200 clobbered &= ~(1 << write_reg);
6204 /* Perform register writeback manually. */
6205 if (dsc->u.block.writeback)
6207 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
6209 if (dsc->u.block.increment)
6210 new_rn_val += regs_loaded * 4;
6212 new_rn_val -= regs_loaded * 4;
6214 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
6219 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6220 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6223 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6224 struct displaced_step_closure *dsc)
6226 int load = bit (insn, 20);
6227 int user = bit (insn, 22);
6228 int increment = bit (insn, 23);
6229 int before = bit (insn, 24);
6230 int writeback = bit (insn, 21);
6231 int rn = bits (insn, 16, 19);
6232 CORE_ADDR from = dsc->insn_addr;
6234 /* Block transfers which don't mention PC can be run directly
6236 if (rn != 15 && (insn & 0x8000) == 0)
6237 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
6241 warning (_("displaced: Unpredictable LDM or STM with "
6242 "base register r15"));
6243 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
6246 if (debug_displaced)
6247 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6248 "%.8lx\n", (unsigned long) insn);
6250 dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
6251 dsc->u.block.rn = rn;
6253 dsc->u.block.load = load;
6254 dsc->u.block.user = user;
6255 dsc->u.block.increment = increment;
6256 dsc->u.block.before = before;
6257 dsc->u.block.writeback = writeback;
6258 dsc->u.block.cond = bits (insn, 28, 31);
6260 dsc->u.block.regmask = insn & 0xffff;
6264 if ((insn & 0xffff) == 0xffff)
6266 /* LDM with a fully-populated register list. This case is
6267 particularly tricky. Implement for now by fully emulating the
6268 instruction (which might not behave perfectly in all cases, but
6269 these instructions should be rare enough for that not to matter
6271 dsc->modinsn[0] = ARM_NOP;
6273 dsc->cleanup = &cleanup_block_load_all;
6277 /* LDM of a list of registers which includes PC. Implement by
6278 rewriting the list of registers to be transferred into a
6279 contiguous chunk r0...rX before doing the transfer, then shuffling
6280 registers into the correct places in the cleanup routine. */
6281 unsigned int regmask = insn & 0xffff;
6282 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6283 unsigned int to = 0, from = 0, i, new_rn;
6285 for (i = 0; i < num_in_list; i++)
6286 dsc->tmp[i] = displaced_read_reg (regs, from, i);
6288 /* Writeback makes things complicated. We need to avoid clobbering
6289 the base register with one of the registers in our modified
6290 register list, but just using a different register can't work in
6293 ldm r14!, {r0-r13,pc}
6295 which would need to be rewritten as:
6299 but that can't work, because there's no free register for N.
6301 Solve this by turning off the writeback bit, and emulating
6302 writeback manually in the cleanup routine. */
6307 new_regmask = (1 << num_in_list) - 1;
6309 if (debug_displaced)
6310 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6311 "{..., pc}: original reg list %.4x, modified "
6312 "list %.4x\n"), rn, writeback ? "!" : "",
6313 (int) insn & 0xffff, new_regmask);
6315 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
6317 dsc->cleanup = &cleanup_block_load_pc;
6322 /* STM of a list of registers which includes PC. Run the instruction
6323 as-is, but out of line: this will store the wrong value for the PC,
6324 so we must manually fix up the memory in the cleanup routine.
6325 Doing things this way has the advantage that we can auto-detect
6326 the offset of the PC write (which is architecture-dependent) in
6327 the cleanup routine. */
6328 dsc->modinsn[0] = insn;
6330 dsc->cleanup = &cleanup_block_store_pc;
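
/* Illustrative sketch, not part of GDB: the register-list rewrite used
   above for an LDM whose list includes the PC.  The original list is
   replaced by a contiguous run r0..r(N-1) of the same length, so the
   PC is never loaded directly; cleanup_block_load_pc then shuffles the
   loaded values into their real destinations.  For example, a list
   containing r0, r4 and pc (0x8011) becomes r0-r2 (0x0007).  The
   helper name is hypothetical.  */

static unsigned int
example_contiguous_reglist (unsigned int regmask)
{
  int count = 0;
  unsigned int m;

  for (m = regmask & 0xffff; m != 0; m &= m - 1)
    count++;                      /* Population count, as bitcount does.  */
  return ((unsigned int) 1 << count) - 1;   /* Selects r0 .. r(count-1).  */
}
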
6336 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6337 for Linux, where some SVC instructions must be treated specially. */
6340 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6341 struct displaced_step_closure *dsc)
6343 CORE_ADDR from = dsc->insn_addr;
6344 CORE_ADDR resume_addr = from + 4;
6346 if (debug_displaced)
6347 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
6348 "%.8lx\n", (unsigned long) resume_addr);
6350 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6354 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6355 struct regcache *regs, struct displaced_step_closure *dsc)
6357 CORE_ADDR from = dsc->insn_addr;
6359 /* Allow OS-specific code to override SVC handling. */
6360 if (dsc->u.svc.copy_svc_os)
6361 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
6363 if (debug_displaced)
6364 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
6365 (unsigned long) insn);
6367 /* Preparation: none.
6368 Insn: unmodified svc.
6369 Cleanup: pc <- insn_addr + 4. */
6371 dsc->modinsn[0] = insn;
6373 dsc->cleanup = &cleanup_svc;
6374 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6376 dsc->wrote_to_pc = 1;
6381 /* Copy undefined instructions. */
6384 copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6385 struct displaced_step_closure *dsc)
6387 if (debug_displaced)
6388 fprintf_unfiltered (gdb_stdlog,
6389 "displaced: copying undefined insn %.8lx\n",
6390 (unsigned long) insn);
6392 dsc->modinsn[0] = insn;
6397 /* Copy unpredictable instructions. */
6400 copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
6401 struct displaced_step_closure *dsc)
6403 if (debug_displaced)
6404 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
6405 "%.8lx\n", (unsigned long) insn);
6407 dsc->modinsn[0] = insn;
6412 /* The decode_* functions are instruction decoding helpers. They mostly follow
6413 the presentation in the ARM ARM. */
6416 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
6417 struct regcache *regs,
6418 struct displaced_step_closure *dsc)
6420 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
6421 unsigned int rn = bits (insn, 16, 19);
6423 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
6424 return copy_unmodified (gdbarch, insn, "cps", dsc);
6425 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
6426 return copy_unmodified (gdbarch, insn, "setend", dsc);
6427 else if ((op1 & 0x60) == 0x20)
6428 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
6429 else if ((op1 & 0x71) == 0x40)
6430 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
6431 else if ((op1 & 0x77) == 0x41)
6432 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
6433 else if ((op1 & 0x77) == 0x45)
6434 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
6435 else if ((op1 & 0x77) == 0x51)
6438 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
6440 return copy_unpred (gdbarch, insn, dsc);
6442 else if ((op1 & 0x77) == 0x55)
6443 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
6444 else if (op1 == 0x57)
6447 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
6448 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
6449 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
6450 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
6451 default: return copy_unpred (gdbarch, insn, dsc);
6453 else if ((op1 & 0x63) == 0x43)
6454 return copy_unpred (gdbarch, insn, dsc);
6455 else if ((op2 & 0x1) == 0x0)
6456 switch (op1 & ~0x80)
6459 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
6461 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
6462 case 0x71: case 0x75:
6464 return copy_preload_reg (gdbarch, insn, regs, dsc);
6465 case 0x63: case 0x67: case 0x73: case 0x77:
6466 return copy_unpred (gdbarch, insn, dsc);
6468 return copy_undef (gdbarch, insn, dsc);
6471 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
6475 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
6476 struct regcache *regs,
6477 struct displaced_step_closure *dsc)
6479 if (bit (insn, 27) == 0)
6480 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
6481 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
6482 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
6485 return copy_unmodified (gdbarch, insn, "srs", dsc);
6488 return copy_unmodified (gdbarch, insn, "rfe", dsc);
6490 case 0x4: case 0x5: case 0x6: case 0x7:
6491 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
6494 switch ((insn & 0xe00000) >> 21)
6496 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
6498 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6501 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6504 return copy_undef (gdbarch, insn, dsc);
6509 int rn_f = (bits (insn, 16, 19) == 0xf);
6510 switch ((insn & 0xe00000) >> 21)
6513 /* ldc/ldc2 imm (undefined for rn == pc). */
6514 return rn_f ? copy_undef (gdbarch, insn, dsc)
6515 : copy_copro_load_store (gdbarch, insn, regs, dsc);
6518 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6520 case 0x4: case 0x5: case 0x6: case 0x7:
6521 /* ldc/ldc2 lit (undefined for rn != pc). */
6522 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
6523 : copy_undef (gdbarch, insn, dsc);
6526 return copy_undef (gdbarch, insn, dsc);
6531 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
6534 if (bits (insn, 16, 19) == 0xf)
6536 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6538 return copy_undef (gdbarch, insn, dsc);
6542 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6544 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6548 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6550 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6553 return copy_undef (gdbarch, insn, dsc);
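/* Worked example (illustration only, not part of the original code): the
   barrier DSB SY is encoded as 0xf57ff04f.  Its top four bits are 0xf, so
   arm_process_displaced_insn routes it to decode_unconditional; bit 27 is
   clear, so it is passed on to decode_misc_memhint_neon, where
   bits (insn, 20, 26) == 0x57 and bits (insn, 4, 7) == 0x4 select the "dsb"
   case and the instruction is simply copied unmodified.  */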
6557 /* Decode miscellaneous instructions in dp/misc encoding space. */
6560 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
6561 struct regcache *regs,
6562 struct displaced_step_closure *dsc)
6564 unsigned int op2 = bits (insn, 4, 6);
6565 unsigned int op = bits (insn, 21, 22);
6566 unsigned int op1 = bits (insn, 16, 19);
6571 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
6574 if (op == 0x1) /* bx. */
6575 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
6577 return copy_unmodified (gdbarch, insn, "clz", dsc);
6579 return copy_undef (gdbarch, insn, dsc);
6583 /* Not really supported. */
6584 return copy_unmodified (gdbarch, insn, "bxj", dsc);
6586 return copy_undef (gdbarch, insn, dsc);
6590 return copy_bx_blx_reg (gdbarch, insn,
6591 regs, dsc); /* blx register. */
6593 return copy_undef (gdbarch, insn, dsc);
6596 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
6600 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
6602 /* Not really supported. */
6603 return copy_unmodified (gdbarch, insn, "smc", dsc);
6606 return copy_undef (gdbarch, insn, dsc);
6611 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6612 struct displaced_step_closure *dsc)
6615 switch (bits (insn, 20, 24))
6618 return copy_unmodified (gdbarch, insn, "movw", dsc);
6621 return copy_unmodified (gdbarch, insn, "movt", dsc);
6623 case 0x12: case 0x16:
6624 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
6627 return copy_alu_imm (gdbarch, insn, regs, dsc);
6631 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
6633 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
6634 return copy_alu_reg (gdbarch, insn, regs, dsc);
6635 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
6636 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
6637 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
6638 return decode_miscellaneous (gdbarch, insn, regs, dsc);
6639 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
6640 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
6641 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
6642 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
6643 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
6644 return copy_unmodified (gdbarch, insn, "synch", dsc);
6645 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
6646 /* The boolean argument means "unprivileged". */
6647 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
6651 /* Should be unreachable. */
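/* Worked example (illustration only): for "mov r0, r1" (0xe1a00001) the
   top-level dispatch selects decode_dp_misc; bit 25 is clear, so the
   register-operand path above is taken with op1 == 0x1a and op2 == 0x0,
   and the instruction is handled by copy_alu_reg.  */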
6656 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
6657 struct regcache *regs,
6658 struct displaced_step_closure *dsc)
6660 int a = bit (insn, 25), b = bit (insn, 4);
6661 uint32_t op1 = bits (insn, 20, 24);
6662 int rn_f = bits (insn, 16, 19) == 0xf;
6664 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
6665 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
6666 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
6667 else if ((!a && (op1 & 0x17) == 0x02)
6668 || (a && (op1 & 0x17) == 0x02 && !b))
6669 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
6670 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
6671 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
6672 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
6673 else if ((!a && (op1 & 0x17) == 0x03)
6674 || (a && (op1 & 0x17) == 0x03 && !b))
6675 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
6676 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
6677 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
6678 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
6679 else if ((!a && (op1 & 0x17) == 0x06)
6680 || (a && (op1 & 0x17) == 0x06 && !b))
6681 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
6682 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
6683 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
6684 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
6685 else if ((!a && (op1 & 0x17) == 0x07)
6686 || (a && (op1 & 0x17) == 0x07 && !b))
6687 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
6689 /* Should be unreachable. */
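/* Note on the calls above (added for clarity): judging from the call sites,
   the three trailing arguments of copy_ldr_str_ldrb_strb select load
   vs. store (the L bit, insn bit 20), byte vs. word access (the B bit,
   insn bit 22), and the unprivileged -T forms (P clear and W set),
   in that order.  */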
6694 decode_media (struct gdbarch *gdbarch, uint32_t insn,
6695 struct displaced_step_closure *dsc)
6697 switch (bits (insn, 20, 24))
6699 case 0x00: case 0x01: case 0x02: case 0x03:
6700 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
6702 case 0x04: case 0x05: case 0x06: case 0x07:
6703 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
6705 case 0x08: case 0x09: case 0x0a: case 0x0b:
6706 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
6707 return copy_unmodified (gdbarch, insn,
6708 "decode/pack/unpack/saturate/reverse", dsc);
6711 if (bits (insn, 5, 7) == 0) /* op2. */
6713 if (bits (insn, 12, 15) == 0xf)
6714 return copy_unmodified (gdbarch, insn, "usad8", dsc);
6716 return copy_unmodified (gdbarch, insn, "usada8", dsc);
6719 return copy_undef (gdbarch, insn, dsc);
6721 case 0x1a: case 0x1b:
6722 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
6723 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
6725 return copy_undef (gdbarch, insn, dsc);
6727 case 0x1c: case 0x1d:
6728 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
6730 if (bits (insn, 0, 3) == 0xf)
6731 return copy_unmodified (gdbarch, insn, "bfc", dsc);
6733 return copy_unmodified (gdbarch, insn, "bfi", dsc);
6736 return copy_undef (gdbarch, insn, dsc);
6738 case 0x1e: case 0x1f:
6739 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
6740 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
6742 return copy_undef (gdbarch, insn, dsc);
6745 /* Should be unreachable. */
6750 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
6751 struct regcache *regs, struct displaced_step_closure *dsc)
6754 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
6756 return copy_block_xfer (gdbarch, insn, regs, dsc);
6760 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
6761 struct regcache *regs,
6762 struct displaced_step_closure *dsc)
6764 unsigned int opcode = bits (insn, 20, 24);
6768 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
6769 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
6771 case 0x08: case 0x0a: case 0x0c: case 0x0e:
6772 case 0x12: case 0x16:
6773 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
6775 case 0x09: case 0x0b: case 0x0d: case 0x0f:
6776 case 0x13: case 0x17:
6777 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
6779 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
6780 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
6781 /* Note: no writeback for these instructions. Bit 25 will always be
6782 zero though (via caller), so the following works OK. */
6783 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6786 /* Should be unreachable. */
6791 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6792 struct regcache *regs, struct displaced_step_closure *dsc)
6794 unsigned int op1 = bits (insn, 20, 25);
6795 int op = bit (insn, 4);
6796 unsigned int coproc = bits (insn, 8, 11);
6797 unsigned int rn = bits (insn, 16, 19);
6799 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
6800 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
6801 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
6802 && (coproc & 0xe) != 0xa)
6804 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6805 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
6806 && (coproc & 0xe) != 0xa)
6807 /* ldc/ldc2 imm/lit. */
6808 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6809 else if ((op1 & 0x3e) == 0x00)
6810 return copy_undef (gdbarch, insn, dsc);
6811 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
6812 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
6813 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
6814 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6815 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
6816 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6817 else if ((op1 & 0x30) == 0x20 && !op)
6819 if ((coproc & 0xe) == 0xa)
6820 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
6822 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6824 else if ((op1 & 0x30) == 0x20 && op)
6825 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
6826 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
6827 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6828 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
6829 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6830 else if ((op1 & 0x30) == 0x30)
6831 return copy_svc (gdbarch, insn, to, regs, dsc);
6833 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
6837 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
6838 CORE_ADDR to, struct regcache *regs,
6839 struct displaced_step_closure *dsc)
6841 error (_("Displaced stepping is only supported in ARM mode"));
6845 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
6846 CORE_ADDR to, struct regcache *regs,
6847 struct displaced_step_closure *dsc)
6850 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6853 /* Most displaced instructions use a 1-instruction scratch space, so set this
6854 here and override below if/when necessary. */
6856 dsc->insn_addr = from;
6857 dsc->scratch_base = to;
6858 dsc->cleanup = NULL;
6859 dsc->wrote_to_pc = 0;
6861 if (!displaced_in_arm_mode (regs))
6862 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
6864 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
6865 if (debug_displaced)
6866 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
6867 "at %.8lx\n", (unsigned long) insn,
6868 (unsigned long) from);
6870 if ((insn & 0xf0000000) == 0xf0000000)
6871 err = decode_unconditional (gdbarch, insn, regs, dsc);
6872 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
6874 case 0x0: case 0x1: case 0x2: case 0x3:
6875 err = decode_dp_misc (gdbarch, insn, regs, dsc);
6878 case 0x4: case 0x5: case 0x6:
6879 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
6883 err = decode_media (gdbarch, insn, dsc);
6886 case 0x8: case 0x9: case 0xa: case 0xb:
6887 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
6890 case 0xc: case 0xd: case 0xe: case 0xf:
6891 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
6896 internal_error (__FILE__, __LINE__,
6897 _("arm_process_displaced_insn: Instruction decode error"));
6900 /* Actually set up the scratch space for a displaced instruction. */
6903 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
6904 CORE_ADDR to, struct displaced_step_closure *dsc)
6906 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6908 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6910 /* Poke modified instruction(s). */
6911 for (i = 0; i < dsc->numinsns; i++)
6913 if (debug_displaced)
6914 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
6915 "%.8lx\n", (unsigned long) dsc->modinsn[i],
6916 (unsigned long) to + i * 4);
6917 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
6921 /* Put breakpoint afterwards. */
6922 write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
6923 tdep->arm_breakpoint_size);
6925 if (debug_displaced)
6926 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
6927 paddress (gdbarch, from), paddress (gdbarch, to));
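/* Layout of the scratch space set up above (added for clarity): the
   modified instruction(s) occupy dsc->numinsns words starting at TO and are
   immediately followed by the ARM breakpoint instruction, so that GDB
   regains control once the copied instruction has executed.  */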
6930 /* Entry point for copying an instruction into scratch space for displaced stepping. */
6933 struct displaced_step_closure *
6934 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
6935 CORE_ADDR from, CORE_ADDR to,
6936 struct regcache *regs)
6938 struct displaced_step_closure *dsc
6939 = xmalloc (sizeof (struct displaced_step_closure));
6940 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
6941 arm_displaced_init_closure (gdbarch, from, to, dsc);
6946 /* Entry point for cleaning things up after a displaced instruction has been single-stepped. */
6950 arm_displaced_step_fixup (struct gdbarch *gdbarch,
6951 struct displaced_step_closure *dsc,
6952 CORE_ADDR from, CORE_ADDR to,
6953 struct regcache *regs)
6956 dsc->cleanup (gdbarch, regs, dsc);
6958 if (!dsc->wrote_to_pc)
6959 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
6962 #include "bfd-in2.h"
6963 #include "libcoff.h"
6966 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
6968 struct gdbarch *gdbarch = info->application_data;
6970 if (arm_pc_is_thumb (gdbarch, memaddr))
6972 static asymbol *asym;
6973 static combined_entry_type ce;
6974 static struct coff_symbol_struct csym;
6975 static struct bfd fake_bfd;
6976 static bfd_target fake_target;
6978 if (csym.native == NULL)
6980 /* Create a fake symbol vector containing a Thumb symbol.
6981 This is solely so that the code in print_insn_little_arm()
6982 and print_insn_big_arm() in opcodes/arm-dis.c will detect
6983 the presence of a Thumb symbol and switch to decoding
6984 Thumb instructions. */
6986 fake_target.flavour = bfd_target_coff_flavour;
6987 fake_bfd.xvec = &fake_target;
6988 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
6990 csym.symbol.the_bfd = &fake_bfd;
6991 csym.symbol.name = "fake";
6992 asym = (asymbol *) & csym;
6995 memaddr = UNMAKE_THUMB_ADDR (memaddr);
6996 info->symbols = &asym;
6999 info->symbols = NULL;
7001 if (info->endian == BFD_ENDIAN_BIG)
7002 return print_insn_big_arm (memaddr, info);
7004 return print_insn_little_arm (memaddr, info);
7007 /* The following macros define instruction sequences that will cause ARM
7008 CPUs to take an undefined instruction trap. These are used to
7009 signal a breakpoint to GDB.
7011 The newer ARMv4T CPUs are capable of operating in ARM or Thumb
7012 modes. A different instruction is required for each mode. The ARM
7013 CPUs can also be big or little endian. Thus four different
7014 instructions are needed to support all cases.
7016 Note: ARMv4 defines several new instructions that will take the
7017 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
7018 not in fact add the new instructions. The new undefined
7019 instructions in ARMv4 are all instructions that had no defined
7020 behaviour in earlier chips. There is no guarantee that they will
7021 raise an exception; they may instead be treated as NOPs. In practice, it
7022 may only be safe to rely on instructions matching:
7024 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
7025 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
7026 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
7028 Even this may only be true if the condition predicate is true. The
7029 following use a condition predicate of ALWAYS so it is always TRUE.
7031 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
7032 and NetBSD all use a software interrupt rather than an undefined
7033 instruction to force a trap. This can be handled by the
7034 ABI-specific code during establishment of the gdbarch vector. */
7036 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
7037 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
7038 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
7039 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
7041 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
7042 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
7043 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
7044 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
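/* Sanity note (illustration only): read as a little-endian word,
   ARM_LE_BREAKPOINT is 0xe7ffdefe, which fits the pattern documented
   above -- condition 0xe (ALWAYS), bits 27:25 == 011 and bit 4 == 1.  */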
7046 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
7047 the program counter value to determine whether a 16-bit or 32-bit
7048 breakpoint should be used. It returns a pointer to a string of
7049 bytes that encode a breakpoint instruction, stores the length of
7050 the string to *lenptr, and adjusts the program counter (if
7051 necessary) to point to the actual memory location where the
7052 breakpoint should be inserted. */
7054 static const unsigned char *
7055 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
7057 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7058 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7060 if (arm_pc_is_thumb (gdbarch, *pcptr))
7062 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
7064 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
7065 check whether we are replacing a 32-bit instruction. */
7066 if (tdep->thumb2_breakpoint != NULL)
7069 if (target_read_memory (*pcptr, buf, 2) == 0)
7071 unsigned short inst1;
7072 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
7073 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
7075 *lenptr = tdep->thumb2_breakpoint_size;
7076 return tdep->thumb2_breakpoint;
7081 *lenptr = tdep->thumb_breakpoint_size;
7082 return tdep->thumb_breakpoint;
7086 *lenptr = tdep->arm_breakpoint_size;
7087 return tdep->arm_breakpoint;
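/* Illustration only (not part of the original file): the test above mirrors
   the Thumb-2 encoding rule that an instruction is 32 bits wide exactly when
   its first halfword lies in the range 0xe800-0xffff.  A self-contained
   restatement, guarded out so that it is never compiled:  */
#if 0
#include <stdint.h>

static int
example_thumb_insn_is_32bit (uint16_t first_halfword)
{
  /* Same predicate as used when choosing between the 16-bit and 32-bit
     Thumb breakpoints above.  */
  return (first_halfword & 0xe000) == 0xe000
	 && (first_halfword & 0x1800) != 0;
}
#endif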
7092 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
7095 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7097 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
7099 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
7100 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
7101 that this is not confused with a 32-bit ARM breakpoint. */
7105 /* Extract from an array REGBUF containing the (raw) register state a
7106 function return value of type TYPE, and copy that, in virtual
7107 format, into VALBUF. */
7110 arm_extract_return_value (struct type *type, struct regcache *regs,
7113 struct gdbarch *gdbarch = get_regcache_arch (regs);
7114 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7116 if (TYPE_CODE_FLT == TYPE_CODE (type))
7118 switch (gdbarch_tdep (gdbarch)->fp_model)
7122 /* The value is in register F0 in internal format. We need to
7123 extract the raw value and then convert it to the desired format. */
7125 bfd_byte tmpbuf[FP_REGISTER_SIZE];
7127 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
7128 convert_from_extended (floatformat_from_type (type), tmpbuf,
7129 valbuf, gdbarch_byte_order (gdbarch));
7133 case ARM_FLOAT_SOFT_FPA:
7134 case ARM_FLOAT_SOFT_VFP:
7135 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7136 not using the VFP ABI code. */
7138 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
7139 if (TYPE_LENGTH (type) > 4)
7140 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
7141 valbuf + INT_REGISTER_SIZE);
7145 internal_error (__FILE__, __LINE__,
7146 _("arm_extract_return_value: "
7147 "Floating point model not supported"));
7151 else if (TYPE_CODE (type) == TYPE_CODE_INT
7152 || TYPE_CODE (type) == TYPE_CODE_CHAR
7153 || TYPE_CODE (type) == TYPE_CODE_BOOL
7154 || TYPE_CODE (type) == TYPE_CODE_PTR
7155 || TYPE_CODE (type) == TYPE_CODE_REF
7156 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7158 /* If the type is a plain integer, then the access is
7159 straightforward. Otherwise we have to play around a bit more. */
7161 int len = TYPE_LENGTH (type);
7162 int regno = ARM_A1_REGNUM;
7167 /* By using store_unsigned_integer we avoid having to do
7168 anything special for small big-endian values. */
7169 regcache_cooked_read_unsigned (regs, regno++, &tmp);
7170 store_unsigned_integer (valbuf,
7171 (len > INT_REGISTER_SIZE
7172 ? INT_REGISTER_SIZE : len),
7174 len -= INT_REGISTER_SIZE;
7175 valbuf += INT_REGISTER_SIZE;
7180 /* For a structure or union the behaviour is as if the value had
7181 been stored to word-aligned memory and then loaded into
7182 registers with 32-bit load instruction(s). */
7183 int len = TYPE_LENGTH (type);
7184 int regno = ARM_A1_REGNUM;
7185 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7189 regcache_cooked_read (regs, regno++, tmpbuf);
7190 memcpy (valbuf, tmpbuf,
7191 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7192 len -= INT_REGISTER_SIZE;
7193 valbuf += INT_REGISTER_SIZE;
7199 /* Will a function return an aggregate type in memory or in a
7200 register? Return 0 if an aggregate type can be returned in a
7201 register, 1 if it must be returned in memory. */
7204 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
7207 enum type_code code;
7209 CHECK_TYPEDEF (type);
7211 /* In the ARM ABI, "integer" like aggregate types are returned in
7212 registers. For an aggregate type to be integer like, its size
7213 must be less than or equal to INT_REGISTER_SIZE and the
7214 offset of each addressable subfield must be zero. Note that bit
7215 fields are not addressable, and all addressable subfields of
7216 unions always start at offset zero.
7218 This function is based on the behaviour of GCC 2.95.1.
7219 See: gcc/arm.c: arm_return_in_memory() for details.
7221 Note: Versions of GCC before GCC 2.95.2 do not set up the
7222 parameters correctly for a function returning the following
7223 structure: struct { float f;}; This should be returned in memory,
7224 not a register. Richard Earnshaw sent me a patch, but I do not
7225 know of any way to detect if a function like the above has been
7226 compiled with the correct calling convention. */
7228 /* All aggregate types that won't fit in a register must be returned in memory. */
7230 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
7235 /* The AAPCS says all aggregates not larger than a word are returned in registers. */
7237 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
7240 /* The only aggregate types that can be returned in a register are
7241 structs and unions. Arrays must be returned in memory. */
7242 code = TYPE_CODE (type);
7243 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
7248 /* Assume all other aggregate types can be returned in a register.
7249 Run a check for structures, unions and arrays. */
7252 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
7255 /* Need to check if this struct/union is "integer" like. For
7256 this to be true, its size must be less than or equal to
7257 INT_REGISTER_SIZE and the offset of each addressable
7258 subfield must be zero. Note that bit fields are not
7259 addressable, and unions always start at offset zero. If any
7260 of the subfields is a floating point type, the struct/union
7261 cannot be an integer type. */
7263 /* For each field in the object, check:
7264 1) Is it FP? --> yes, nRc = 1;
7265 2) Is it addressable (bitpos != 0) and
7266 not packed (bitsize == 0)?
7270 for (i = 0; i < TYPE_NFIELDS (type); i++)
7272 enum type_code field_type_code;
7273 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
7276 /* Is it a floating point type field? */
7277 if (field_type_code == TYPE_CODE_FLT)
7283 /* If bitpos != 0, then we have to care about it. */
7284 if (TYPE_FIELD_BITPOS (type, i) != 0)
7286 /* Bitfields are not addressable. If the field bitsize is
7287 zero, then the field is not packed. Hence it cannot be
7288 a bitfield or any other packed type. */
7289 if (TYPE_FIELD_BITSIZE (type, i) == 0)
7301 /* Write into appropriate registers a function return value of type
7302 TYPE, given in virtual format. */
7305 arm_store_return_value (struct type *type, struct regcache *regs,
7306 const gdb_byte *valbuf)
7308 struct gdbarch *gdbarch = get_regcache_arch (regs);
7309 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7311 if (TYPE_CODE (type) == TYPE_CODE_FLT)
7313 char buf[MAX_REGISTER_SIZE];
7315 switch (gdbarch_tdep (gdbarch)->fp_model)
7319 convert_to_extended (floatformat_from_type (type), buf, valbuf,
7320 gdbarch_byte_order (gdbarch));
7321 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
7324 case ARM_FLOAT_SOFT_FPA:
7325 case ARM_FLOAT_SOFT_VFP:
7326 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7327 not using the VFP ABI code. */
7329 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
7330 if (TYPE_LENGTH (type) > 4)
7331 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
7332 valbuf + INT_REGISTER_SIZE);
7336 internal_error (__FILE__, __LINE__,
7337 _("arm_store_return_value: Floating "
7338 "point model not supported"));
7342 else if (TYPE_CODE (type) == TYPE_CODE_INT
7343 || TYPE_CODE (type) == TYPE_CODE_CHAR
7344 || TYPE_CODE (type) == TYPE_CODE_BOOL
7345 || TYPE_CODE (type) == TYPE_CODE_PTR
7346 || TYPE_CODE (type) == TYPE_CODE_REF
7347 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7349 if (TYPE_LENGTH (type) <= 4)
7351 /* Values of one word or less are zero/sign-extended and returned in r0. */
7353 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7354 LONGEST val = unpack_long (type, valbuf);
7356 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
7357 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
7361 /* Integral values greater than one word are stored in consecutive
7362 registers starting with r0. This will always be a multiple of
7363 the register size. */
7364 int len = TYPE_LENGTH (type);
7365 int regno = ARM_A1_REGNUM;
7369 regcache_cooked_write (regs, regno++, valbuf);
7370 len -= INT_REGISTER_SIZE;
7371 valbuf += INT_REGISTER_SIZE;
7377 /* For a structure or union the behaviour is as if the value had
7378 been stored to word-aligned memory and then loaded into
7379 registers with 32-bit load instruction(s). */
7380 int len = TYPE_LENGTH (type);
7381 int regno = ARM_A1_REGNUM;
7382 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7386 memcpy (tmpbuf, valbuf,
7387 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7388 regcache_cooked_write (regs, regno++, tmpbuf);
7389 len -= INT_REGISTER_SIZE;
7390 valbuf += INT_REGISTER_SIZE;
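/* Example of the conventions implemented by the two routines above (added
   for clarity): with a soft-float or FPA calling convention, a C function
   returning "long long" leaves its value in r0 and r1, as if it had been
   stored to memory and loaded word by word, while a "float" result lives in
   F0 under the FPA model and in r0 under the soft-float ABIs.  */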
7396 /* Handle function return values. */
7398 static enum return_value_convention
7399 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
7400 struct type *valtype, struct regcache *regcache,
7401 gdb_byte *readbuf, const gdb_byte *writebuf)
7403 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7404 enum arm_vfp_cprc_base_type vfp_base_type;
7407 if (arm_vfp_abi_for_function (gdbarch, func_type)
7408 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
7410 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
7411 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
7413 for (i = 0; i < vfp_base_count; i++)
7415 if (reg_char == 'q')
7418 arm_neon_quad_write (gdbarch, regcache, i,
7419 writebuf + i * unit_length);
7422 arm_neon_quad_read (gdbarch, regcache, i,
7423 readbuf + i * unit_length);
7430 sprintf (name_buf, "%c%d", reg_char, i);
7431 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7434 regcache_cooked_write (regcache, regnum,
7435 writebuf + i * unit_length);
7437 regcache_cooked_read (regcache, regnum,
7438 readbuf + i * unit_length);
7441 return RETURN_VALUE_REGISTER_CONVENTION;
7444 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
7445 || TYPE_CODE (valtype) == TYPE_CODE_UNION
7446 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
7448 if (tdep->struct_return == pcc_struct_return
7449 || arm_return_in_memory (gdbarch, valtype))
7450 return RETURN_VALUE_STRUCT_CONVENTION;
7454 arm_store_return_value (valtype, regcache, writebuf);
7457 arm_extract_return_value (valtype, regcache, readbuf);
7459 return RETURN_VALUE_REGISTER_CONVENTION;
7464 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
7466 struct gdbarch *gdbarch = get_frame_arch (frame);
7467 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7468 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7470 char buf[INT_REGISTER_SIZE];
7472 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
7474 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
7478 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
7482 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
7483 return the target PC. Otherwise return 0. */
7486 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
7490 CORE_ADDR start_addr;
7492 /* Find the starting address and name of the function containing the PC. */
7493 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
7496 /* If PC is in a Thumb call or return stub, return the address of the
7497 target PC, which is in a register. The thunk functions are called
7498 _call_via_xx, where xx is the register name. The possible names
7499 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
7500 functions, named __ARM_call_via_r[0-7]. */
7501 if (strncmp (name, "_call_via_", 10) == 0
7502 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
7504 /* Use the name suffix to determine which register contains the target PC. */
7506 static char *table[15] =
7507 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
7508 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
7511 int offset = strlen (name) - 2;
7513 for (regno = 0; regno <= 14; regno++)
7514 if (strcmp (&name[offset], table[regno]) == 0)
7515 return get_frame_register_unsigned (frame, regno);
7518 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
7519 non-interworking calls to foo. We could decode the stubs
7520 to find the target but it's easier to use the symbol table. */
7521 namelen = strlen (name);
7522 if (name[0] == '_' && name[1] == '_'
7523 && ((namelen > 2 + strlen ("_from_thumb")
7524 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
7525 strlen ("_from_thumb")) == 0)
7526 || (namelen > 2 + strlen ("_from_arm")
7527 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
7528 strlen ("_from_arm")) == 0)))
7531 int target_len = namelen - 2;
7532 struct minimal_symbol *minsym;
7533 struct objfile *objfile;
7534 struct obj_section *sec;
7536 if (name[namelen - 1] == 'b')
7537 target_len -= strlen ("_from_thumb");
7539 target_len -= strlen ("_from_arm");
7541 target_name = alloca (target_len + 1);
7542 memcpy (target_name, name + 2, target_len);
7543 target_name[target_len] = '\0';
7545 sec = find_pc_section (pc);
7546 objfile = (sec == NULL) ? NULL : sec->objfile;
7547 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
7549 return SYMBOL_VALUE_ADDRESS (minsym);
7554 return 0; /* not a stub */
7558 set_arm_command (char *args, int from_tty)
7560 printf_unfiltered (_("\
7561 \"set arm\" must be followed by an apporpriate subcommand.\n"));
7562 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
7566 show_arm_command (char *args, int from_tty)
7568 cmd_show_list (showarmcmdlist, from_tty, "");
7572 arm_update_current_architecture (void)
7574 struct gdbarch_info info;
7576 /* If the current architecture is not ARM, we have nothing to do. */
7577 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
7580 /* Update the architecture. */
7581 gdbarch_info_init (&info);
7583 if (!gdbarch_update_p (info))
7584 internal_error (__FILE__, __LINE__, _("could not update architecture"));
7588 set_fp_model_sfunc (char *args, int from_tty,
7589 struct cmd_list_element *c)
7591 enum arm_float_model fp_model;
7593 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
7594 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
7596 arm_fp_model = fp_model;
7600 if (fp_model == ARM_FLOAT_LAST)
7601 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
7604 arm_update_current_architecture ();
7608 show_fp_model (struct ui_file *file, int from_tty,
7609 struct cmd_list_element *c, const char *value)
7611 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7613 if (arm_fp_model == ARM_FLOAT_AUTO
7614 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7615 fprintf_filtered (file, _("\
7616 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
7617 fp_model_strings[tdep->fp_model]);
7619 fprintf_filtered (file, _("\
7620 The current ARM floating point model is \"%s\".\n"),
7621 fp_model_strings[arm_fp_model]);
7625 arm_set_abi (char *args, int from_tty,
7626 struct cmd_list_element *c)
7628 enum arm_abi_kind arm_abi;
7630 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
7631 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
7633 arm_abi_global = arm_abi;
7637 if (arm_abi == ARM_ABI_LAST)
7638 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
7641 arm_update_current_architecture ();
7645 arm_show_abi (struct ui_file *file, int from_tty,
7646 struct cmd_list_element *c, const char *value)
7648 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7650 if (arm_abi_global == ARM_ABI_AUTO
7651 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7652 fprintf_filtered (file, _("\
7653 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
7654 arm_abi_strings[tdep->arm_abi]);
7656 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
7661 arm_show_fallback_mode (struct ui_file *file, int from_tty,
7662 struct cmd_list_element *c, const char *value)
7664 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7666 fprintf_filtered (file,
7667 _("The current execution mode assumed "
7668 "(when symbols are unavailable) is \"%s\".\n"),
7669 arm_fallback_mode_string);
7673 arm_show_force_mode (struct ui_file *file, int from_tty,
7674 struct cmd_list_element *c, const char *value)
7676 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7678 fprintf_filtered (file,
7679 _("The current execution mode assumed "
7680 "(even when symbols are available) is \"%s\".\n"),
7681 arm_force_mode_string);
7684 /* If the user changes the register disassembly style used for info
7685 register and other commands, we also have to switch the style used
7686 in opcodes for disassembly output. This function is run by the "set
7687 arm disassembly" command and performs that switch. */
7690 set_disassembly_style_sfunc (char *args, int from_tty,
7691 struct cmd_list_element *c)
7693 set_disassembly_style ();
7696 /* Return the ARM register name corresponding to register I. */
7698 arm_register_name (struct gdbarch *gdbarch, int i)
7700 const int num_regs = gdbarch_num_regs (gdbarch);
7702 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
7703 && i >= num_regs && i < num_regs + 32)
7705 static const char *const vfp_pseudo_names[] = {
7706 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
7707 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
7708 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
7709 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
7712 return vfp_pseudo_names[i - num_regs];
7715 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
7716 && i >= num_regs + 32 && i < num_regs + 32 + 16)
7718 static const char *const neon_pseudo_names[] = {
7719 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
7720 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
7723 return neon_pseudo_names[i - num_regs - 32];
7726 if (i >= ARRAY_SIZE (arm_register_names))
7727 /* These registers are only supported on targets which supply
7728 an XML description. */
7731 return arm_register_names[i];
7735 set_disassembly_style (void)
7739 /* Find the style that the user wants. */
7740 for (current = 0; current < num_disassembly_options; current++)
7741 if (disassembly_style == valid_disassembly_styles[current])
7743 gdb_assert (current < num_disassembly_options);
7745 /* Synchronize the disassembler. */
7746 set_arm_regname_option (current);
7749 /* Test whether the COFF symbol-specific value corresponds to a Thumb function. */
7753 coff_sym_is_thumb (int val)
7755 return (val == C_THUMBEXT
7756 || val == C_THUMBSTAT
7757 || val == C_THUMBEXTFUNC
7758 || val == C_THUMBSTATFUNC
7759 || val == C_THUMBLABEL);
7762 /* arm_coff_make_msymbol_special()
7763 arm_elf_make_msymbol_special()
7765 These functions test whether the COFF or ELF symbol corresponds to
7766 an address in thumb code, and set a "special" bit in a minimal
7767 symbol to indicate that it does. */
7770 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
7772 /* Thumb symbols are of type STT_LOPROC (synonymous with STT_ARM_TFUNC). */
7774 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
7776 MSYMBOL_SET_SPECIAL (msym);
7780 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
7782 if (coff_sym_is_thumb (val))
7783 MSYMBOL_SET_SPECIAL (msym);
7787 arm_objfile_data_free (struct objfile *objfile, void *arg)
7789 struct arm_per_objfile *data = arg;
7792 for (i = 0; i < objfile->obfd->section_count; i++)
7793 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
7797 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
7800 const char *name = bfd_asymbol_name (sym);
7801 struct arm_per_objfile *data;
7802 VEC(arm_mapping_symbol_s) **map_p;
7803 struct arm_mapping_symbol new_map_sym;
7805 gdb_assert (name[0] == '$');
7806 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
7809 data = objfile_data (objfile, arm_objfile_data_key);
7812 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
7813 struct arm_per_objfile);
7814 set_objfile_data (objfile, arm_objfile_data_key, data);
7815 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
7816 objfile->obfd->section_count,
7817 VEC(arm_mapping_symbol_s) *);
7819 map_p = &data->section_maps[bfd_get_section (sym)->index];
7821 new_map_sym.value = sym->value;
7822 new_map_sym.type = name[1];
7824 /* Assume that most mapping symbols appear in order of increasing
7825 value. If they were randomly distributed, it would be faster to
7826 always push here and then sort at first use. */
7827 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
7829 struct arm_mapping_symbol *prev_map_sym;
7831 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
7832 if (prev_map_sym->value >= sym->value)
7835 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
7836 arm_compare_mapping_symbols);
7837 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
7842 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
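/* For reference (added comment): these are the standard ARM ELF mapping
   symbols -- "$a" marks the start of a run of ARM code, "$t" Thumb code and
   "$d" literal data -- which is why only those three prefixes are recorded
   above.  */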
7846 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
7848 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7849 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
7851 /* If necessary, set the T bit. */
7854 ULONGEST val, t_bit;
7855 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
7856 t_bit = arm_psr_thumb_bit (gdbarch);
7857 if (arm_pc_is_thumb (gdbarch, pc))
7858 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7861 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
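/* Note (added for clarity): writing the PC alone is not enough on ARM; the
   code above also updates the T (Thumb) flag in the CPSR so that execution
   resumes in the instruction set matching the new PC.  */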
7866 /* Read the contents of a NEON quad register, by reading from two
7867 double registers. This is used to implement the quad pseudo
7868 registers, and for argument passing in case the quad registers are
7869 missing; vectors are passed in quad registers when using the VFP
7870 ABI, even if a NEON unit is not present. REGNUM is the index of
7871 the quad register, in [0, 15]. */
7874 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
7875 int regnum, gdb_byte *buf)
7878 gdb_byte reg_buf[8];
7879 int offset, double_regnum;
7881 sprintf (name_buf, "d%d", regnum << 1);
7882 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7885 /* d0 is always the least significant half of q0. */
7886 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7891 regcache_raw_read (regcache, double_regnum, reg_buf);
7892 memcpy (buf + offset, reg_buf, 8);
7894 offset = 8 - offset;
7895 regcache_raw_read (regcache, double_regnum + 1, reg_buf);
7896 memcpy (buf + offset, reg_buf, 8);
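/* Example (added for clarity): reading pseudo register q1 reads the raw
   registers d2 and d3 and concatenates their contents, with d2 supplying
   the least significant eight bytes on a little-endian target.  */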
7900 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
7901 int regnum, gdb_byte *buf)
7903 const int num_regs = gdbarch_num_regs (gdbarch);
7905 gdb_byte reg_buf[8];
7906 int offset, double_regnum;
7908 gdb_assert (regnum >= num_regs);
7911 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
7912 /* Quad-precision register. */
7913 arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
7916 /* Single-precision register. */
7917 gdb_assert (regnum < 32);
7919 /* s0 is always the least significant half of d0. */
7920 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7921 offset = (regnum & 1) ? 0 : 4;
7923 offset = (regnum & 1) ? 4 : 0;
7925 sprintf (name_buf, "d%d", regnum >> 1);
7926 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7929 regcache_raw_read (regcache, double_regnum, reg_buf);
7930 memcpy (buf, reg_buf + offset, 4);
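/* Example (added for clarity): the single-precision pseudo register s5 maps
   to the most significant half of d2 on a little-endian target (d2 holds
   s4 in its low four bytes and s5 in its high four bytes).  */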
7934 /* Store the contents of BUF to a NEON quad register, by writing to
7935 two double registers. This is used to implement the quad pseudo
7936 registers, and for argument passing in case the quad registers are
7937 missing; vectors are passed in quad registers when using the VFP
7938 ABI, even if a NEON unit is not present. REGNUM is the index
7939 of the quad register, in [0, 15]. */
7942 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
7943 int regnum, const gdb_byte *buf)
7946 gdb_byte reg_buf[8];
7947 int offset, double_regnum;
7949 sprintf (name_buf, "d%d", regnum << 1);
7950 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7953 /* d0 is always the least significant half of q0. */
7954 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7959 regcache_raw_write (regcache, double_regnum, buf + offset);
7960 offset = 8 - offset;
7961 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
7965 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
7966 int regnum, const gdb_byte *buf)
7968 const int num_regs = gdbarch_num_regs (gdbarch);
7970 gdb_byte reg_buf[8];
7971 int offset, double_regnum;
7973 gdb_assert (regnum >= num_regs);
7976 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
7977 /* Quad-precision register. */
7978 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
7981 /* Single-precision register. */
7982 gdb_assert (regnum < 32);
7984 /* s0 is always the least significant half of d0. */
7985 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7986 offset = (regnum & 1) ? 0 : 4;
7988 offset = (regnum & 1) ? 4 : 0;
7990 sprintf (name_buf, "d%d", regnum >> 1);
7991 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7994 regcache_raw_read (regcache, double_regnum, reg_buf);
7995 memcpy (reg_buf + offset, buf, 4);
7996 regcache_raw_write (regcache, double_regnum, reg_buf);
8000 static struct value *
8001 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
8003 const int *reg_p = baton;
8004 return value_of_register (*reg_p, frame);
8007 static enum gdb_osabi
8008 arm_elf_osabi_sniffer (bfd *abfd)
8010 unsigned int elfosabi;
8011 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
8013 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
8015 if (elfosabi == ELFOSABI_ARM)
8016 /* GNU tools use this value. Check note sections in this case, as well. */
8018 bfd_map_over_sections (abfd,
8019 generic_elf_osabi_sniff_abi_tag_sections,
8022 /* Anything else will be handled by the generic ELF sniffer. */
8027 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
8028 struct reggroup *group)
8030 /* The FPS register's type is INT, but it belongs to float_reggroup. Besides
8031 this, the FPS register belongs to save_reggroup, restore_reggroup, and
8032 all_reggroup, of course. */
8033 if (regnum == ARM_FPS_REGNUM)
8034 return (group == float_reggroup
8035 || group == save_reggroup
8036 || group == restore_reggroup
8037 || group == all_reggroup);
8039 return default_register_reggroup_p (gdbarch, regnum, group);
8043 /* Initialize the current architecture based on INFO. If possible,
8044 re-use an architecture from ARCHES, which is a list of
8045 architectures already created during this debugging session.
8047 Called e.g. at program startup, when reading a core file, and when
8048 reading a binary file. */
8050 static struct gdbarch *
8051 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
8053 struct gdbarch_tdep *tdep;
8054 struct gdbarch *gdbarch;
8055 struct gdbarch_list *best_arch;
8056 enum arm_abi_kind arm_abi = arm_abi_global;
8057 enum arm_float_model fp_model = arm_fp_model;
8058 struct tdesc_arch_data *tdesc_data = NULL;
8060 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
8062 int have_fpa_registers = 1;
8063 const struct target_desc *tdesc = info.target_desc;
8065 /* If we have an object to base this architecture on, try to determine its ABI. */
8068 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
8070 int ei_osabi, e_flags;
8072 switch (bfd_get_flavour (info.abfd))
8074 case bfd_target_aout_flavour:
8075 /* Assume it's an old APCS-style ABI. */
8076 arm_abi = ARM_ABI_APCS;
8079 case bfd_target_coff_flavour:
8080 /* Assume it's an old APCS-style ABI. */
8082 arm_abi = ARM_ABI_APCS;
8085 case bfd_target_elf_flavour:
8086 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
8087 e_flags = elf_elfheader (info.abfd)->e_flags;
8089 if (ei_osabi == ELFOSABI_ARM)
8091 /* GNU tools used to use this value, but do not for EABI
8092 objects. There's nowhere to tag an EABI version
8093 anyway, so assume APCS. */
8094 arm_abi = ARM_ABI_APCS;
8096 else if (ei_osabi == ELFOSABI_NONE)
8098 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
8099 int attr_arch, attr_profile;
8103 case EF_ARM_EABI_UNKNOWN:
8104 /* Assume GNU tools. */
8105 arm_abi = ARM_ABI_APCS;
8108 case EF_ARM_EABI_VER4:
8109 case EF_ARM_EABI_VER5:
8110 arm_abi = ARM_ABI_AAPCS;
8111 /* EABI binaries default to VFP float ordering.
8112 They may also contain build attributes that can
8113 be used to identify if the VFP argument-passing ABI is in use. */
8115 if (fp_model == ARM_FLOAT_AUTO)
8118 switch (bfd_elf_get_obj_attr_int (info.abfd,
8123 /* "The user intended FP parameter/result
8124 passing to conform to AAPCS, base variant". */
8126 fp_model = ARM_FLOAT_SOFT_VFP;
8129 /* "The user intended FP parameter/result
8130 passing to conform to AAPCS, VFP variant". */
8132 fp_model = ARM_FLOAT_VFP;
8135 /* "The user intended FP parameter/result
8136 passing to conform to tool chain-specific
8137 conventions" - we don't know any such
8138 conventions, so leave it as "auto". */
8141 /* Attribute value not mentioned in the
8142 October 2008 ABI, so leave it as "auto". */
8147 fp_model = ARM_FLOAT_SOFT_VFP;
8153 /* Leave it as "auto". */
8154 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
8159 /* Detect M-profile programs. This only works if the
8160 executable file includes build attributes; GCC does
8161 copy them to the executable, but e.g. RealView does not. */
8163 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
8165 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
8167 Tag_CPU_arch_profile);
8168 /* GCC specifies the profile for v6-M; RealView only
8169 specifies the profile for architectures starting with
8170 V7 (as opposed to architectures with a tag
8171 numerically greater than TAG_CPU_ARCH_V7). */
8172 if (!tdesc_has_registers (tdesc)
8173 && (attr_arch == TAG_CPU_ARCH_V6_M
8174 || attr_arch == TAG_CPU_ARCH_V6S_M
8175 || attr_profile == 'M'))
8176 tdesc = tdesc_arm_with_m;
8180 if (fp_model == ARM_FLOAT_AUTO)
8182 int e_flags = elf_elfheader (info.abfd)->e_flags;
8184 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
8187 /* Leave it as "auto". Strictly speaking this case
8188 means FPA, but almost nobody uses that now, and
8189 many toolchains fail to set the appropriate bits
8190 for the floating-point model they use. */
8192 case EF_ARM_SOFT_FLOAT:
8193 fp_model = ARM_FLOAT_SOFT_FPA;
8195 case EF_ARM_VFP_FLOAT:
8196 fp_model = ARM_FLOAT_VFP;
8198 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
8199 fp_model = ARM_FLOAT_SOFT_VFP;
8204 if (e_flags & EF_ARM_BE8)
8205 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
8210 /* Leave it as "auto". */
8215 /* Check any target description for validity. */
8216 if (tdesc_has_registers (tdesc))
8218 /* For most registers we require GDB's default names; but also allow
8219 the numeric names for sp / lr / pc, as a convenience. */
8220 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
8221 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
8222 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
8224 const struct tdesc_feature *feature;
8227 feature = tdesc_find_feature (tdesc,
8228 "org.gnu.gdb.arm.core");
8229 if (feature == NULL)
8231 feature = tdesc_find_feature (tdesc,
8232 "org.gnu.gdb.arm.m-profile");
8233 if (feature == NULL)
8239 tdesc_data = tdesc_data_alloc ();
8242 for (i = 0; i < ARM_SP_REGNUM; i++)
8243 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
8244 arm_register_names[i]);
8245 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8248 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8251 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8255 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8256 ARM_PS_REGNUM, "xpsr");
8258 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8259 ARM_PS_REGNUM, "cpsr");
8263 tdesc_data_cleanup (tdesc_data);
8267 feature = tdesc_find_feature (tdesc,
8268 "org.gnu.gdb.arm.fpa");
8269 if (feature != NULL)
8272 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
8273 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
8274 arm_register_names[i]);
8277 tdesc_data_cleanup (tdesc_data);
8282 have_fpa_registers = 0;
8284 feature = tdesc_find_feature (tdesc,
8285 "org.gnu.gdb.xscale.iwmmxt");
8286 if (feature != NULL)
8288 static const char *const iwmmxt_names[] = {
8289 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
8290 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
8291 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
8292 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
8296 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
8298 &= tdesc_numbered_register (feature, tdesc_data, i,
8299 iwmmxt_names[i - ARM_WR0_REGNUM]);
8301 /* Check for the control registers, but do not fail if they are missing. */
8303 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
8304 tdesc_numbered_register (feature, tdesc_data, i,
8305 iwmmxt_names[i - ARM_WR0_REGNUM]);
8307 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
8309 &= tdesc_numbered_register (feature, tdesc_data, i,
8310 iwmmxt_names[i - ARM_WR0_REGNUM]);
8314 tdesc_data_cleanup (tdesc_data);
8319 /* If we have a VFP unit, check whether the single precision registers
8320 are present. If not, then we will synthesize them as pseudo registers. */
8322 feature = tdesc_find_feature (tdesc,
8323 "org.gnu.gdb.arm.vfp");
8324 if (feature != NULL)
8326 static const char *const vfp_double_names[] = {
8327 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
8328 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
8329 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
8330 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
8333 /* Require the double precision registers. There must be either 16 or 32 of them. */
8336 for (i = 0; i < 32; i++)
8338 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8340 vfp_double_names[i]);
8345 if (!valid_p && i != 16)
8347 tdesc_data_cleanup (tdesc_data);
8351 if (tdesc_unnumbered_register (feature, "s0") == 0)
8352 have_vfp_pseudos = 1;
8354 have_vfp_registers = 1;
8356 /* If we have VFP, also check for NEON. The architecture allows
8357 NEON without VFP (integer vector operations only), but GDB
8358 does not support that. */
8359 feature = tdesc_find_feature (tdesc,
8360 "org.gnu.gdb.arm.neon");
8361 if (feature != NULL)
8363 /* NEON requires 32 double-precision registers. */
8366 tdesc_data_cleanup (tdesc_data);
8370 /* If there are quad registers defined by the stub, use
8371 their type; otherwise (normally) provide them with
8372 the default type. */
8373 if (tdesc_unnumbered_register (feature, "q0") == 0)
8374 have_neon_pseudos = 1;
8381 /* If there is already a candidate, use it. */
8382 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
8384 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
8386 if (arm_abi != ARM_ABI_AUTO
8387 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
8390 if (fp_model != ARM_FLOAT_AUTO
8391 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
8394 /* There are various other properties in tdep that we do not
8395 need to check here: those derived from a target description,
8396 since gdbarches with a different target description are
8397 automatically disqualified. */
8399 /* Do check is_m, though, since it might come from the binary. */
8400 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
8403 /* Found a match. */
8407 if (best_arch != NULL)
8409 if (tdesc_data != NULL)
8410 tdesc_data_cleanup (tdesc_data);
8411 return best_arch->gdbarch;
8414 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
8415 gdbarch = gdbarch_alloc (&info, tdep);
8417 /* Record additional information about the architecture we are defining.
8418 These are gdbarch discriminators, like the OSABI. */
8419 tdep->arm_abi = arm_abi;
8420 tdep->fp_model = fp_model;
8422 tdep->have_fpa_registers = have_fpa_registers;
8423 tdep->have_vfp_registers = have_vfp_registers;
8424 tdep->have_vfp_pseudos = have_vfp_pseudos;
8425 tdep->have_neon_pseudos = have_neon_pseudos;
8426 tdep->have_neon = have_neon;
8429 switch (info.byte_order_for_code)
8431 case BFD_ENDIAN_BIG:
8432 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
8433 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
8434 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
8435 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
8439 case BFD_ENDIAN_LITTLE:
8440 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
8441 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
8442 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
8443 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
8448 internal_error (__FILE__, __LINE__,
8449 _("arm_gdbarch_init: bad byte order for float format"));
8452 /* On ARM targets char defaults to unsigned. */
8453 set_gdbarch_char_signed (gdbarch, 0);
8455 /* Note: for displaced stepping, this includes the breakpoint, and one word
8456 of additional scratch space. This setting isn't used for anything besides
8457 displaced stepping at present. */
8458 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;	/* Longjump support not enabled by default.  */

  /* The default, for both APCS and AAPCS, is to return small
     structures in registers.  */
  tdep->struct_return = reg_struct_return;

  set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, arm_frame_align);

  set_gdbarch_write_pc (gdbarch, arm_write_pc);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);

  frame_base_set_default (gdbarch, &arm_normal_base);

  /* Address manipulation.  */
  set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
  set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);

  /* Detect whether PC is in function epilogue.  */
  set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);

  /* Skip trampolines.  */
  set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
  set_gdbarch_remote_breakpoint_from_pc (gdbarch,
					 arm_remote_breakpoint_from_pc);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
  set_gdbarch_register_type (gdbarch, arm_register_type);
  set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);

  /* This "info float" is FPA-specific.  Use the generic version if we
     do not have FPA.  */
  if (gdbarch_tdep (gdbarch)->have_fpa_registers)
    set_gdbarch_print_float_info (gdbarch, arm_print_float_info);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
  set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);

  set_gdbarch_register_name (gdbarch, arm_register_name);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, arm_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);

  /* Minsymbol frobbing.  */
  set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
  set_gdbarch_coff_make_msymbol_special (gdbarch,
					 arm_coff_make_msymbol_special);
  set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);

  /* Thumb-2 IT block support.  */
  set_gdbarch_adjust_breakpoint_address (gdbarch,
					 arm_adjust_breakpoint_address);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);
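  /* Bit zero of an ARM code address may be the Thumb bit, so the C++
     ABI for ARM keeps the virtual-function flag in the vtable delta
     rather than in the pointer itself; the setting above tells GDB to
     expect that layout.  */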

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
  frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
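  /* Unwinders are tried in the order they were appended (after any
     that the OS ABI hook above added): the stub unwinder, then DWARF
     CFI, then the ARM exception-table (exidx) unwinder, with prologue
     analysis as the final fallback when no unwind data is found.  */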

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  /* If the ABI is not otherwise marked, assume the old GNU APCS.  EABI
     binaries are always marked.  */
  if (tdep->arm_abi == ARM_ABI_AUTO)
    tdep->arm_abi = ARM_ABI_APCS;

  /* Watchpoints are not steppable.  */
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);

  /* We used to default to FPA for generic ARM, but almost nobody
     uses that now, and we now provide a way for the user to force
     the model.  So default to the most useful variant.  */
  if (tdep->fp_model == ARM_FLOAT_AUTO)
    tdep->fp_model = ARM_FLOAT_SOFT_FPA;

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);

  /* Floating point sizes and format.  */
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
    {
      set_gdbarch_double_format
	(gdbarch, floatformats_ieee_double_littlebyte_bigword);
      set_gdbarch_long_double_format
	(gdbarch, floatformats_ieee_double_littlebyte_bigword);
    }
  else
    {
      set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
      set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
    }
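  /* The "littlebyte_bigword" format selected above for FPA and
     soft-FPA reflects how the FPA stores doubles: the two 32-bit
     words are in big-endian order even on a little-endian target,
     while the bytes within each word remain little-endian.  */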

  if (have_vfp_pseudos)
    {
      /* NOTE: These are the only pseudo registers used by
	 the ARM target at the moment.  If more are added, a
	 little more care in numbering will be needed.  */

      int num_pseudos = 32;
      if (have_neon_pseudos)
	num_pseudos += 16;
      set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
      set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
      set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
    }
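  /* The pseudo registers set up above are the 32 single-precision
     views (s0-s31) of the VFP d registers, plus, when NEON is
     present, 16 quad-word views (q0-q15) built from pairs of d
     registers; arm_pseudo_read and arm_pseudo_write do the mapping.  */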

  if (tdesc_data)
    {
      set_tdesc_pseudo_register_name (gdbarch, arm_register_name);

      tdesc_use_registers (gdbarch, tdesc, tdesc_data);

      /* Override tdesc_register_type to adjust the types of VFP
	 registers for NEON.  */
      set_gdbarch_register_type (gdbarch, arm_register_type);
    }

  /* Add standard register aliases.  We add aliases even for those
     names which are used by the current architecture - it's simpler,
     and does no harm, since nothing ever lists user registers.  */
  for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
    user_reg_add (gdbarch, arm_register_aliases[i].name,
		  value_of_arm_user_reg, &arm_register_aliases[i].regnum);

  return gdbarch;
}

static void
arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep == NULL)
    return;

  fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
		      (unsigned long) tdep->lowest_pc);
}

extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */

void
_initialize_arm_tdep (void)
{
  struct ui_file *stb;
  long length;
  struct cmd_list_element *new_set, *new_show;
  const char *setname;
  const char *setdesc;
  const char *const *regnames;
  int numregs, i;
  static char *helptext;
  char regdesc[1024], *rdptr = regdesc;
  size_t rest = sizeof (regdesc);

  gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);

  arm_objfile_data_key
    = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);

  /* Add ourselves to objfile event chain.  */
  observer_attach_new_objfile (arm_exidx_new_objfile);
  arm_exidx_data_key
    = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);

  /* Register an ELF OS ABI sniffer for ARM binaries.  */
  gdbarch_register_osabi_sniffer (bfd_arch_arm,
				  bfd_target_elf_flavour,
				  arm_elf_osabi_sniffer);

  /* Initialize the standard target descriptions.  */
  initialize_tdesc_arm_with_m ();

  /* Get the number of possible sets of register names defined in opcodes.  */
  num_disassembly_options = get_arm_regname_num_options ();

  /* Add root prefix command for all "set arm"/"show arm" commands.  */
  add_prefix_cmd ("arm", no_class, set_arm_command,
		  _("Various ARM-specific commands."),
		  &setarmcmdlist, "set arm ", 0, &setlist);

  add_prefix_cmd ("arm", no_class, show_arm_command,
		  _("Various ARM-specific commands."),
		  &showarmcmdlist, "show arm ", 0, &showlist);
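  /* At the prompt these act as command prefixes, e.g. "set arm fpu vfp"
     or "show arm abi"; the individual sub-commands are registered
     below.  */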

  /* Sync the opcode insn printer with our register viewer.  */
  parse_arm_disassembler_option ("reg-names-std");

  /* Initialize the array that will be passed to
     add_setshow_enum_cmd().  */
  valid_disassembly_styles
    = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
  for (i = 0; i < num_disassembly_options; i++)
    {
      numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
      valid_disassembly_styles[i] = setname;
      length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
      rdptr += length;
      rest -= length;
      /* When we find the default names, tell the disassembler to use
	 them.  */
      if (!strcmp (setname, "std"))
	{
	  disassembly_style = setname;
	  set_arm_regname_option (i);
	}
    }

  /* Mark the end of valid options.  */
  valid_disassembly_styles[num_disassembly_options] = NULL;
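  /* The style names themselves come from the opcodes disassembler via
     get_arm_regnames; "std" is expected to be among them and becomes
     the default argument for "set arm disassembler" below.  */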

  /* Create the help text.  */
  stb = mem_fileopen ();
  fprintf_unfiltered (stb, "%s%s%s",
		      _("The valid values are:\n"),
		      regdesc,
		      _("The default is \"std\"."));
  helptext = ui_file_xstrdup (stb, NULL);
  ui_file_delete (stb);

  add_setshow_enum_cmd ("disassembler", no_class,
			valid_disassembly_styles, &disassembly_style,
			_("Set the disassembly style."),
			_("Show the disassembly style."),
			helptext,
			set_disassembly_style_sfunc,
			NULL, /* FIXME: i18n: The disassembly style is
				 "%s".  */
			&setarmcmdlist, &showarmcmdlist);

  add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
			   _("Set usage of ARM 32-bit mode."),
			   _("Show usage of ARM 32-bit mode."),
			   _("When off, a 26-bit PC will be used."),
			   NULL,
			   NULL, /* FIXME: i18n: Usage of ARM 32-bit
				    mode is %s.  */
			   &setarmcmdlist, &showarmcmdlist);

  /* Add a command to allow the user to force the FPU model.  */
  add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
			_("Set the floating point type."),
			_("Show the floating point type."),
			_("auto - Determine the FP type from the OS-ABI.\n\
softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
fpa - FPA co-processor (GCC compiled).\n\
softvfp - Software FP with pure-endian doubles.\n\
vfp - VFP co-processor."),
			set_fp_model_sfunc, show_fp_model,
			&setarmcmdlist, &showarmcmdlist);
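  /* For example, "set arm fpu vfp" selects the VFP model, and
     "show arm fpu" reports the current (possibly auto-detected)
     setting.  */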

  /* Add a command to allow the user to force the ABI.  */
  add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
			_("Set the ABI."),
			_("Show the ABI."),
			NULL, arm_set_abi, arm_show_abi,
			&setarmcmdlist, &showarmcmdlist);

  /* Add two commands to allow the user to force the assumed
     execution mode.  */
  add_setshow_enum_cmd ("fallback-mode", class_support,
			arm_mode_strings, &arm_fallback_mode_string,
			_("Set the mode assumed when symbols are unavailable."),
			_("Show the mode assumed when symbols are unavailable."),
			NULL, NULL, arm_show_fallback_mode,
			&setarmcmdlist, &showarmcmdlist);
  add_setshow_enum_cmd ("force-mode", class_support,
			arm_mode_strings, &arm_force_mode_string,
			_("Set the mode assumed even when symbols are available."),
			_("Show the mode assumed even when symbols are available."),
			NULL, NULL, arm_show_force_mode,
			&setarmcmdlist, &showarmcmdlist);
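  /* "fallback-mode" only matters for addresses whose ARM/Thumb mode
     cannot be determined from symbols or mapping information, while
     "force-mode" overrides that information as well; both take the
     same mode keywords.  */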

  /* Debugging flag.  */
  add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
			   _("Set ARM debugging."),
			   _("Show ARM debugging."),
			   _("When on, arm-specific debugging is enabled."),
			   NULL,
			   NULL, /* FIXME: i18n: "ARM debugging is %s.  */
			   &setdebuglist, &showdebuglist);