1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006-2014 Free Software Foundation, Inc.
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "arch-utils.h"
29 #include "frame-unwind.h"
30 #include "frame-base.h"
31 #include "trad-frame.h"
40 #include "reggroups.h"
41 #include "floatformat.h"
46 #include "dwarf2-frame.h"
48 #include "exceptions.h"
52 /* The list of available "set spu " and "show spu " commands. */
53 static struct cmd_list_element *setspucmdlist = NULL;
54 static struct cmd_list_element *showspucmdlist = NULL;
56 /* Whether to stop for new SPE contexts. */
57 static int spu_stop_on_load_p = 0;
58 /* Whether to automatically flush the SW-managed cache. */
59 static int spu_auto_flush_cache_p = 1;
62 /* The tdep structure. */
/* NOTE(review): the listing is elided here; the members below appear to
   be fields of the per-architecture tdep structure (struct gdbarch_tdep)
   rather than file-scope variables -- confirm against the full source.  */
65 /* The spufs ID identifying our address space. */
68 /* SPU-specific vector type. */
69 struct type *spu_builtin_type_vec128;
73 /* SPU-specific vector type. */
/* Return the SPU 128-bit vector type for GDBARCH, constructing it lazily
   on first use and caching it in the tdep.  The type is a union that
   overlays a single 128-bit integer with 2x64 / 4x32 / 8x16 / 16x8
   integer vectors and 2-double / 4-float floating-point vectors, so the
   user can view a GPR in any of these interpretations.  */
75 spu_builtin_type_vec128 (struct gdbarch *gdbarch)
77 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
79 if (!tdep->spu_builtin_type_vec128)
81 const struct builtin_type *bt = builtin_type (gdbarch);
84 t = arch_composite_type (gdbarch,
85 "__spu_builtin_type_vec128", TYPE_CODE_UNION);
86 append_composite_type_field (t, "uint128", bt->builtin_int128);
87 append_composite_type_field (t, "v2_int64",
88 init_vector_type (bt->builtin_int64, 2));
89 append_composite_type_field (t, "v4_int32",
90 init_vector_type (bt->builtin_int32, 4));
91 append_composite_type_field (t, "v8_int16",
92 init_vector_type (bt->builtin_int16, 8));
93 append_composite_type_field (t, "v16_int8",
94 init_vector_type (bt->builtin_int8, 16));
95 append_composite_type_field (t, "v2_double",
96 init_vector_type (bt->builtin_double, 2));
97 append_composite_type_field (t, "v4_float",
98 init_vector_type (bt->builtin_float, 4));
/* The user-visible type name differs from the internal composite tag
   ("__spu_builtin_type_vec128") set above.  */
101 TYPE_NAME (t) = "spu_builtin_type_vec128";
103 tdep->spu_builtin_type_vec128 = t;
106 return tdep->spu_builtin_type_vec128;
110 /* The list of available "info spu " commands. */
111 static struct cmd_list_element *infospucmdlist = NULL;
/* gdbarch register_name method: map register number REG_NR to its
   user-visible name.  r0..r127 are the 128 general-purpose registers;
   the trailing entries name SPU special registers (id, pc, sp, fpscr,
   srr0, lslr, decr, decr_status).  */
116 spu_register_name (struct gdbarch *gdbarch, int reg_nr)
118 static char *register_names[] =
120 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
121 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
122 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
123 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
124 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
125 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
126 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
127 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
128 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
129 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
130 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
131 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
132 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
133 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
134 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
135 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
136 "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
/* Bounds check against the table size.  NOTE(review): the body of this
   check (and any negative-REG_NR check) is elided in this listing;
   presumably NULL is returned for out-of-range numbers -- confirm.  */
141 if (reg_nr >= sizeof register_names / sizeof *register_names)
144 return register_names[reg_nr];
/* gdbarch register_type method.  The 128 GPRs are 128-bit vectors; the
   special registers are mostly 32-bit scalars, except FPSCR which is a
   full 128-bit value.  NOTE(review): the switch header and the case
   labels for the ID/PC/SP registers are elided in this listing.  */
148 spu_register_type (struct gdbarch *gdbarch, int reg_nr)
150 if (reg_nr < SPU_NUM_GPRS)
151 return spu_builtin_type_vec128 (gdbarch);
156 return builtin_type (gdbarch)->builtin_uint32;
159 return builtin_type (gdbarch)->builtin_func_ptr;
162 return builtin_type (gdbarch)->builtin_data_ptr;
164 case SPU_FPSCR_REGNUM:
165 return builtin_type (gdbarch)->builtin_uint128;
167 case SPU_SRR0_REGNUM:
168 return builtin_type (gdbarch)->builtin_uint32;
170 case SPU_LSLR_REGNUM:
171 return builtin_type (gdbarch)->builtin_uint32;
173 case SPU_DECR_REGNUM:
174 return builtin_type (gdbarch)->builtin_uint32;
176 case SPU_DECR_STATUS_REGNUM:
177 return builtin_type (gdbarch)->builtin_uint32;
180 internal_error (__FILE__, __LINE__, _("invalid regnum"));
184 /* Pseudo registers for preferred slots - stack pointer. */
186 static enum register_status
/* Read the spufs register file entry REGNAME for our SPU context (the
   annex is "<id>/<regname>") and store the result into BUF as a 32-bit
   value.  The kernel exposes these registers as ASCII hex strings,
   hence the strtoulst conversion below.
   NOTE(review): "¤t_target" in this listing is a mis-encoded
   "&current_target".  */
187 spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
190 struct gdbarch *gdbarch = get_regcache_arch (regcache);
191 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
192 enum register_status status;
198 status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
199 if (status != REG_VALID)
201 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
202 memset (reg, 0, sizeof reg);
203 target_read (&current_target, TARGET_OBJECT_SPU, annex,
206 ul = strtoulst ((char *) reg, NULL, 16);
207 store_unsigned_integer (buf, 4, byte_order, ul);
211 static enum register_status
/* gdbarch pseudo_register_read method.  SP is read as the preferred
   slot (first word) of the raw 128-bit SP register; FPSCR and the
   remaining special registers are fetched from the target's
   TARGET_OBJECT_SPU object via the helper above.  */
212 spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
213 int regnum, gdb_byte *buf)
218 enum register_status status;
223 status = regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
224 if (status != REG_VALID)
226 memcpy (buf, reg, 4);
229 case SPU_FPSCR_REGNUM:
230 status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
231 if (status != REG_VALID)
233 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
234 target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
237 case SPU_SRR0_REGNUM:
238 return spu_pseudo_register_read_spu (regcache, "srr0", buf);
240 case SPU_LSLR_REGNUM:
241 return spu_pseudo_register_read_spu (regcache, "lslr", buf);
243 case SPU_DECR_REGNUM:
244 return spu_pseudo_register_read_spu (regcache, "decr", buf);
246 case SPU_DECR_STATUS_REGNUM:
247 return spu_pseudo_register_read_spu (regcache, "decr_status", buf);
250 internal_error (__FILE__, __LINE__, _("invalid regnum"));
/* Write the 32-bit value in BUF to the spufs register file entry
   REGNAME for our SPU context.  The value is formatted as an ASCII
   "0x..." hex string, mirroring the read path above.
   NOTE(review): "¤t_target" in this listing is a mis-encoded
   "&current_target".  */
255 spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
258 struct gdbarch *gdbarch = get_regcache_arch (regcache);
259 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
264 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
265 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
266 xsnprintf (reg, sizeof reg, "0x%s",
267 phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
268 target_write (&current_target, TARGET_OBJECT_SPU, annex,
269 (gdb_byte *) reg, 0, strlen (reg));
/* gdbarch pseudo_register_write method, symmetric to the read method:
   SP writes update only the preferred slot of the raw SP register
   (read-modify-write of the 128-bit value); FPSCR and the remaining
   special registers go through the SPU target object.  */
273 spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
274 int regnum, const gdb_byte *buf)
283 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
284 memcpy (reg, buf, 4);
285 regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
288 case SPU_FPSCR_REGNUM:
289 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
290 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
291 target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
294 case SPU_SRR0_REGNUM:
295 spu_pseudo_register_write_spu (regcache, "srr0", buf);
298 case SPU_LSLR_REGNUM:
299 spu_pseudo_register_write_spu (regcache, "lslr", buf);
302 case SPU_DECR_REGNUM:
303 spu_pseudo_register_write_spu (regcache, "decr", buf);
306 case SPU_DECR_STATUS_REGNUM:
307 spu_pseudo_register_write_spu (regcache, "decr_status", buf);
311 internal_error (__FILE__, __LINE__, _("invalid regnum"));
/* Agent-expression (tracepoint bytecode) support: record in AX which
   raw registers the pseudo register REGNUM depends on.  Only SP maps to
   a raw register; the spufs-backed registers have no raw-register
   representation (the elided case bodies presumably do nothing or
   report an error -- confirm against the full source).  */
316 spu_ax_pseudo_register_collect (struct gdbarch *gdbarch,
317 struct agent_expr *ax, int regnum)
322 ax_reg_mask (ax, SPU_RAW_SP_REGNUM);
325 case SPU_FPSCR_REGNUM:
326 case SPU_SRR0_REGNUM:
327 case SPU_LSLR_REGNUM:
328 case SPU_DECR_REGNUM:
329 case SPU_DECR_STATUS_REGNUM:
333 internal_error (__FILE__, __LINE__, _("invalid regnum"));
/* Emit bytecode in AX to push the value of pseudo register REGNUM.
   Again only SP is representable, via the raw SP register.  */
338 spu_ax_pseudo_register_push_stack (struct gdbarch *gdbarch,
339 struct agent_expr *ax, int regnum)
344 ax_reg (ax, SPU_RAW_SP_REGNUM);
347 case SPU_FPSCR_REGNUM:
348 case SPU_SRR0_REGNUM:
349 case SPU_LSLR_REGNUM:
350 case SPU_DECR_REGNUM:
351 case SPU_DECR_STATUS_REGNUM:
355 internal_error (__FILE__, __LINE__, _("invalid regnum"))
360 /* Value conversion -- access scalar values at the preferred slot. */
362 static struct value *
/* For a scalar TYPE shorter than the 128-bit register, bias the value's
   offset so it reads from the preferred slot: types narrower than 4
   bytes sit right-justified within the first word, larger types start
   at offset 0.  */
363 spu_value_from_register (struct gdbarch *gdbarch, struct type *type,
364 int regnum, struct frame_id frame_id)
366 struct value *value = default_value_from_register (gdbarch, type,
368 int len = TYPE_LENGTH (type);
370 if (regnum < SPU_NUM_GPRS && len < 16)
372 int preferred_slot = len < 4 ? 4 - len : 0;
373 set_value_offset (value, preferred_slot);
379 /* Register groups. */
/* gdbarch register_reggroup_p method.  NOTE(review): the return values
   for the three groups below are elided in this listing -- confirm the
   membership rules against the full source.  */
382 spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
383 struct reggroup *group)
385 /* Registers displayed via 'info regs'. */
386 if (group == general_reggroup)
389 /* Registers displayed via 'info float'. */
390 if (group == float_reggroup)
393 /* Registers that need to be saved/restored in order to
394 push or pop frames. */
395 if (group == save_reggroup || group == restore_reggroup)
398 return default_register_reggroup_p (gdbarch, regnum, group);
401 /* DWARF-2 register numbers. */
/* Map a DWARF register number to a GDB register number.  The only
   remapping is raw SP -> cooked SP.  */
404 spu_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
406 /* Use cooked instead of raw SP. */
407 return (reg == SPU_RAW_SP_REGNUM)? SPU_SP_REGNUM : reg;
411 /* Address handling. */
/* Return the spufs ID of the address space described by GDBARCH,
   normally taken from the tdep.  */
414 spu_gdbarch_id (struct gdbarch *gdbarch)
416 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
419 /* The objfile architecture of a standalone SPU executable does not
420 provide an SPU ID. Retrieve it from the objfile's relocated
421 address range in this special case. */
423 && symfile_objfile && symfile_objfile->obfd
424 && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu
425 && symfile_objfile->sections != symfile_objfile->sections_end)
426 id = SPUADDR_SPU (obj_section_addr (symfile_objfile->sections));
/* DWARF address class 1 marks __ea (effective-address / PPU-side)
   pointers; everything else is a plain local-store pointer.  */
432 spu_address_class_type_flags (int byte_size, int dwarf2_addr_class)
434 if (dwarf2_addr_class == 1)
435 return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
/* Map address-class type flags back to the "__ea" qualifier name.  */
441 spu_address_class_type_flags_to_name (struct gdbarch *gdbarch, int type_flags)
443 if (type_flags & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1)
/* And the reverse: recognize the "__ea" qualifier by name.  */
450 spu_address_class_name_to_type_flags (struct gdbarch *gdbarch,
451 const char *name, int *type_flags_ptr)
453 if (strcmp (name, "__ea") == 0)
455 *type_flags_ptr = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
/* Convert a GDB address to a target pointer by stripping the embedded
   spufs ID (SPUADDR_ADDR) -- the SPU itself only sees local-store
   offsets.  */
463 spu_address_to_pointer (struct gdbarch *gdbarch,
464 struct type *type, gdb_byte *buf, CORE_ADDR addr)
466 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
467 store_unsigned_integer (buf, TYPE_LENGTH (type), byte_order,
468 SPUADDR_ADDR (addr));
/* Convert a target pointer to a GDB address by embedding our spufs ID
   (SPUADDR).  NULL stays NULL, and __ea pointers are passed through
   untranslated since they live in the PPU address space.  */
472 spu_pointer_to_address (struct gdbarch *gdbarch,
473 struct type *type, const gdb_byte *buf)
475 int id = spu_gdbarch_id (gdbarch);
476 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
478 = extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);
480 /* Do not convert __ea pointers. */
481 if (TYPE_ADDRESS_CLASS_1 (type))
484 return addr? SPUADDR (id, addr) : 0;
/* Likewise for integers used as addresses.  */
488 spu_integer_to_address (struct gdbarch *gdbarch,
489 struct type *type, const gdb_byte *buf)
491 int id = spu_gdbarch_id (gdbarch);
492 ULONGEST addr = unpack_long (type, buf);
494 return SPUADDR (id, addr);
498 /* Decoding SPU instructions. */
/* The decoders below share a common field layout, visible from the
   shifts and masks: RT in bits 0-6, RA in bits 7-13, RB in bits 14-20,
   with the opcode occupying the remaining high bits (its width varies
   per format, hence the different shift counts in the comparisons).
   Immediates are sign-extended with the (x ^ sign) - sign trick.  */
/* RR format: 11-bit opcode, three register operands.  */
535 is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
537 if ((insn >> 21) == op)
540 *ra = (insn >> 7) & 127;
541 *rb = (insn >> 14) & 127;
/* RRR format: 4-bit opcode, four register operands (RT moves to
   bits 21-27).  */
549 is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
551 if ((insn >> 28) == op)
553 *rt = (insn >> 21) & 127;
554 *ra = (insn >> 7) & 127;
555 *rb = (insn >> 14) & 127;
/* RI7 format: 11-bit opcode, two registers, 7-bit signed immediate.  */
564 is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
566 if ((insn >> 21) == op)
569 *ra = (insn >> 7) & 127;
570 *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
/* RI10 format: 8-bit opcode, two registers, 10-bit signed immediate.  */
578 is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
580 if ((insn >> 24) == op)
583 *ra = (insn >> 7) & 127;
584 *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
/* RI16 format: 9-bit opcode, one register, 16-bit signed immediate.  */
592 is_ri16 (unsigned int insn, int op, int *rt, int *i16)
594 if ((insn >> 23) == op)
597 *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
/* RI18 format: 7-bit opcode, one register, 18-bit signed immediate.  */
605 is_ri18 (unsigned int insn, int op, int *rt, int *i18)
607 if ((insn >> 25) == op)
610 *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
/* Classify INSN as a branch.  Relative branches report the PC register
   plus an offset; absolute branches and register-indirect branches
   (bi/bisl and conditional variants) report the appropriate base.  */
618 is_branch (unsigned int insn, int *offset, int *reg)
622 if (is_ri16 (insn, op_br, &rt, &i16)
623 || is_ri16 (insn, op_brsl, &rt, &i16)
624 || is_ri16 (insn, op_brnz, &rt, &i16)
625 || is_ri16 (insn, op_brz, &rt, &i16)
626 || is_ri16 (insn, op_brhnz, &rt, &i16)
627 || is_ri16 (insn, op_brhz, &rt, &i16))
629 *reg = SPU_PC_REGNUM;
634 if (is_ri16 (insn, op_bra, &rt, &i16)
635 || is_ri16 (insn, op_brasl, &rt, &i16))
642 if (is_ri7 (insn, op_bi, &rt, reg, &i7)
643 || is_ri7 (insn, op_bisl, &rt, reg, &i7)
644 || is_ri7 (insn, op_biz, &rt, reg, &i7)
645 || is_ri7 (insn, op_binz, &rt, reg, &i7)
646 || is_ri7 (insn, op_bihz, &rt, reg, &i7)
647 || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
657 /* Prolog parsing. */
659 struct spu_prologue_data
661 /* Stack frame size. -1 if analysis was unsuccessful. */
664 /* How to find the CFA. The CFA is equal to SP at function entry. */
668 /* Offset relative to CFA where a register is saved. -1 if invalid. */
669 int reg_offset[SPU_NUM_GPRS];
/* Scan the prologue of the function starting at START_PC (stopping at
   END_PC at the latest), filling in DATA with the frame size, the CFA
   rule, and the CFA-relative save slots of callee-saved registers.
   Returns the address of the first post-prologue instruction.  */
673 spu_analyze_prologue (struct gdbarch *gdbarch,
674 CORE_ADDR start_pc, CORE_ADDR end_pc,
675 struct spu_prologue_data *data)
677 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
682 int reg_immed[SPU_NUM_GPRS];
684 CORE_ADDR prolog_pc = start_pc;
689 /* Initialize DATA to default values. */
692 data->cfa_reg = SPU_RAW_SP_REGNUM;
693 data->cfa_offset = 0;
695 for (i = 0; i < SPU_NUM_GPRS; i++)
696 data->reg_offset[i] = -1;
698 /* Set up REG_IMMED array. This is non-zero for a register if we know its
699 preferred slot currently holds this immediate value. */
700 for (i = 0; i < SPU_NUM_GPRS; i++)
703 /* Scan instructions until the first branch.
705 The following instructions are important prolog components:
707 - The first instruction to set up the stack pointer.
708 - The first instruction to set up the frame pointer.
709 - The first instruction to save the link register.
710 - The first instruction to save the backchain.
712 We return the instruction after the latest of these four,
713 or the incoming PC if none is found. The first instruction
714 to set up the stack pointer also defines the frame size.
716 Note that instructions saving incoming arguments to their stack
717 slots are not counted as important, because they are hard to
718 identify with certainty. This should not matter much, because
719 arguments are relevant only in code compiled with debug data,
720 and in such code the GDB core will advance until the first source
721 line anyway, using SAL data.
723 For purposes of stack unwinding, we analyze the following types
724 of instructions in addition:
726 - Any instruction adding to the current frame pointer.
727 - Any instruction loading an immediate constant into a register.
728 - Any instruction storing a register onto the stack.
730 These are used to compute the CFA and REG_OFFSET output. */
732 for (pc = start_pc; pc < end_pc; pc += 4)
735 int rt, ra, rb, rc, immed;
737 if (target_read_memory (pc, buf, 4))
739 insn = extract_unsigned_integer (buf, 4, byte_order);
741 /* AI is the typical instruction to set up a stack frame.
742 It is also used to initialize the frame pointer. */
743 if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
745 if (rt == data->cfa_reg && ra == data->cfa_reg)
746 data->cfa_offset -= immed;
748 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
756 else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
762 data->cfa_reg = SPU_FP_REGNUM;
763 data->cfa_offset -= immed;
767 /* A is used to set up stack frames of size >= 512 bytes.
768 If we have tracked the contents of the addend register,
769 we can handle this as well. */
770 else if (is_rr (insn, op_a, &rt, &ra, &rb))
772 if (rt == data->cfa_reg && ra == data->cfa_reg)
774 if (reg_immed[rb] != 0)
775 data->cfa_offset -= reg_immed[rb];
777 data->cfa_reg = -1; /* We don't know the CFA any more. */
780 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
786 if (reg_immed[rb] != 0)
787 data->size = -reg_immed[rb];
791 /* We need to track IL and ILA used to load immediate constants
792 in case they are later used as input to an A instruction. */
793 else if (is_ri16 (insn, op_il, &rt, &immed))
795 reg_immed[rt] = immed;
797 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
801 else if (is_ri18 (insn, op_ila, &rt, &immed))
803 reg_immed[rt] = immed & 0x3ffff;
805 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
/* STQD offsets are in quadwords, hence the << 4 when converting to a
   byte offset below.  */
809 /* STQD is used to save registers to the stack. */
810 else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
812 if (ra == data->cfa_reg)
813 data->reg_offset[rt] = data->cfa_offset - (immed << 4);
815 if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
822 if (ra == SPU_RAW_SP_REGNUM
823 && (found_sp? immed == 0 : rt == SPU_RAW_SP_REGNUM)
831 /* _start uses SELB to set up the stack pointer. */
832 else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
834 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
838 /* We terminate if we find a branch. */
839 else if (is_branch (insn, &immed, &ra))
844 /* If we successfully parsed until here, and didn't find any instruction
845 modifying SP, we assume we have a frameless function. */
849 /* Return cooked instead of raw SP. */
850 if (data->cfa_reg == SPU_RAW_SP_REGNUM)
851 data->cfa_reg = SPU_SP_REGNUM;
856 /* Return the first instruction after the prologue starting at PC. */
/* gdbarch skip_prologue method: delegate to the prologue analyzer with
   an unbounded end address; the DATA results are discarded.  */
858 spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
860 struct spu_prologue_data data;
861 return spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
864 /* Return the frame pointer in use at address PC. */
/* gdbarch virtual_frame_pointer method: when prologue analysis
   succeeded, the virtual FP is CFA minus the frame size; otherwise
   fall back to the cooked SP with an unknown offset.  */
866 spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
867 int *reg, LONGEST *offset)
869 struct spu_prologue_data data;
870 spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
872 if (data.size != -1 && data.cfa_reg != -1)
874 /* The 'frame pointer' address is CFA minus frame size. */
876 *offset = data.cfa_offset - data.size;
880 /* ??? We don't really know ... */
881 *reg = SPU_SP_REGNUM;
886 /* Return true if we are in the function's epilogue, i.e. after the
887 instruction that destroyed the function's stack frame.
889 1) scan forward from the point of execution:
890 a) If you find an instruction that modifies the stack pointer
891 or transfers control (except a return), execution is not in
893 b) Stop scanning if you find a return instruction or reach the
894 end of the function or reach the hard limit for the size of
896 2) scan backward from the point of execution:
897 a) If you find an instruction that modifies the stack pointer,
898 execution *is* in an epilogue, return.
899 b) Stop scanning if you reach an instruction that transfers
900 control or the beginning of the function or reach the hard
901 limit for the size of an epilogue. */
904 spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
906 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
907 CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
910 int rt, ra, rb, immed;
912 /* Find the search limits based on function boundaries and hard limit.
913 We assume the epilogue can be up to 64 instructions long. */
915 const int spu_max_epilogue_size = 64 * 4;
917 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
920 if (pc - func_start < spu_max_epilogue_size)
921 epilogue_start = func_start;
923 epilogue_start = pc - spu_max_epilogue_size;
925 if (func_end - pc < spu_max_epilogue_size)
926 epilogue_end = func_end;
928 epilogue_end = pc + spu_max_epilogue_size;
/* 'bi $0' is a return: a register-indirect branch through the link
   register with zero displacement.  */
930 /* Scan forward until next 'bi $0'. */
932 for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
934 if (target_read_memory (scan_pc, buf, 4))
936 insn = extract_unsigned_integer (buf, 4, byte_order);
938 if (is_branch (insn, &immed, &ra))
940 if (immed == 0 && ra == SPU_LR_REGNUM)
946 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
947 || is_rr (insn, op_a, &rt, &ra, &rb)
948 || is_ri10 (insn, op_lqd, &rt, &ra, &immed)
950 if (rt == SPU_RAW_SP_REGNUM)
955 if (scan_pc >= epilogue_end)
958 /* Scan backward until adjustment to stack pointer (R1). */
960 for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
962 if (target_read_memory (scan_pc, buf, 4))
964 insn = extract_unsigned_integer (buf, 4, byte_order);
966 if (is_branch (insn, &immed, &ra))
969 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
970 || is_rr (insn, op_a, &rt, &ra, &rb)
971 || is_ri10 (insn, op_lqd, &rt, &ra, &immed)
973 if (rt == SPU_RAW_SP_REGNUM)
982 /* Normal stack frames. */
/* Per-frame unwind state: the frame base (CFA), the local variable
   base (CFA minus frame size), and the trad-frame table of saved
   register locations.  */
984 struct spu_unwind_cache
987 CORE_ADDR frame_base;
988 CORE_ADDR local_base;
990 struct trad_frame_saved_reg *saved_regs;
993 static struct spu_unwind_cache *
/* Build (or return the cached) unwind info for THIS_FRAME.  Primary
   strategy is prologue analysis; if that fails, fall back to following
   the backchain word stored at the top of the frame.  */
994 spu_frame_unwind_cache (struct frame_info *this_frame,
995 void **this_prologue_cache)
997 struct gdbarch *gdbarch = get_frame_arch (this_frame);
998 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
999 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1000 struct spu_unwind_cache *info;
1001 struct spu_prologue_data data;
1002 CORE_ADDR id = tdep->id;
1005 if (*this_prologue_cache)
1006 return *this_prologue_cache;
1008 info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
1009 *this_prologue_cache = info;
1010 info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1011 info->frame_base = 0;
1012 info->local_base = 0;
1014 /* Find the start of the current function, and analyze its prologue. */
1015 info->func = get_frame_func (this_frame);
1016 if (info->func == 0)
1018 /* Fall back to using the current PC as frame ID. */
1019 info->func = get_frame_pc (this_frame);
1023 spu_analyze_prologue (gdbarch, info->func, get_frame_pc (this_frame),
1026 /* If successful, use prologue analysis data. */
1027 if (data.size != -1 && data.cfa_reg != -1)
1032 /* Determine CFA via unwound CFA_REG plus CFA_OFFSET. */
1033 get_frame_register (this_frame, data.cfa_reg, buf);
1034 cfa = extract_unsigned_integer (buf, 4, byte_order) + data.cfa_offset;
1035 cfa = SPUADDR (id, cfa);
1037 /* Call-saved register slots. */
1038 for (i = 0; i < SPU_NUM_GPRS; i++)
1039 if (i == SPU_LR_REGNUM
1040 || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
1041 if (data.reg_offset[i] != -1)
1042 info->saved_regs[i].addr = cfa - data.reg_offset[i];
1045 info->frame_base = cfa;
1046 info->local_base = cfa - data.size;
1049 /* Otherwise, fall back to reading the backchain link. */
1057 /* Get local store limit. */
1058 lslr = get_frame_register_unsigned (this_frame, SPU_LSLR_REGNUM);
1060 lslr = (ULONGEST) -1;
1062 /* Get the backchain. */
1063 reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
1064 status = safe_read_memory_integer (SPUADDR (id, reg), 4, byte_order,
1067 /* A zero backchain terminates the frame chain. Also, sanity
1068 check against the local store size limit. */
1069 if (status && backchain > 0 && backchain <= lslr)
1071 /* Assume the link register is saved into its slot. */
1072 if (backchain + 16 <= lslr)
1073 info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id,
1077 info->frame_base = SPUADDR (id, backchain);
1078 info->local_base = SPUADDR (id, reg);
1082 /* If we didn't find a frame, we cannot determine SP / return address. */
1083 if (info->frame_base == 0)
1086 /* The previous SP is equal to the CFA. */
1087 trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM,
1088 SPUADDR_ADDR (info->frame_base));
1090 /* Read full contents of the unwound link register in order to
1091 be able to determine the return address. */
1092 if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
1093 target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
1095 get_frame_register (this_frame, SPU_LR_REGNUM, buf);
1097 /* Normally, the return address is contained in the slot 0 of the
1098 link register, and slots 1-3 are zero. For an overlay return,
1099 slot 0 contains the address of the overlay manager return stub,
1100 slot 1 contains the partition number of the overlay section to
1101 be returned to, and slot 2 contains the return address within
1102 that section. Return the latter address in that case. */
1103 if (extract_unsigned_integer (buf + 8, 4, byte_order) != 0)
1104 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
1105 extract_unsigned_integer (buf + 8, 4, byte_order));
1107 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
1108 extract_unsigned_integer (buf, 4, byte_order));
/* Frame unwinder this_id method: identify the frame by its base (CFA)
   and function start address.  A zero frame base means the unwind
   failed and the elided early-return presumably leaves the outer-frame
   ID.  */
1114 spu_frame_this_id (struct frame_info *this_frame,
1115 void **this_prologue_cache, struct frame_id *this_id)
1117 struct spu_unwind_cache *info =
1118 spu_frame_unwind_cache (this_frame, this_prologue_cache)
1120 if (info->frame_base == 0)
1123 *this_id = frame_id_build (info->frame_base, info->func);
1126 static struct value *
/* Frame unwinder prev_register method: requests for the raw SP are
   redirected to the cooked SP, then satisfied from the trad-frame
   saved-register table built by the cache function.  */
1127 spu_frame_prev_register (struct frame_info *this_frame,
1128 void **this_prologue_cache, int regnum)
1130 struct spu_unwind_cache *info
1131 = spu_frame_unwind_cache (this_frame, this_prologue_cache);
1133 /* Special-case the stack pointer. */
1134 if (regnum == SPU_RAW_SP_REGNUM)
1135 regnum = SPU_SP_REGNUM;
1137 return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
/* Unwinder and frame-base vtables wiring the methods above into the
   frame machinery.  */
1140 static const struct frame_unwind spu_frame_unwind = {
1142 default_frame_unwind_stop_reason,
1144 spu_frame_prev_register,
1146 default_frame_sniffer
/* Frame base method: local variables live at CFA minus frame size.  */
1150 spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
1152 struct spu_unwind_cache *info
1153 = spu_frame_unwind_cache (this_frame, this_cache);
1154 return info->local_base;
1157 static const struct frame_base spu_frame_base = {
1159 spu_frame_base_address,
1160 spu_frame_base_address,
1161 spu_frame_base_address
/* gdbarch unwind_pc method: unwound PC with the low two bits (the
   interrupt-enable state) masked off, tagged with our spufs ID.  */
1165 spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
1167 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1168 CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
1169 /* Mask off interrupt enable bit. */
1170 return SPUADDR (tdep->id, pc & -4);
/* gdbarch unwind_sp method: unwound cooked SP tagged with our spufs
   ID.  */
1174 spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
1176 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1177 CORE_ADDR sp = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
1178 return SPUADDR (tdep->id, sp);
/* gdbarch read_pc method, same masking as spu_unwind_pc.  */
1182 spu_read_pc (struct regcache *regcache)
1184 struct gdbarch_tdep *tdep = gdbarch_tdep (get_regcache_arch (regcache));
1186 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
1187 /* Mask off interrupt enable bit. */
1188 return SPUADDR (tdep->id, pc & -4);
/* gdbarch write_pc method: write the new PC while preserving the low
   two (interrupt-enable) bits of the old value.  */
1192 spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
1194 /* Keep interrupt enabled state unchanged. */
1197 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
1198 regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
1199 (SPUADDR_ADDR (pc) & -4) | (old_pc & 3));
1203 /* Cell/B.E. cross-architecture unwinder support. */
/* Cache for an SPU frame whose caller is PPU code: the synthesized
   frame ID and a private regcache snapshot of the PPU-side register
   state that prev_register answers from.  */
1205 struct spu2ppu_cache
1207 struct frame_id frame_id;
1208 struct regcache *regcache;
1211 static struct gdbarch *
/* prev_arch method: the previous (caller) frame's architecture is that
   of the saved PPU regcache, not the SPU.  */
1212 spu2ppu_prev_arch (struct frame_info *this_frame, void **this_cache)
1214 struct spu2ppu_cache *cache = *this_cache;
1215 return get_regcache_arch (cache->regcache);
1219 spu2ppu_this_id (struct frame_info *this_frame,
1220 void **this_cache, struct frame_id *this_id)
1222 struct spu2ppu_cache *cache = *this_cache;
1223 *this_id = cache->frame_id;
1226 static struct value *
/* prev_register method: answer every register request from the saved
   PPU regcache snapshot.  */
1227 spu2ppu_prev_register (struct frame_info *this_frame,
1228 void **this_cache, int regnum)
1230 struct spu2ppu_cache *cache = *this_cache;
1231 struct gdbarch *gdbarch = get_regcache_arch (cache->regcache);
1234 buf = alloca (register_size (gdbarch, regnum));
1235 regcache_cooked_read (cache->regcache, regnum, buf);
1236 return frame_unwind_got_bytes (this_frame, regnum, buf);
/* Sniffer: claim the frame when we are unwinding SPU code on a
   combined Cell/B.E. debugging session and the backchain indicates a
   transition back to PPU code.  The PPU register snapshot is taken
   either from the next non-SPU frame or, failing that, from the
   current thread's top-level PPU regcache.  */
1240 spu2ppu_sniffer (const struct frame_unwind *self,
1241 struct frame_info *this_frame, void **this_prologue_cache)
1243 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1244 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1245 CORE_ADDR base, func, backchain;
1248 if (gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_spu)
1251 base = get_frame_sp (this_frame);
1252 func = get_frame_pc (this_frame);
1253 if (target_read_memory (base, buf, 4))
1255 backchain = extract_unsigned_integer (buf, 4, byte_order);
1259 struct frame_info *fi;
1261 struct spu2ppu_cache *cache
1262 = FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache);
1264 cache->frame_id = frame_id_build (base + 16, func);
1266 for (fi = get_next_frame (this_frame); fi; fi = get_next_frame (fi))
1267 if (gdbarch_bfd_arch_info (get_frame_arch (fi))->arch != bfd_arch_spu)
1272 cache->regcache = frame_save_as_regcache (fi);
1273 *this_prologue_cache = cache;
1278 struct regcache *regcache;
1279 regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
1280 cache->regcache = regcache_dup (regcache);
1281 *this_prologue_cache = cache;
/* Free the duplicated regcache when the frame cache is torn down.  */
1290 spu2ppu_dealloc_cache (struct frame_info *self, void *this_cache)
1292 struct spu2ppu_cache *cache = this_cache;
1293 regcache_xfree (cache->regcache);
1296 static const struct frame_unwind spu2ppu_unwind = {
1298 default_frame_unwind_stop_reason,
1300 spu2ppu_prev_register,
1303 spu2ppu_dealloc_cache,
1308 /* Function calling convention. */
/* gdbarch frame_align method (body elided in this listing; presumably
   aligns SP to the 16-byte quadword boundary used throughout this
   file -- confirm).  */
1311 spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
/* gdbarch push_dummy_code method: reserve breakpoint space on a
   16-byte aligned stack and report the breakpoint/resume addresses for
   an inferior function call.  */
1317 spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
1318 struct value **args, int nargs, struct type *value_type,
1319 CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
1320 struct regcache *regcache)
1322 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1323 sp = (sp - 4) & ~15;
1324 /* Store the address of that breakpoint */
1326 /* The call starts at the callee's entry point. */
/* Return non-zero if TYPE is passed/returned as a scalar in a single
   register's preferred slot (small integral-like types up to 16
   bytes).  */
1333 spu_scalar_value_p (struct type *type)
1335 switch (TYPE_CODE (type))
1338 case TYPE_CODE_ENUM:
1339 case TYPE_CODE_RANGE:
1340 case TYPE_CODE_CHAR:
1341 case TYPE_CODE_BOOL:
1344 return TYPE_LENGTH (type) <= 16;
/* Store a value of TYPE from IN into registers starting at REGNUM:
   scalars go right-justified into the preferred slot; aggregates are
   spread across consecutive 16-byte registers, with any tail shorter
   than a register written as a partial register.  */
1352 spu_value_to_regcache (struct regcache *regcache, int regnum,
1353 struct type *type, const gdb_byte *in)
1355 int len = TYPE_LENGTH (type);
1357 if (spu_scalar_value_p (type))
1359 int preferred_slot = len < 4 ? 4 - len : 0;
1360 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1366 regcache_cooked_write (regcache, regnum++, in);
1372 regcache_cooked_write_part (regcache, regnum, 0, len, in);
/* The exact inverse of spu_value_to_regcache: extract a value of TYPE
   from registers starting at REGNUM into OUT.  */
1377 spu_regcache_to_value (struct regcache *regcache, int regnum,
1378 struct type *type, gdb_byte *out)
1380 int len = TYPE_LENGTH (type);
1382 if (spu_scalar_value_p (type))
1384 int preferred_slot = len < 4 ? 4 - len : 0;
1385 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1391 regcache_cooked_read (regcache, regnum++, out);
1397 regcache_cooked_read_part (regcache, regnum, 0, len, out);
/* gdbarch push_dummy_call method: set up registers and stack for an
   inferior function call.  Arguments are passed in registers starting
   at SPU_ARG1_REGNUM (one or more 16-byte registers each); once an
   argument no longer fits in the register file, it and all following
   arguments spill to 16-byte-aligned stack slots.  */
1402 spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1403 struct regcache *regcache, CORE_ADDR bp_addr,
1404 int nargs, struct value **args, CORE_ADDR sp,
1405 int struct_return, CORE_ADDR struct_addr)
1407 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1410 int regnum = SPU_ARG1_REGNUM;
1414 /* Set the return address. */
1415 memset (buf, 0, sizeof buf);
1416 store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (bp_addr));
1417 regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);
1419 /* If STRUCT_RETURN is true, then the struct return address (in
1420 STRUCT_ADDR) will consume the first argument-passing register.
1421 Both adjust the register count and store that value. */
1424 memset (buf, 0, sizeof buf);
1425 store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (struct_addr));
1426 regcache_cooked_write (regcache, regnum++, buf);
1429 /* Fill in argument registers. */
1430 for (i = 0; i < nargs; i++)
1432 struct value *arg = args[i];
1433 struct type *type = check_typedef (value_type (arg));
1434 const gdb_byte *contents = value_contents (arg);
1435 int n_regs = align_up (TYPE_LENGTH (type), 16) / 16;
1437 /* If the argument doesn't wholly fit into registers, it and
1438 all subsequent arguments go to the stack. */
1439 if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
1445 spu_value_to_regcache (regcache, regnum, type, contents);
1449 /* Overflow arguments go to the stack. */
1450 if (stack_arg != -1)
1454 /* Allocate all required stack size. */
1455 for (i = stack_arg; i < nargs; i++)
1457 struct type *type = check_typedef (value_type (args[i]));
1458 sp -= align_up (TYPE_LENGTH (type), 16);
1461 /* Fill in stack arguments. */
1463 for (i = stack_arg; i < nargs; i++)
1465 struct value *arg = args[i];
1466 struct type *type = check_typedef (value_type (arg));
1467 int len = TYPE_LENGTH (type);
1470 if (spu_scalar_value_p (type))
1471 preferred_slot = len < 4 ? 4 - len : 0;
1475 target_write_memory (ap + preferred_slot, value_contents (arg), len);
1476 ap += align_up (TYPE_LENGTH (type), 16);
1480 /* Allocate stack frame header. */
1483 /* Store stack back chain. */
1484 regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
1485 target_write_memory (sp, buf, 16);
/* The raw SP register holds four 32-bit slots; shift every slot by
   the same delta so all views of the stack pointer stay consistent.  */
1487 /* Finally, update all slots of the SP register. */
1488 sp_delta = sp - extract_unsigned_integer (buf, 4, byte_order);
1489 for (i = 0; i < 4; i++)
1491 CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4, byte_order);
1492 store_unsigned_integer (buf + 4*i, 4, byte_order, sp_slot + sp_delta);
1494 regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);
/* gdbarch dummy_id method: build the frame ID of a dummy (inferior
   call) frame from the current SP and PC, tagging both with this
   SPU context's ID.  The PC is masked to 4-byte instruction
   alignment via "& -4".  */
1499 static struct frame_id
1500 spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1502 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1503 CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
1504 CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
1505 return frame_id_build (SPUADDR (tdep->id, sp), SPUADDR (tdep->id, pc & -4));
1508 /* Function return value access. */
/* gdbarch return_value method: decide how a value of TYPE is
   returned from FUNCTION, and read it into OUT / write it from IN
   via REGCACHE.  Values that fit within the argument registers use
   the register convention; anything larger uses the struct
   convention.  OpenCL vector results of length 2 are special-cased:
   they live at offset 2 within the first word of $r3 rather than in
   the usual preferred slot.  */
1510 static enum return_value_convention
1511 spu_return_value (struct gdbarch *gdbarch, struct value *function,
1512 struct type *type, struct regcache *regcache,
1513 gdb_byte *out, const gdb_byte *in)
1515 struct type *func_type = function ? value_type (function) : NULL;
1516 enum return_value_convention rvc;
1517 int opencl_vector = 0;
1521 func_type = check_typedef (func_type);
1523 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1524 func_type = check_typedef (TYPE_TARGET_TYPE (func_type));
/* Detect an OpenCL-convention function returning a vector type.  */
1526 if (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1527 && TYPE_CALLING_CONVENTION (func_type) == DW_CC_GDB_IBM_OpenCL
1528 && TYPE_CODE (type) == TYPE_CODE_ARRAY
1529 && TYPE_VECTOR (type))
/* Register convention while the value fits in the ARG1..ARGN
   register range, struct convention otherwise.  */
1533 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1534 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1536 rvc = RETURN_VALUE_STRUCT_CONVENTION;
/* Writing the return value (IN != NULL); the switch header is
   elided from this excerpt.  */
1542 case RETURN_VALUE_REGISTER_CONVENTION:
1543 if (opencl_vector && TYPE_LENGTH (type) == 2)
1544 regcache_cooked_write_part (regcache, SPU_ARG1_REGNUM, 2, 2, in);
1546 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1549 case RETURN_VALUE_STRUCT_CONVENTION:
1550 error (_("Cannot set function return value."));
/* Reading the return value (OUT != NULL).  */
1558 case RETURN_VALUE_REGISTER_CONVENTION:
1559 if (opencl_vector && TYPE_LENGTH (type) == 2)
1560 regcache_cooked_read_part (regcache, SPU_ARG1_REGNUM, 2, 2, out);
1562 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1565 case RETURN_VALUE_STRUCT_CONVENTION:
1566 error (_("Function return value unknown."));
/* gdbarch breakpoint_from_pc method: return the software breakpoint
   instruction for the SPU (the 4-byte pattern 0x00003fff, presumably
   a "stop 0x3fff" instruction -- TODO confirm mnemonic) and its
   length through *LENPTR.  */
1577 static const gdb_byte *
1578 spu_breakpoint_from_pc (struct gdbarch *gdbarch,
1579 CORE_ADDR * pcptr, int *lenptr)
1581 static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
1583 *lenptr = sizeof breakpoint;
/* gdbarch memory_remove_breakpoint method, with a fork-related
   workaround; see the explanation below.  */
1588 spu_memory_remove_breakpoint (struct gdbarch *gdbarch,
1589 struct bp_target_info *bp_tgt)
1591 /* We work around a problem in combined Cell/B.E. debugging here.  Consider
1592 that in a combined application, we have some breakpoints inserted in SPU
1593 code, and now the application forks (on the PPU side).  GDB common code
1594 will assume that the fork system call copied all breakpoints into the new
1595 process' address space, and that all those copies now need to be removed
1596 (see breakpoint.c:detach_breakpoints).
1598 While this is certainly true for PPU side breakpoints, it is not true
1599 for SPU side breakpoints.  fork will clone the SPU context file
1600 descriptors, so that all the existing SPU contexts remain accessible
1601 in the new process.  However, the contents of the SPU contexts themselves
1602 are *not* cloned.  Therefore the effect of detach_breakpoints is to
1603 remove SPU breakpoints from the *original* SPU context's local store
1604 -- this is not the correct behaviour.
1606 The workaround is to check whether the PID we are asked to remove this
1607 breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
1608 PID of the current inferior (i.e. current_inferior ()->pid).  This is only
1609 true in the context of detach_breakpoints.  If so, we simply do nothing.
1610 [ Note that for the fork child process, it does not matter if breakpoints
1611 remain inserted, because those SPU contexts are not runnable anyway --
1612 the Linux kernel allows only the original process to invoke spu_run.  */
1614 if (ptid_get_pid (inferior_ptid) != current_inferior ()->pid)
1617 return default_memory_remove_breakpoint (gdbarch, bp_tgt);
1621 /* Software single-stepping support. */
/* gdbarch software_single_step method: plant single-step breakpoints
   after the instruction at the current PC.  One breakpoint goes at
   the next sequential address (PC+4, or PC+8 for a PPE-assisted
   call); for branch instructions a second goes at the branch target.
   NOTE(review): the '®' token below is mojibake for '&reg' (HTML
   entity collapse); restore before compiling.  Several declarations
   and else-branches are elided from this excerpt.  */
1624 spu_software_single_step (struct frame_info *frame)
1626 struct gdbarch *gdbarch = get_frame_arch (frame);
1627 struct address_space *aspace = get_frame_address_space (frame);
1628 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1629 CORE_ADDR pc, next_pc;
1635 pc = get_frame_pc (frame);
1637 if (target_read_memory (pc, buf, 4))
1639 insn = extract_unsigned_integer (buf, 4, byte_order);
1641 /* Get local store limit.  */
1642 lslr = get_frame_register_unsigned (frame, SPU_LSLR_REGNUM);
/* A zero LSLR presumably means "no limit"; use an all-ones mask.  */
1644 lslr = (ULONGEST) -1;
1646 /* Next sequential instruction is at PC + 4, except if the current
1647 instruction is a PPE-assisted call, in which case it is at PC + 8.
1648 Wrap around LS limit to be on the safe side.  */
1649 if ((insn & 0xffffff00) == 0x00002100)
1650 next_pc = (SPUADDR_ADDR (pc) + 8) & lslr;
1652 next_pc = (SPUADDR_ADDR (pc) + 4) & lslr;
1654 insert_single_step_breakpoint (gdbarch,
1655 aspace, SPUADDR (SPUADDR_SPU (pc), next_pc));
1657 if (is_branch (insn, &offset, ®))
1659 CORE_ADDR target = offset;
/* PC-relative branch: bias the offset by the current PC.  */
1661 if (reg == SPU_PC_REGNUM)
1662 target += SPUADDR_ADDR (pc);
/* Register-indirect branch: read the target register, masking to
   instruction alignment.  */
1667 if (get_frame_register_bytes (frame, reg, 0, 4, buf,
1669 target += extract_unsigned_integer (buf, 4, byte_order) & -4;
/* Distinguish "register optimized out" from "not available" when
   the branch target register cannot be read.  */
1673 throw_error (OPTIMIZED_OUT_ERROR,
1674 _("Could not determine address of "
1675 "single-step breakpoint."));
1677 throw_error (NOT_AVAILABLE_ERROR,
1678 _("Could not determine address of "
1679 "single-step breakpoint."));
1683 target = target & lslr;
/* Avoid planting two breakpoints at the same address.  */
1684 if (target != next_pc)
1685 insert_single_step_breakpoint (gdbarch, aspace,
1686 SPUADDR (SPUADDR_SPU (pc), target));
1693 /* Longjmp support. */
/* gdbarch get_longjmp_target method: extract the longjmp resume PC
   from the jmp_buf pointed to by argument register $r3, tag it with
   this SPU context's ID, and store it in *PC.  */
1696 spu_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1698 struct gdbarch *gdbarch = get_frame_arch (frame);
1699 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1700 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1705 /* Jump buffer is pointed to by the argument register $r3.  */
1706 if (!get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf,
1710 jb_addr = extract_unsigned_integer (buf, 4, byte_order);
/* The saved PC is presumably the first word of the jmp_buf -- TODO
   confirm against the SPU newlib setjmp layout.  */
1711 if (target_read_memory (SPUADDR (tdep->id, jb_addr), buf, 4))
1714 *pc = extract_unsigned_integer (buf, 4, byte_order);
1715 *pc = SPUADDR (tdep->id, *pc);
/* Callback data for spu_dis_asm_print_address: the architecture plus
   the SPU context ID to re-attach to disassembled addresses (an "id"
   member appears elided from this excerpt).  */
1722 struct spu_dis_asm_data
1724 struct gdbarch *gdbarch;
/* Disassembler print_address callback: print ADDR after re-attaching
   the SPU context ID stashed in INFO->application_data.  */
1729 spu_dis_asm_print_address (bfd_vma addr, struct disassemble_info *info)
1731 struct spu_dis_asm_data *data = info->application_data;
1732 print_address (data->gdbarch, SPUADDR (data->id, addr), info->stream);
/* gdbarch print_insn method: disassemble one SPU instruction at
   MEMADDR, routing address printing through our SPU-aware callback
   (see below for why).  */
1736 gdb_print_insn_spu (bfd_vma memaddr, struct disassemble_info *info)
1738 /* The opcodes disassembler does 18-bit address arithmetic.  Make
1739 sure the SPU ID encoded in the high bits is added back when we
1740 call print_address.  */
1741 struct disassemble_info spu_info = *info;
1742 struct spu_dis_asm_data data;
1743 data.gdbarch = info->application_data;
1744 data.id = SPUADDR_SPU (memaddr);
1746 spu_info.application_data = &data;
1747 spu_info.print_address_func = spu_dis_asm_print_address;
1748 return print_insn_spu (memaddr, &spu_info);
1752 /* Target overlays for the SPU overlay manager.
1754 See the documentation of simple_overlay_update for how the
1755 interface is supposed to work.
1757 Data structures used by the overlay manager:
1765 } _ovly_table[]; -- one entry per overlay section
1767 struct ovly_buf_table
1770 } _ovly_buf_table[]; -- one entry per overlay buffer
1772 _ovly_table should never change.
1774 Both tables are aligned to a 16-byte boundary, the symbols
1775 _ovly_table and _ovly_buf_table are of type STT_OBJECT and their
1776 size set to the size of the respective array. buf in _ovly_table is
1777 an index into _ovly_buf_table.
1779 mapped is an index into _ovly_table. Both the mapped and buf indices start
1780 from one to reference the first entry in their respective tables. */
1782 /* Using the per-objfile private data mechanism, we store for each
1783 objfile an array of "struct spu_overlay_table" structures, one
1784 for each obj_section of the objfile. This structure holds two
1785 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1786 is *not* an overlay section. If it is non-zero, it represents
1787 a target address. The overlay section is mapped iff the target
1788 integer at this location equals MAPPED_VAL. */
/* Per-objfile key under which the overlay table array is stored;
   see the explanation in the comment block above.  */
1790 static const struct objfile_data *spu_overlay_data;
/* One entry per obj_section: target address of this section's
   _ovly_buf_table slot (0 if not an overlay section) and the value
   that slot holds when the section is mapped.  */
1792 struct spu_overlay_table
1794 CORE_ADDR mapped_ptr;
1795 CORE_ADDR mapped_val;
1798 /* Retrieve the overlay table for OBJFILE. If not already cached, read
1799 the _ovly_table data structure from the target and initialize the
1800 spu_overlay_table data structure from it. */
/* Retrieve (building and caching on first use) the per-section
   overlay table for OBJFILE, or return the cached copy.  Returns the
   table, presumably NULL when the objfile has no overlay symbols
   (the early-return lines are elided from this excerpt).  */
1801 static struct spu_overlay_table *
1802 spu_get_overlay_table (struct objfile *objfile)
1804 enum bfd_endian byte_order = bfd_big_endian (objfile->obfd)?
1805 BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
1806 struct bound_minimal_symbol ovly_table_msym, ovly_buf_table_msym;
1807 CORE_ADDR ovly_table_base, ovly_buf_table_base;
1808 unsigned ovly_table_size, ovly_buf_table_size;
1809 struct spu_overlay_table *tbl;
1810 struct obj_section *osect;
1811 gdb_byte *ovly_table;
/* Return the cached table if we built it before.  */
1814 tbl = objfile_data (objfile, spu_overlay_data);
1818 ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
1819 if (!ovly_table_msym.minsym)
1822 ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table",
1824 if (!ovly_buf_table_msym.minsym)
1827 ovly_table_base = BMSYMBOL_VALUE_ADDRESS (ovly_table_msym);
1828 ovly_table_size = MSYMBOL_SIZE (ovly_table_msym.minsym);
1830 ovly_buf_table_base = BMSYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
1831 ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym.minsym);
/* Read the raw _ovly_table from the target.  NOTE(review): the
   matching xfree appears elided from this excerpt -- verify the
   buffer is released after the loop.  */
1833 ovly_table = xmalloc (ovly_table_size);
1834 read_memory (ovly_table_base, ovly_table, ovly_table_size);
1836 tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
1837 objfile->sections_end - objfile->sections,
1838 struct spu_overlay_table);
/* Each _ovly_table entry is 16 bytes: vma, size, file pos, and a
   one-based _ovly_buf_table index.  */
1840 for (i = 0; i < ovly_table_size / 16; i++)
1842 CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0,
1844 CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4,
1846 CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8,
1848 CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12,
/* Skip entries with an invalid buffer index.  */
1851 if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
/* Match the entry to an obj_section by vma and file position.  */
1854 ALL_OBJFILE_OSECTIONS (objfile, osect)
1855 if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
1856 && pos == osect->the_bfd_section->filepos)
1858 int ndx = osect - objfile->sections;
1859 tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
1860 tbl[ndx].mapped_val = i + 1;
1866 set_objfile_data (objfile, spu_overlay_data, tbl);
1870 /* Read _ovly_buf_table entry from the target to determine whether
1871 OSECT is currently mapped, and update the mapped state. */
/* Refresh OSECT->ovly_mapped by reading its _ovly_buf_table slot
   from the target and comparing against the expected mapped value.
   Non-overlay sections (mapped_ptr == 0) are left alone.  */
1873 spu_overlay_update_osect (struct obj_section *osect)
1875 enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd)?
1876 BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
1877 struct spu_overlay_table *ovly_table;
1880 ovly_table = spu_get_overlay_table (osect->objfile);
/* Index the per-objfile table by this section's position.  */
1884 ovly_table += osect - osect->objfile->sections;
1885 if (ovly_table->mapped_ptr == 0)
/* The mapped_ptr is an LS address; qualify it with the SPU ID taken
   from the section's own address.  */
1888 id = SPUADDR_SPU (obj_section_addr (osect));
1889 val = read_memory_unsigned_integer (SPUADDR (id, ovly_table->mapped_ptr),
1891 osect->ovly_mapped = (val == ovly_table->mapped_val);
1894 /* If OSECT is NULL, then update all sections' mapped state.
1895 If OSECT is non-NULL, then update only OSECT's mapped state. */
/* Overlay-manager "update" hook: refresh the mapped state of OSECT,
   or of every overlay section everywhere when OSECT is NULL.  */
1897 spu_overlay_update (struct obj_section *osect)
1899 /* Just one section.  */
1901 spu_overlay_update_osect (osect);
/* All sections: walk every objfile's sections (the else branch and
   brace lines are elided from this excerpt).  */
1906 struct objfile *objfile;
1908 ALL_OBJSECTIONS (objfile, osect)
1909 if (section_is_overlay (osect))
1910 spu_overlay_update_osect (osect);
1914 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1915 If there is one, go through all sections and make sure for non-
1916 overlay sections LMA equals VMA, while for overlay sections LMA
1917 is larger than SPU_OVERLAY_LMA. */
/* new_objfile observer: on first sight of an SPU objfile with
   overlays, rewrite section LMAs so GDB's generic overlay machinery
   can distinguish mapped (LMA == VMA) from unmapped sections
   (LMA >= SPU_OVERLAY_LMA).  */
1919 spu_overlay_new_objfile (struct objfile *objfile)
1921 struct spu_overlay_table *ovly_table;
1922 struct obj_section *osect;
1924 /* If we've already touched this file, do nothing.  */
1925 if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
1928 /* Consider only SPU objfiles.  */
1929 if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
1932 /* Check if this objfile has overlays.  */
1933 ovly_table = spu_get_overlay_table (objfile);
1937 /* Now go and fiddle with all the LMAs.  */
1938 ALL_OBJFILE_OSECTIONS (objfile, osect)
1940 bfd *obfd = objfile->obfd;
1941 asection *bsect = osect->the_bfd_section;
1942 int ndx = osect - objfile->sections;
/* Non-overlay sections keep LMA == VMA; overlay sections get a
   synthetic LMA above SPU_OVERLAY_LMA keyed by file position.  */
1944 if (ovly_table[ndx].mapped_ptr == 0)
1945 bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
1947 bfd_section_lma (obfd, bsect) = SPU_OVERLAY_LMA + bsect->filepos;
1952 /* Insert temporary breakpoint on "main" function of newly loaded
1953 SPE context OBJFILE. */
/* Plant a temporary breakpoint on "main" of a newly loaded SPE
   context OBJFILE, honoring "set spu stop-on-load".  Uses debug info
   to skip the prologue when available, otherwise falls back to the
   minimal symbol's address.  */
1955 spu_catch_start (struct objfile *objfile)
1957 struct bound_minimal_symbol minsym;
1958 struct symtab *symtab;
1962 /* Do this only if requested by "set spu stop-on-load on".  */
1963 if (!spu_stop_on_load_p)
1966 /* Consider only SPU objfiles.  */
1967 if (!objfile || bfd_get_arch (objfile->obfd) != bfd_arch_spu)
1970 /* The main objfile is handled differently.  */
1971 if (objfile == symfile_objfile)
1974 /* There can be multiple symbols named "main".  Search for the
1975 "main" in *this* objfile.  */
1976 minsym = lookup_minimal_symbol ("main", NULL, objfile);
1980 /* If we have debugging information, try to use it -- this
1981 will allow us to properly skip the prologue.  */
1982 pc = BMSYMBOL_VALUE_ADDRESS (minsym);
1983 symtab = find_pc_sect_symtab (pc, MSYMBOL_OBJ_SECTION (minsym.objfile,
1987 const struct blockvector *bv = BLOCKVECTOR (symtab);
1988 struct block *block = BLOCKVECTOR_BLOCK (bv, GLOBAL_BLOCK);
1990 struct symtab_and_line sal;
/* Look up the full symbol and take the post-prologue start.  */
1992 sym = lookup_block_symbol (block, "main", VAR_DOMAIN);
1995 fixup_symbol_section (sym, objfile);
1996 sal = find_function_start_sal (sym, 1);
2001 /* Use a numerical address for the set_breakpoint command to avoid having
2002 the breakpoint re-set incorrectly.  */
2003 xsnprintf (buf, sizeof buf, "*%s", core_addr_to_string (pc));
2004 create_breakpoint (get_objfile_arch (objfile), buf /* arg */,
2005 NULL /* cond_string */, -1 /* thread */,
2006 NULL /* extra_string */,
2007 0 /* parse_condition_and_thread */, 1 /* tempflag */,
2008 bp_breakpoint /* type_wanted */,
2009 0 /* ignore_count */,
2010 AUTO_BOOLEAN_FALSE /* pending_break_support */,
2011 &bkpt_breakpoint_ops /* ops */, 0 /* from_tty */,
2012 1 /* enabled */, 0 /* internal */, 0);
2016 /* Look up OBJFILE loaded into FRAME's SPU context. */
/* Return the objfile whose sections live in FRAME's SPU context
   (matched by SPU ID), or presumably NULL when FRAME is not an SPU
   frame or no objfile matches (the loop header and return lines are
   elided from this excerpt).  */
2017 static struct objfile *
2018 spu_objfile_from_frame (struct frame_info *frame)
2020 struct gdbarch *gdbarch = get_frame_arch (frame);
2021 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2022 struct objfile *obj;
2024 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
/* Match an objfile by comparing the SPU ID encoded in its first
   section's address against this frame's context ID.  */
2029 if (obj->sections != obj->sections_end
2030 && SPUADDR_SPU (obj_section_addr (obj->sections)) == tdep->id)
2037 /* Flush cache for ea pointer access if available. */
/* Flush the software-managed EA cache, if the current SPU program
   provides a __cache_flush function, by calling it in the inferior.  */
2039 flush_ea_cache (void)
2041 struct bound_minimal_symbol msymbol;
2042 struct objfile *obj;
2044 if (!has_stack_frames ())
2047 obj = spu_objfile_from_frame (get_current_frame ());
2051 /* Lookup inferior function __cache_flush.  */
2052 msymbol = lookup_minimal_symbol ("__cache_flush", NULL, obj);
2053 if (msymbol.minsym != NULL)
/* Build a "void (*)(void)" pointer value for the call.  */
2058 type = objfile_type (obj)->builtin_void;
2059 type = lookup_function_type (type);
2060 type = lookup_pointer_type (type);
2061 addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2063 call_function_by_hand (value_from_pointer (type, addr), 0, NULL);
2067 /* This handler is called when the inferior has stopped. If it is stopped in
2068 SPU architecture then flush the ea cache if used. */
/* normal_stop observer: when the inferior stops in SPU code, flush
   the EA cache (guarded by "set spu auto-flush-cache").  */
2070 spu_attach_normal_stop (struct bpstats *bs, int print_frame)
2072 if (!spu_auto_flush_cache_p)
2075 /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
2076 re-entering this function when __cache_flush stops.  */
2077 spu_auto_flush_cache_p = 0;
2079 spu_auto_flush_cache_p = 1;
2083 /* "info spu" commands. */
/* Implement the "info spu event" command: read and display the SPU
   event_status and event_mask values via the spufs TARGET_OBJECT_SPU
   interface.  NOTE(review): '¤t_target' below is mojibake for
   '&current_target'; restore before compiling.  */
2086 info_spu_event_command (char *args, int from_tty)
2088 struct frame_info *frame = get_selected_frame (NULL);
2089 ULONGEST event_status = 0;
2090 ULONGEST event_mask = 0;
2091 struct cleanup *chain;
2097 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2098 error (_("\"info spu\" is only supported on the SPU architecture."));
2100 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
/* spufs exposes these values as ASCII hex strings; read then parse.  */
2102 xsnprintf (annex, sizeof annex, "%d/event_status", id);
2103 len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
2104 buf, 0, (sizeof (buf) - 1));
2106 error (_("Could not read event_status."));
2108 event_status = strtoulst ((char *) buf, NULL, 16);
2110 xsnprintf (annex, sizeof annex, "%d/event_mask", id);
2111 len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
2112 buf, 0, (sizeof (buf) - 1));
2114 error (_("Could not read event_mask."));
2116 event_mask = strtoulst ((char *) buf, NULL, 16);
2118 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoEvent");
/* MI consumers get raw fields; CLI users get formatted text.  */
2120 if (ui_out_is_mi_like_p (current_uiout))
2122 ui_out_field_fmt (current_uiout, "event_status",
2123 "0x%s", phex_nz (event_status, 4));
2124 ui_out_field_fmt (current_uiout, "event_mask",
2125 "0x%s", phex_nz (event_mask, 4));
2129 printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
2130 printf_filtered (_("Event Mask   0x%s\n"), phex (event_mask, 4));
2133 do_cleanups (chain);
/* Implement the "info spu signal" command: display the two SPU
   signal-notification channels (pending state, control word, and
   OR/Overwrite type) read via spufs.  NOTE(review): '¤t_target'
   below is mojibake for '&current_target'; restore before
   compiling.  */
2137 info_spu_signal_command (char *args, int from_tty)
2139 struct frame_info *frame = get_selected_frame (NULL);
2140 struct gdbarch *gdbarch = get_frame_arch (frame);
2141 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2142 ULONGEST signal1 = 0;
2143 ULONGEST signal1_type = 0;
2144 int signal1_pending = 0;
2145 ULONGEST signal2 = 0;
2146 ULONGEST signal2_type = 0;
2147 int signal2_pending = 0;
2148 struct cleanup *chain;
2154 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2155 error (_("\"info spu\" is only supported on the SPU architecture."));
2157 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
/* The signal word reads destructively drain the channel; a short
   read presumably means "nothing pending" (the length check lines
   are elided from this excerpt).  */
2159 xsnprintf (annex, sizeof annex, "%d/signal1", id);
2160 len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2162 error (_("Could not read signal1."));
2165 signal1 = extract_unsigned_integer (buf, 4, byte_order);
2166 signal1_pending = 1;
/* The type values are exposed as ASCII hex.  */
2169 xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
2170 len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
2171 buf, 0, (sizeof (buf) - 1));
2173 error (_("Could not read signal1_type."));
2175 signal1_type = strtoulst ((char *) buf, NULL, 16);
2177 xsnprintf (annex, sizeof annex, "%d/signal2", id);
2178 len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2180 error (_("Could not read signal2."));
2183 signal2 = extract_unsigned_integer (buf, 4, byte_order);
2184 signal2_pending = 1;
2187 xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
2188 len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
2189 buf, 0, (sizeof (buf) - 1));
2191 error (_("Could not read signal2_type."));
2193 signal2_type = strtoulst ((char *) buf, NULL, 16);
2195 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoSignal");
2197 if (ui_out_is_mi_like_p (current_uiout))
2199 ui_out_field_int (current_uiout, "signal1_pending", signal1_pending);
2200 ui_out_field_fmt (current_uiout, "signal1", "0x%s", phex_nz (signal1, 4));
2201 ui_out_field_int (current_uiout, "signal1_type", signal1_type);
2202 ui_out_field_int (current_uiout, "signal2_pending", signal2_pending);
2203 ui_out_field_fmt (current_uiout, "signal2", "0x%s", phex_nz (signal2, 4));
2204 ui_out_field_int (current_uiout, "signal2_type", signal2_type);
2208 if (signal1_pending)
2209 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
2211 printf_filtered (_("Signal 1 not pending "));
2214 printf_filtered (_("(Type Or)\n"));
2216 printf_filtered (_("(Type Overwrite)\n"));
2218 if (signal2_pending)
2219 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
2221 printf_filtered (_("Signal 2 not pending "));
2224 printf_filtered (_("(Type Or)\n"));
2226 printf_filtered (_("(Type Overwrite)\n"));
2229 do_cleanups (chain);
/* Print NR 4-byte mailbox entries from BUF as a one-column ui_out
   table titled MSG, with per-row field name FIELD.  Shared helper
   for the three mailbox listings below.  */
2233 info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order,
2234 const char *field, const char *msg)
2236 struct cleanup *chain;
2242 chain = make_cleanup_ui_out_table_begin_end (current_uiout, 1, nr, "mbox");
2244 ui_out_table_header (current_uiout, 32, ui_left, field, msg);
2245 ui_out_table_body (current_uiout);
2247 for (i = 0; i < nr; i++)
2249 struct cleanup *val_chain;
2251 val_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "mbox");
2252 val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
2253 ui_out_field_fmt (current_uiout, field, "0x%s", phex (val, 4));
2254 do_cleanups (val_chain);
/* CLI output needs explicit newlines between rows; MI does not.  */
2256 if (!ui_out_is_mi_like_p (current_uiout))
2257 printf_filtered ("\n");
2260 do_cleanups (chain);
/* Implement the "info spu mailbox" command: read and display the
   outbound, outbound-interrupt, and inbound mailbox queues via
   spufs.  NOTE(review): '¤t_target' below is mojibake for
   '&current_target'; restore before compiling.  */
2264 info_spu_mailbox_command (char *args, int from_tty)
2266 struct frame_info *frame = get_selected_frame (NULL);
2267 struct gdbarch *gdbarch = get_frame_arch (frame);
2268 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2269 struct cleanup *chain;
2275 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2276 error (_("\"info spu\" is only supported on the SPU architecture."));
2278 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2280 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoMailbox");
2282 xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
2283 len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
2284 buf, 0, sizeof buf);
2286 error (_("Could not read mbox_info."));
2288 info_spu_mailbox_list (buf, len / 4, byte_order,
2289 "mbox", "SPU Outbound Mailbox");
2291 xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
2292 len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
2293 buf, 0, sizeof buf);
2295 error (_("Could not read ibox_info."));
2297 info_spu_mailbox_list (buf, len / 4, byte_order,
2298 "ibox", "SPU Outbound Interrupt Mailbox");
2300 xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
2301 len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
2302 buf, 0, sizeof buf);
2304 error (_("Could not read wbox_info."));
2306 info_spu_mailbox_list (buf, len / 4, byte_order,
2307 "wbox", "SPU Inbound Mailbox");
2309 do_cleanups (chain);
2313 spu_mfc_get_bitfield (ULONGEST word, int first, int last)
2315 ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
2316 return (word >> (63 - last)) & mask;
/* Display the NR MFC command-queue entries in BUF (32 bytes each) as
   a ui_out table, ordered so that each command appears after all
   commands it depends on.  Shared by "info spu dma" and "info spu
   proxydma".  */
2320 info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order)
/* Mnemonic lookup table indexed by the 8-bit MFC command opcode;
   NULL entries are printed numerically.  */
2322 static char *spu_mfc_opcode[256] =
2324 /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2325 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2326 /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2327 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2328 /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
2329 "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
2330 /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
2331 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2332 /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
2333 "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
2334 /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2335 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2336 /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2337 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2338 /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2339 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2340 /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
2341 NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
2342 /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2343 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2344 /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
2345 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2346 /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
2347 "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2348 /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2349 "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
2350 /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2351 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2352 /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2353 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2354 /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2355 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2358 int *seq = alloca (nr * sizeof (int));
2360 struct cleanup *chain;
2364 /* Determine sequence in which to display (valid) entries.  */
/* "done" is presumably a bitmask of entries already placed in SEQ
   (its declaration is elided from this excerpt) -- TODO confirm.  */
2365 for (i = 0; i < nr; i++)
2367 /* Search for the first valid entry all of whose
2368 dependencies are met.  */
2369 for (j = 0; j < nr; j++)
2371 ULONGEST mfc_cq_dw3;
2372 ULONGEST dependencies;
2374 if (done & (1 << (nr - 1 - j)))
2378 = extract_unsigned_integer (buf + 32*j + 24,8, byte_order);
2379 if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
2382 dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
2383 if ((dependencies & done) != dependencies)
2387 done |= 1 << (nr - 1 - j);
2398 chain = make_cleanup_ui_out_table_begin_end (current_uiout, 10, nr,
2401 ui_out_table_header (current_uiout, 7, ui_left, "opcode", "Opcode");
2402 ui_out_table_header (current_uiout, 3, ui_left, "tag", "Tag");
2403 ui_out_table_header (current_uiout, 3, ui_left, "tid", "TId");
2404 ui_out_table_header (current_uiout, 3, ui_left, "rid", "RId");
2405 ui_out_table_header (current_uiout, 18, ui_left, "ea", "EA");
2406 ui_out_table_header (current_uiout, 7, ui_left, "lsa", "LSA");
2407 ui_out_table_header (current_uiout, 7, ui_left, "size", "Size");
2408 ui_out_table_header (current_uiout, 7, ui_left, "lstaddr", "LstAddr");
2409 ui_out_table_header (current_uiout, 7, ui_left, "lstsize", "LstSize");
2410 ui_out_table_header (current_uiout, 1, ui_left, "error_p", "E");
2412 ui_out_table_body (current_uiout);
2414 for (i = 0; i < nr; i++)
2416 struct cleanup *cmd_chain;
2417 ULONGEST mfc_cq_dw0;
2418 ULONGEST mfc_cq_dw1;
2419 ULONGEST mfc_cq_dw2;
2420 int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
2421 int list_lsa, list_size, mfc_lsa, mfc_size;
2423 int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;
2425 /* Decode contents of MFC Command Queue Context Save/Restore Registers.
2426 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1.  */
2429 = extract_unsigned_integer (buf + 32*seq[i], 8, byte_order);
2431 = extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order);
2433 = extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order);
2435 list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
2436 list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
2437 mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
2438 mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
2439 list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
2440 rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
2441 tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);
/* The effective address is split across two doublewords.  */
2443 mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
2444 | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);
2446 mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
2447 mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
2448 noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
2449 qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
2450 ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
2451 cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);
2453 cmd_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "cmd");
/* Prefer the symbolic opcode name when we have one.  */
2455 if (spu_mfc_opcode[mfc_cmd_opcode])
2456 ui_out_field_string (current_uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
2458 ui_out_field_int (current_uiout, "opcode", mfc_cmd_opcode);
2460 ui_out_field_int (current_uiout, "tag", mfc_cmd_tag);
2461 ui_out_field_int (current_uiout, "tid", tclass_id);
2462 ui_out_field_int (current_uiout, "rid", rclass_id);
2465 ui_out_field_fmt (current_uiout, "ea", "0x%s", phex (mfc_ea, 8));
2467 ui_out_field_skip (current_uiout, "ea");
2469 ui_out_field_fmt (current_uiout, "lsa", "0x%05x", mfc_lsa << 4);
2471 ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size << 4);
2473 ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size);
2477 ui_out_field_fmt (current_uiout, "lstaddr", "0x%05x", list_lsa << 3);
2478 ui_out_field_fmt (current_uiout, "lstsize", "0x%05x", list_size << 3);
2482 ui_out_field_skip (current_uiout, "lstaddr");
2483 ui_out_field_skip (current_uiout, "lstsize");
2487 ui_out_field_string (current_uiout, "error_p", "*");
2489 ui_out_field_skip (current_uiout, "error_p");
2491 do_cleanups (cmd_chain);
2493 if (!ui_out_is_mi_like_p (current_uiout))
2494 printf_filtered ("\n");
2497 do_cleanups (chain);
/* Implement the "info spu dma" command: display the MFC DMA status
   words (tag-group status/mask, stall-and-notify, atomic command
   status) followed by the 16-entry MFC command queue.
   NOTE(review): '¤t_target' below is mojibake for
   '&current_target'; restore before compiling.  */
2501 info_spu_dma_command (char *args, int from_tty)
2503 struct frame_info *frame = get_selected_frame (NULL);
2504 struct gdbarch *gdbarch = get_frame_arch (frame);
2505 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2506 ULONGEST dma_info_type;
2507 ULONGEST dma_info_mask;
2508 ULONGEST dma_info_status;
2509 ULONGEST dma_info_stall_and_notify;
2510 ULONGEST dma_info_atomic_command_status;
2511 struct cleanup *chain;
2517 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2518 error (_("\"info spu\" is only supported on the SPU architecture."));
2520 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
/* dma_info layout: five 8-byte status words followed by sixteen
   32-byte command-queue entries.  */
2522 xsnprintf (annex, sizeof annex, "%d/dma_info", id);
2523 len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
2524 buf, 0, 40 + 16 * 32);
2526 error (_("Could not read dma_info."));
2529 = extract_unsigned_integer (buf, 8, byte_order);
2531 = extract_unsigned_integer (buf + 8, 8, byte_order);
2533 = extract_unsigned_integer (buf + 16, 8, byte_order);
2534 dma_info_stall_and_notify
2535 = extract_unsigned_integer (buf + 24, 8, byte_order);
2536 dma_info_atomic_command_status
2537 = extract_unsigned_integer (buf + 32, 8, byte_order);
2539 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoDMA");
2541 if (ui_out_is_mi_like_p (current_uiout))
2543 ui_out_field_fmt (current_uiout, "dma_info_type", "0x%s",
2544 phex_nz (dma_info_type, 4));
2545 ui_out_field_fmt (current_uiout, "dma_info_mask", "0x%s",
2546 phex_nz (dma_info_mask, 4));
2547 ui_out_field_fmt (current_uiout, "dma_info_status", "0x%s",
2548 phex_nz (dma_info_status, 4));
2549 ui_out_field_fmt (current_uiout, "dma_info_stall_and_notify", "0x%s",
2550 phex_nz (dma_info_stall_and_notify, 4));
2551 ui_out_field_fmt (current_uiout, "dma_info_atomic_command_status", "0x%s",
2552 phex_nz (dma_info_atomic_command_status, 4));
2556 const char *query_msg = _("no query pending");
/* Bit 2 of dma_info_type flags a pending query; bits 0-1 encode
   its kind.  */
2558 if (dma_info_type & 4)
2559 switch (dma_info_type & 3)
2561 case 1: query_msg = _("'any' query pending"); break;
2562 case 2: query_msg = _("'all' query pending"); break;
2563 default: query_msg = _("undefined query type"); break;
2566 printf_filtered (_("Tag-Group Status  0x%s\n"),
2567 phex (dma_info_status, 4));
2568 printf_filtered (_("Tag-Group Mask    0x%s (%s)\n"),
2569 phex (dma_info_mask, 4), query_msg);
2570 printf_filtered (_("Stall-and-Notify  0x%s\n"),
2571 phex (dma_info_stall_and_notify, 4));
2572 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2573 phex (dma_info_atomic_command_status, 4));
2574 printf_filtered ("\n");
2577 info_spu_dma_cmdlist (buf + 40, 16, byte_order);
2578 do_cleanups (chain);
/* The "info spu proxydma" command: like "info spu dma" above, but reads
   the spufs "proxydma_info" file, which has a smaller header (three
   64-bit fields) and 8 queue entries.  NOTE(review): this listing is
   elided; the opening brace, the declarations of id/annex/buf/len, the
   "if (len <= 0)" guard and the tuple name argument are not shown.  */
2582 info_spu_proxydma_command (char *args, int from_tty)
2584 struct frame_info *frame = get_selected_frame (NULL);
2585 struct gdbarch *gdbarch = get_frame_arch (frame);
2586 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2587 ULONGEST dma_info_type;
2588 ULONGEST dma_info_mask;
2589 ULONGEST dma_info_status;
2590 struct cleanup *chain;
2596 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2597 error (_("\"info spu\" is only supported on the SPU architecture."));
2599 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
/* Annex selects the per-context spufs file: "<id>/proxydma_info".  */
2601 xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
/* NOTE(review): "¤t_target" is mojibake for "&current_target" —
   encoding damage in this listing.  Read size = 24-byte header
   plus 8 command-queue entries of 32 bytes each.  */
2602 len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
2603 buf, 0, 24 + 8 * 32);
2605 error (_("Could not read proxydma_info."));
/* Unpack the three 64-bit header fields.  */
2607 dma_info_type = extract_unsigned_integer (buf, 8, byte_order);
2608 dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
2609 dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);
2611 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout,
/* MI consumers get raw hex fields; the CLI branch below pretty-prints.  */
2614 if (ui_out_is_mi_like_p (current_uiout))
2616 ui_out_field_fmt (current_uiout, "proxydma_info_type", "0x%s",
2617 phex_nz (dma_info_type, 4));
2618 ui_out_field_fmt (current_uiout, "proxydma_info_mask", "0x%s",
2619 phex_nz (dma_info_mask, 4));
2620 ui_out_field_fmt (current_uiout, "proxydma_info_status", "0x%s",
2621 phex_nz (dma_info_status, 4));
2625 const char *query_msg;
/* Unlike the DMA variant, the query kind is decoded unconditionally
   from the low two bits of dma_info_type.  */
2627 switch (dma_info_type & 3)
2629 case 0: query_msg = _("no query pending"); break;
2630 case 1: query_msg = _("'any' query pending"); break;
2631 case 2: query_msg = _("'all' query pending"); break;
2632 default: query_msg = _("undefined query type"); break;
2635 printf_filtered (_("Tag-Group Status 0x%s\n"),
2636 phex (dma_info_status, 4));
2637 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2638 phex (dma_info_mask, 4), query_msg);
2639 printf_filtered ("\n");
/* Queue entries start right after the 24-byte header; 8 entries.  */
2642 info_spu_dma_cmdlist (buf + 24, 8, byte_order);
2643 do_cleanups (chain);
/* The "info spu" prefix command: a bare "info spu" prints a usage
   message and lists the registered "info spu " subcommands.  */
2647 info_spu_command (char *args, int from_tty)
2649 printf_unfiltered (_("\"info spu\" must be followed by "
2650 "the name of an SPU facility.\n"));
2651 help_list (infospucmdlist, "info spu ", all_commands, gdb_stdout);
2655 /* Root of all "set spu "/"show spu " commands. */
/* The "show spu" prefix command: list the available "show spu "
   subcommands.  */
2658 show_spu_command (char *args, int from_tty)
2660 help_list (showspucmdlist, "show spu ", all_commands, gdb_stdout);
/* The "set spu" prefix command: list the available "set spu "
   subcommands.  */
2664 set_spu_command (char *args, int from_tty)
2666 help_list (setspucmdlist, "set spu ", all_commands, gdb_stdout);
/* "show spu stop-on-load" callback: report whether GDB stops for new
   SPE threads (the spu_stop_on_load_p setting).  NOTE(review): the
   final "value);" argument line of the fprintf_filtered call is
   elided from this listing.  */
2670 show_spu_stop_on_load (struct ui_file *file, int from_tty,
2671 struct cmd_list_element *c, const char *value)
2673 fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"),
/* "show spu auto-flush-cache" callback: report whether the SW-managed
   cache is flushed automatically (the spu_auto_flush_cache_p setting).
   NOTE(review): the final "value);" argument line is elided from this
   listing.  */
2678 show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
2679 struct cmd_list_element *c, const char *value)
2681 fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
2686 /* Set up gdbarch struct. */
/* gdbarch initialization for the SPU target: return an existing gdbarch
   matching the requested spufs context ID, or allocate and populate a
   new one.  NOTE(review): this listing is elided — the declaration of
   "id", the guard around the tdep_info dereference, the loop's
   termination condition, several section-header comments and the final
   "return gdbarch;" are not shown.  */
2688 static struct gdbarch *
2689 spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2691 struct gdbarch *gdbarch;
2692 struct gdbarch_tdep *tdep;
2695 /* Which spufs ID was requested as address space? */
/* NOTE(review): presumably guarded by an "if (info.tdep_info)" on the
   elided preceding line — confirm against the full source.  */
2697 id = *(int *)info.tdep_info;
2698 /* For objfile architectures of SPU solibs, decode the ID from the name.
2699 This assumes the filename convention employed by solib-spu.c. */
/* NOTE(review): presumably guarded by checks that info.abfd and "name"
   are non-NULL on elided lines.  */
2702 char *name = strrchr (info.abfd->filename, '@');
2704 sscanf (name, "@0x%*x <%d>", &id);
2707 /* Find a candidate among extant architectures. */
2708 for (arches = gdbarch_list_lookup_by_info (arches, &info);
2710 arches = gdbarch_list_lookup_by_info (arches->next, &info))
/* Reuse an architecture only if it was created for the same spufs ID.  */
2712 tdep = gdbarch_tdep (arches->gdbarch);
2713 if (tdep && tdep->id == id)
2714 return arches->gdbarch;
2717 /* None found, so create a new architecture. */
2718 tdep = XCNEW (struct gdbarch_tdep);
2720 gdbarch = gdbarch_alloc (&info, tdep);
/* Disassembler.  */
2723 set_gdbarch_print_insn (gdbarch, gdb_print_insn_spu);
/* Registers, including the SPU pseudo-register machinery.  */
2726 set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
2727 set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
2728 set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
2729 set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
2730 set_gdbarch_read_pc (gdbarch, spu_read_pc);
2731 set_gdbarch_write_pc (gdbarch, spu_write_pc);
2732 set_gdbarch_register_name (gdbarch, spu_register_name);
2733 set_gdbarch_register_type (gdbarch, spu_register_type);
2734 set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
2735 set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
2736 set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
2737 set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);
2738 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, spu_dwarf_reg_to_regnum);
2739 set_gdbarch_ax_pseudo_register_collect
2740 (gdbarch, spu_ax_pseudo_register_collect);
2741 set_gdbarch_ax_pseudo_register_push_stack
2742 (gdbarch, spu_ax_pseudo_register_push_stack);
/* Fundamental data types: 32-bit ILP32-style layout, IEEE floats,
   64-bit long double mapped to IEEE double.  */
2745 set_gdbarch_char_signed (gdbarch, 0);
2746 set_gdbarch_ptr_bit (gdbarch, 32);
2747 set_gdbarch_addr_bit (gdbarch, 32);
2748 set_gdbarch_short_bit (gdbarch, 16);
2749 set_gdbarch_int_bit (gdbarch, 32);
2750 set_gdbarch_long_bit (gdbarch, 32);
2751 set_gdbarch_long_long_bit (gdbarch, 64);
2752 set_gdbarch_float_bit (gdbarch, 32);
2753 set_gdbarch_double_bit (gdbarch, 64);
2754 set_gdbarch_long_double_bit (gdbarch, 64);
2755 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2756 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2757 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
2759 /* Address handling. */
2760 set_gdbarch_address_to_pointer (gdbarch, spu_address_to_pointer);
2761 set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
2762 set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
2763 set_gdbarch_address_class_type_flags (gdbarch, spu_address_class_type_flags);
2764 set_gdbarch_address_class_type_flags_to_name
2765 (gdbarch, spu_address_class_type_flags_to_name);
2766 set_gdbarch_address_class_name_to_type_flags
2767 (gdbarch, spu_address_class_name_to_type_flags);
2770 /* Inferior function calls. */
2771 set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
2772 set_gdbarch_frame_align (gdbarch, spu_frame_align);
2773 set_gdbarch_frame_red_zone_size (gdbarch, 2000);
2774 set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code);
2775 set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
2776 set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
2777 set_gdbarch_return_value (gdbarch, spu_return_value);
2779 /* Frame handling. */
2780 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2781 dwarf2_append_unwinders (gdbarch);
2782 frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
2783 frame_base_set_default (gdbarch, &spu_frame_base);
2784 set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
2785 set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
2786 set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
2787 set_gdbarch_frame_args_skip (gdbarch, 0);
2788 set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
2789 set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);
2791 /* Cell/B.E. cross-architecture unwinder support. */
2792 frame_unwind_prepend_unwinder (gdbarch, &spu2ppu_unwind);
/* Breakpoints and single-stepping (SPU has no hardware single-step).  */
2795 set_gdbarch_decr_pc_after_break (gdbarch, 4);
2796 set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
2797 set_gdbarch_memory_remove_breakpoint (gdbarch, spu_memory_remove_breakpoint);
2798 set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
2799 set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
2800 set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target);
/* Overlay support.  */
2803 set_gdbarch_overlay_update (gdbarch, spu_overlay_update);
2808 /* Provide a prototype to silence -Wmissing-prototypes. */
2809 extern initialize_file_ftype _initialize_spu_tdep;
2812 _initialize_spu_tdep (void)
2814 register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);
2816 /* Add ourselves to objfile event chain. */
2817 observer_attach_new_objfile (spu_overlay_new_objfile);
2818 spu_overlay_data = register_objfile_data ();
2820 /* Install spu stop-on-load handler. */
2821 observer_attach_new_objfile (spu_catch_start);
2823 /* Add ourselves to normal_stop event chain. */
2824 observer_attach_normal_stop (spu_attach_normal_stop);
2826 /* Add root prefix command for all "set spu"/"show spu" commands. */
2827 add_prefix_cmd ("spu", no_class, set_spu_command,
2828 _("Various SPU specific commands."),
2829 &setspucmdlist, "set spu ", 0, &setlist);
2830 add_prefix_cmd ("spu", no_class, show_spu_command,
2831 _("Various SPU specific commands."),
2832 &showspucmdlist, "show spu ", 0, &showlist);
2834 /* Toggle whether or not to add a temporary breakpoint at the "main"
2835 function of new SPE contexts. */
2836 add_setshow_boolean_cmd ("stop-on-load", class_support,
2837 &spu_stop_on_load_p, _("\
2838 Set whether to stop for new SPE threads."),
2840 Show whether to stop for new SPE threads."),
2842 Use \"on\" to give control to the user when a new SPE thread\n\
2843 enters its \"main\" function.\n\
2844 Use \"off\" to disable stopping for new SPE threads."),
2846 show_spu_stop_on_load,
2847 &setspucmdlist, &showspucmdlist);
2849 /* Toggle whether or not to automatically flush the software-managed
2850 cache whenever SPE execution stops. */
2851 add_setshow_boolean_cmd ("auto-flush-cache", class_support,
2852 &spu_auto_flush_cache_p, _("\
2853 Set whether to automatically flush the software-managed cache."),
2855 Show whether to automatically flush the software-managed cache."),
2857 Use \"on\" to automatically flush the software-managed cache\n\
2858 whenever SPE execution stops.\n\
2859 Use \"off\" to never automatically flush the software-managed cache."),
2861 show_spu_auto_flush_cache,
2862 &setspucmdlist, &showspucmdlist);
2864 /* Add root prefix command for all "info spu" commands. */
2865 add_prefix_cmd ("spu", class_info, info_spu_command,
2866 _("Various SPU specific commands."),
2867 &infospucmdlist, "info spu ", 0, &infolist);
2869 /* Add various "info spu" commands. */
2870 add_cmd ("event", class_info, info_spu_event_command,
2871 _("Display SPU event facility status.\n"),
2873 add_cmd ("signal", class_info, info_spu_signal_command,
2874 _("Display SPU signal notification facility status.\n"),
2876 add_cmd ("mailbox", class_info, info_spu_mailbox_command,
2877 _("Display SPU mailbox facility status.\n"),
2879 add_cmd ("dma", class_info, info_spu_dma_command,
2880 _("Display MFC DMA status.\n"),
2882 add_cmd ("proxydma", class_info, info_spu_proxydma_command,
2883 _("Display MFC Proxy-DMA status.\n"),