+
+/* Address to use for displaced stepping. When debugging a stand-alone
+ SPU executable, entry_point_address () will point to an SPU local-store
+ address and is thus not usable as displaced stepping location. We use
+ the auxiliary vector to determine the PowerPC-side entry point address
+ instead. */
+
+static CORE_ADDR ppc_linux_entry_point_addr = 0;
+
+/* Observer for the inferior_created event: forget the cached PowerPC-side
+   entry point address so it is re-read from the new inferior's auxiliary
+   vector on the next displaced-stepping request. */
+
+static void
+ppc_linux_inferior_created (struct target_ops *target, int from_tty)
+{
+ ppc_linux_entry_point_addr = 0;
+}
+
+/* Return the address to use for displaced stepping (see the comment on
+   ppc_linux_entry_point_addr above).  The first call determines the
+   PowerPC-side entry point from the AT_ENTRY auxiliary vector entry and
+   caches it; subsequent calls return the cached value.  */
+
+static CORE_ADDR
+ppc_linux_displaced_step_location (struct gdbarch *gdbarch)
+{
+  if (ppc_linux_entry_point_addr == 0)
+    {
+      CORE_ADDR addr;
+
+      /* Determine entry point from target auxiliary vector.  */
+      if (target_auxv_search (&current_target, AT_ENTRY, &addr) <= 0)
+	error (_("Cannot find AT_ENTRY auxiliary vector entry."));
+
+      /* Make certain that the address points at real code, and not a
+	 function descriptor.  */
+      addr = gdbarch_convert_from_func_ptr_addr (gdbarch, addr,
+						 &current_target);
+
+      /* Inferior calls also use the entry point as a breakpoint location.
+	 We don't want displaced stepping to interfere with those
+	 breakpoints, so leave space.  */
+      ppc_linux_entry_point_addr = addr + 2 * PPC_INSN_SIZE;
+    }
+
+  return ppc_linux_entry_point_addr;
+}
+
+
+/* Return 1 if PPC_ORIG_R3_REGNUM and PPC_TRAP_REGNUM are usable. */
+int
+ppc_linux_trap_reg_p (struct gdbarch *gdbarch)
+{
+  /* Without a register-bearing target description, the special
+     registers are not included in the register set at all.  */
+  if (!tdesc_has_registers (gdbarch_target_desc (gdbarch)))
+    return 0;
+
+  /* With one, it is safe to query the sizes; the registers are usable
+     exactly when both have a non-zero size.  */
+  if (register_size (gdbarch, PPC_ORIG_R3_REGNUM) <= 0)
+    return 0;
+
+  return register_size (gdbarch, PPC_TRAP_REGNUM) > 0;
+}
+
+/* Return the current system call's number present in the
+   r0 register.  When the function fails, it returns -1.  */
+static LONGEST
+ppc_linux_get_syscall_number (struct gdbarch *gdbarch,
+			      ptid_t ptid)
+{
+  struct regcache *regcache = get_thread_regcache (ptid);
+  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+  /* Buffer for the raw register contents.  The assertion below limits
+     the word size to 8 bytes, so a fixed stack buffer suffices; no heap
+     allocation or cleanup is needed.  */
+  gdb_byte buf[8];
+
+  /* Make sure we're in a 32- or 64-bit machine */
+  gdb_assert (tdep->wordsize == 4 || tdep->wordsize == 8);
+
+  /* Getting the system call number from the register.
+     When dealing with PowerPC architecture, this information
+     is stored at 0th register.  */
+  regcache_cooked_read (regcache, tdep->ppc_gp0_regnum, buf);
+
+  return extract_signed_integer (buf, tdep->wordsize, byte_order);
+}
+
+/* gdbarch_write_pc implementation: install PC and, where supported,
+   neutralize kernel system-call restarting.  */
+static void
+ppc_linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
+{
+  struct gdbarch *gdbarch = get_regcache_arch (regcache);
+
+  regcache_cooked_write_unsigned (regcache, gdbarch_pc_regnum (gdbarch), pc);
+
+  if (!ppc_linux_trap_reg_p (gdbarch))
+    return;
+
+  /* Set special TRAP register to -1 to prevent the kernel from
+     messing with the PC we just installed, if we happen to be
+     within an interrupted system call that the kernel wants to
+     restart.
+
+     Note that after we return from the dummy call, the TRAP and
+     ORIG_R3 registers will be automatically restored, and the
+     kernel continues to restart the system call at this point.  */
+  regcache_cooked_write_unsigned (regcache, PPC_TRAP_REGNUM, -1);
+}
+
+/* Callback for bfd_sections_find_if: nonzero for sections whose name
+   starts with "SPU/".  */
+static int
+ppc_linux_spu_section (bfd *abfd, asection *asect, void *user_data)
+{
+  const char *name = bfd_section_name (abfd, asect);
+
+  return strncmp (name, "SPU/", 4) == 0;
+}
+
+/* Select a target description for a PPC Linux core file, keyed on the
+   size of the general-purpose register note section and on which
+   optional register sections (SPU context, VSX, AltiVec) are present.
+   Returns NULL when no matching description exists.  */
+static const struct target_desc *
+ppc_linux_core_read_description (struct gdbarch *gdbarch,
+				 struct target_ops *target,
+				 bfd *abfd)
+{
+  asection *cell = bfd_sections_find_if (abfd, ppc_linux_spu_section, NULL);
+  asection *altivec = bfd_get_section_by_name (abfd, ".reg-ppc-vmx");
+  asection *vsx = bfd_get_section_by_name (abfd, ".reg-ppc-vsx");
+  asection *section = bfd_get_section_by_name (abfd, ".reg");
+  bfd_size_type size;
+
+  if (section == NULL)
+    return NULL;
+
+  /* 48 slots of the target word size: distinguishes 32-bit cores from
+     64-bit ones.  */
+  size = bfd_section_size (abfd, section);
+
+  if (size == 48 * 4)
+    {
+      if (cell)
+	return tdesc_powerpc_cell32l;
+      if (vsx)
+	return tdesc_powerpc_vsx32l;
+      if (altivec)
+	return tdesc_powerpc_altivec32l;
+      return tdesc_powerpc_32l;
+    }
+
+  if (size == 48 * 8)
+    {
+      if (cell)
+	return tdesc_powerpc_cell64l;
+      if (vsx)
+	return tdesc_powerpc_vsx64l;
+      if (altivec)
+	return tdesc_powerpc_altivec64l;
+      return tdesc_powerpc_64l;
+    }
+
+  return NULL;
+}
+
+
+/* Implementation of `gdbarch_elf_make_msymbol_special', as defined in
+   gdbarch.h.  This implementation is used for the ELFv2 ABI only.  */
+
+static void
+ppc_elfv2_elf_make_msymbol_special (asymbol *sym, struct minimal_symbol *msym)
+{
+  elf_symbol_type *elf_sym = (elf_symbol_type *) sym;
+  unsigned int st_other = elf_sym->internal_elf_sym.st_other;
+
+  /* If the symbol is marked as having a local entry point, set a target
+     flag in the msymbol.  We currently only support local entry point
+     offsets of 8 bytes, which is the only entry point offset ever used
+     by current compilers.  If/when other offsets are ever used, we will
+     have to use additional target flag bits to store them.  */
+  if (PPC64_LOCAL_ENTRY_OFFSET (st_other) == 8)
+    MSYMBOL_TARGET_FLAG_1 (msym) = 1;
+}
+
+/* Implementation of `gdbarch_skip_entrypoint', as defined in
+ gdbarch.h. This implementation is used for the ELFv2 ABI only. */
+
+static CORE_ADDR
+ppc_elfv2_skip_entrypoint (struct gdbarch *gdbarch, CORE_ADDR pc)
+{
+ struct bound_minimal_symbol fun;
+ int local_entry_offset = 0;
+
+ /* Find the function containing PC; without a symbol we cannot tell
+ whether PC is at a global entry point, so leave it unchanged. */
+ fun = lookup_minimal_symbol_by_pc (pc);
+ if (fun.minsym == NULL)
+ return pc;
+
+ /* See ppc_elfv2_elf_make_msymbol_special for how local entry point
+ offset values are encoded. */
+ if (MSYMBOL_TARGET_FLAG_1 (fun.minsym))
+ local_entry_offset = 8;
+
+ /* If PC lies between the symbol's address (the global entry point)
+ and its local entry point, advance PC to the local entry point. */
+ if (BMSYMBOL_VALUE_ADDRESS (fun) <= pc
+ && pc < BMSYMBOL_VALUE_ADDRESS (fun) + local_entry_offset)
+ return BMSYMBOL_VALUE_ADDRESS (fun) + local_entry_offset;
+
+ return pc;
+}
+
+/* Implementation of `gdbarch_stap_is_single_operand', as defined in
+   gdbarch.h.  */
+
+static int
+ppc_stap_is_single_operand (struct gdbarch *gdbarch, const char *s)
+{
+  if (*s == 'i')
+    return 1;			/* Literal number.  */
+  if (isdigit (*s) && s[1] == '(' && isdigit (s[2]))
+    return 1;			/* Displacement.  */
+  if (*s == '(' && isdigit (s[1]))
+    return 1;			/* Register indirection.  */
+  return isdigit (*s) != 0;	/* Register value.  */
+}
+
+/* Implementation of `gdbarch_stap_parse_special_token', as defined in
+ gdbarch.h. */
+
+static int
+ppc_stap_parse_special_token (struct gdbarch *gdbarch,
+ struct stap_parse_info *p)
+{
+ /* A bare digit sequence here names a register without the `r' prefix;
+ rewrite it as a register expression. Returns 1 when the token was
+ consumed, 0 to defer to the generic probe-argument parser. */
+ if (isdigit (*p->arg))
+ {
+ /* This temporary pointer is needed because we have to do a lookahead.
+ We could be dealing with a register displacement, and in such case
+ we would not need to do anything. */
+ const char *s = p->arg;
+ char *regname;
+ int len;
+ struct stoken str;
+
+ while (isdigit (*s))
+ ++s;
+
+ if (*s == '(')
+ {
+ /* It is a register displacement indeed. Returning 0 means we are
+ deferring the treatment of this case to the generic parser. */
+ return 0;
+ }
+
+ /* Build "rNN" from the digits scanned above; +2 leaves room for the
+ 'r' prefix and the terminating NUL. */
+ len = s - p->arg;
+ regname = alloca (len + 2);
+ regname[0] = 'r';
+
+ strncpy (regname + 1, p->arg, len);
+ ++len;
+ regname[len] = '\0';
+
+ if (user_reg_map_name_to_regnum (gdbarch, regname, len) == -1)
+ error (_("Invalid register name `%s' on expression `%s'."),
+ regname, p->saved_arg);
+
+ /* Emit OP_REGISTER / name string / OP_REGISTER into the expression
+ being built, then advance the parse position past the digits. */
+ write_exp_elt_opcode (&p->pstate, OP_REGISTER);
+ str.ptr = regname;
+ str.length = len;
+ write_exp_string (&p->pstate, str);
+ write_exp_elt_opcode (&p->pstate, OP_REGISTER);
+
+ p->arg = s;
+ }
+ else
+ {
+ /* All the other tokens should be handled correctly by the generic
+ parser. */
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Cell/B.E. active SPE context tracking support. */
+
+/* Objfile (static or dynamic libspe2) that provides the
+ __spe_current_active_context variable, or NULL if none is loaded. */
+static struct objfile *spe_context_objfile = NULL;
+/* Cached link-map address of that objfile, for TLS address lookup. */
+static CORE_ADDR spe_context_lm_addr = 0;
+/* Address of the context variable within its objfile. */
+static CORE_ADDR spe_context_offset = 0;
+
+/* Per-thread cache: thread whose TLS address was last resolved, and the
+ resolved address of __spe_current_active_context in that thread. */
+static ptid_t spe_context_cache_ptid;
+static CORE_ADDR spe_context_cache_address;
+
+/* Hook into inferior_created, solib_loaded, and solib_unloaded observers
+ to track whether we've loaded a version of libspe2 (as static or dynamic
+ library) that provides the __spe_current_active_context variable. */
+static void
+ppc_linux_spe_context_lookup (struct objfile *objfile)
+{
+  struct bound_minimal_symbol sym;
+
+  /* A NULL OBJFILE means: forget everything we know.  */
+  if (objfile == NULL)
+    {
+      spe_context_objfile = NULL;
+      spe_context_lm_addr = 0;
+      spe_context_offset = 0;
+      spe_context_cache_ptid = minus_one_ptid;
+      spe_context_cache_address = 0;
+      return;
+    }
+
+  /* Otherwise, record OBJFILE only if it actually provides the
+     context variable; leave state untouched if it does not.  */
+  sym = lookup_minimal_symbol ("__spe_current_active_context", NULL, objfile);
+  if (sym.minsym == NULL)
+    return;
+
+  spe_context_objfile = objfile;
+  spe_context_lm_addr = svr4_fetch_objfile_link_map (objfile);
+  spe_context_offset = BMSYMBOL_VALUE_ADDRESS (sym);
+  spe_context_cache_ptid = minus_one_ptid;
+  spe_context_cache_address = 0;
+}
+
+/* inferior_created observer: reset SPE context state, then rescan all
+   loaded objfiles for the libspe2 context variable. */
+static void
+ppc_linux_spe_context_inferior_created (struct target_ops *t, int from_tty)
+{
+ struct objfile *objfile;
+
+ ppc_linux_spe_context_lookup (NULL);
+ ALL_OBJFILES (objfile)
+ ppc_linux_spe_context_lookup (objfile);
+}
+
+/* solib_loaded observer: when a libspe shared library is mapped in,
+   read its symbols and look it up for the SPE context variable. */
+static void
+ppc_linux_spe_context_solib_loaded (struct so_list *so)
+{
+ if (strstr (so->so_original_name, "/libspe") != NULL)
+ {
+ solib_read_symbols (so, 0);
+ ppc_linux_spe_context_lookup (so->objfile);
+ }
+}
+
+/* solib_unloaded observer: drop all cached SPE context state if the
+   objfile that provided the context variable is being unloaded. */
+static void
+ppc_linux_spe_context_solib_unloaded (struct so_list *so)
+{
+ if (so->objfile == spe_context_objfile)
+ ppc_linux_spe_context_lookup (NULL);
+}
+
+/* Retrieve contents of the N'th element in the current thread's
+   linked SPE context list into ID and NPC (either may be NULL to skip
+   that output).  Return the address of said context element, or 0 if
+   not found.  */
+static CORE_ADDR
+ppc_linux_spe_context (int wordsize, enum bfd_endian byte_order,
+		       int n, int *id, unsigned int *npc)
+{
+  CORE_ADDR spe_context = 0;
+  gdb_byte buf[16];
+  int i;
+
+  /* Quick exit if we have not found __spe_current_active_context.  */
+  if (!spe_context_objfile)
+    return 0;
+
+  /* Look up cached address of thread-local variable.  */
+  if (!ptid_equal (spe_context_cache_ptid, inferior_ptid))
+    {
+      struct target_ops *target = &current_target;
+      volatile struct gdb_exception ex;
+
+      TRY_CATCH (ex, RETURN_MASK_ERROR)
+	{
+	  /* We do not call target_translate_tls_address here, because
+	     svr4_fetch_objfile_link_map may invalidate the frame chain,
+	     which we must not do while inside a frame sniffer.
+
+	     Instead, we have cached the lm_addr value, and use that to
+	     directly call the target's to_get_thread_local_address.  */
+	  spe_context_cache_address
+	    = target->to_get_thread_local_address (target, inferior_ptid,
+						   spe_context_lm_addr,
+						   spe_context_offset);
+	  spe_context_cache_ptid = inferior_ptid;
+	}
+
+      if (ex.reason < 0)
+	return 0;
+    }
+
+  /* Read variable value.  */
+  if (target_read_memory (spe_context_cache_address, buf, wordsize) == 0)
+    spe_context = extract_unsigned_integer (buf, wordsize, byte_order);
+
+  /* Cycle through to N'th linked list element.  */
+  for (i = 0; i < n && spe_context; i++)
+    if (target_read_memory (spe_context + align_up (12, wordsize),
+			    buf, wordsize) == 0)
+      spe_context = extract_unsigned_integer (buf, wordsize, byte_order);
+    else
+      spe_context = 0;
+
+  /* Read current context.  */
+  if (spe_context
+      && target_read_memory (spe_context, buf, 12) != 0)
+    spe_context = 0;
+
+  /* Extract data elements.  */
+  if (spe_context)
+    {
+      if (id)
+	*id = extract_signed_integer (buf, 4, byte_order);
+      if (npc)
+	*npc = extract_unsigned_integer (buf + 4, 4, byte_order);
+    }
+
+  return spe_context;
+}
+
+
+/* Cell/B.E. cross-architecture unwinder support. */
+
+/* Cache attached to a PPU-to-SPU boundary frame: the synthesized frame
+   ID and a regcache holding the captured SPU register state. */
+struct ppu2spu_cache
+{
+ struct frame_id frame_id;
+ struct regcache *regcache;
+};
+
+/* frame_unwind prev_arch callback: the previous frame's architecture
+   is the one of the cached SPU regcache. */
+static struct gdbarch *
+ppu2spu_prev_arch (struct frame_info *this_frame, void **this_cache)
+{
+ struct ppu2spu_cache *cache = *this_cache;
+ return get_regcache_arch (cache->regcache);
+}
+
+/* frame_unwind this_id callback: return the frame ID built by the
+   sniffer. */
+static void
+ppu2spu_this_id (struct frame_info *this_frame,
+ void **this_cache, struct frame_id *this_id)
+{
+ struct ppu2spu_cache *cache = *this_cache;
+ *this_id = cache->frame_id;
+}
+
+/* frame_unwind prev_register callback: read REGNUM from the cached SPU
+   regcache (raw or pseudo) and wrap it as a frame value. */
+static struct value *
+ppu2spu_prev_register (struct frame_info *this_frame,
+ void **this_cache, int regnum)
+{
+ struct ppu2spu_cache *cache = *this_cache;
+ struct gdbarch *gdbarch = get_regcache_arch (cache->regcache);
+ gdb_byte *buf;
+
+ buf = alloca (register_size (gdbarch, regnum));
+
+ /* Register numbers below the raw count come straight from the
+ regcache; higher numbers are pseudo registers. */
+ if (regnum < gdbarch_num_regs (gdbarch))
+ regcache_raw_read (cache->regcache, regnum, buf);
+ else
+ gdbarch_pseudo_register_read (gdbarch, cache->regcache, regnum, buf);
+
+ return frame_unwind_got_bytes (this_frame, regnum, buf);
+}
+
+/* Scratch data collected by the sniffer before the cache's regcache is
+   built: the SPU gdbarch, the SPE context ID and next PC, and the 128
+   16-byte SPU general-purpose registers. */
+struct ppu2spu_data
+{
+ struct gdbarch *gdbarch;
+ int id;
+ unsigned int npc;
+ gdb_byte gprs[128*16];
+};
+
+/* regcache_save callback: supply SPU register REGNUM from the data
+   captured in SRC.  Returns REG_VALID for registers we captured,
+   REG_UNAVAILABLE otherwise.  */
+static int
+ppu2spu_unwind_register (void *src, int regnum, gdb_byte *buf)
+{
+  struct ppu2spu_data *data = src;
+  enum bfd_endian byte_order = gdbarch_byte_order (data->gdbarch);
+
+  if (regnum >= 0 && regnum < SPU_NUM_GPRS)
+    {
+      memcpy (buf, data->gprs + 16 * regnum, 16);
+      return REG_VALID;
+    }
+
+  if (regnum == SPU_ID_REGNUM)
+    {
+      store_unsigned_integer (buf, 4, byte_order, data->id);
+      return REG_VALID;
+    }
+
+  if (regnum == SPU_PC_REGNUM)
+    {
+      store_unsigned_integer (buf, 4, byte_order, data->npc);
+      return REG_VALID;
+    }
+
+  return REG_UNAVAILABLE;
+}
+
+/* Frame sniffer: recognize the boundary between a PowerPC frame chain
+   and an active SPU context, capturing the SPU register state into a
+   ppu2spu_cache.  Returns 1 and sets *THIS_PROLOGUE_CACHE on success,
+   0 otherwise.  */
+static int
+ppu2spu_sniffer (const struct frame_unwind *self,
+		 struct frame_info *this_frame, void **this_prologue_cache)
+{
+  struct gdbarch *gdbarch = get_frame_arch (this_frame);
+  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+  struct ppu2spu_data data;
+  struct frame_info *fi;
+  CORE_ADDR base, func, backchain, spe_context;
+  gdb_byte buf[8];
+  int n = 0;
+
+  /* Count the number of SPU contexts already in the frame chain.  */
+  for (fi = get_next_frame (this_frame); fi; fi = get_next_frame (fi))
+    if (get_frame_type (fi) == ARCH_FRAME
+	&& gdbarch_bfd_arch_info (get_frame_arch (fi))->arch == bfd_arch_spu)
+      n++;
+
+  base = get_frame_sp (this_frame);
+  func = get_frame_pc (this_frame);
+  if (target_read_memory (base, buf, tdep->wordsize))
+    return 0;
+  backchain = extract_unsigned_integer (buf, tdep->wordsize, byte_order);
+
+  /* The boundary is recognized when the N'th SPE context lies within
+     the current stack frame.  */
+  spe_context = ppc_linux_spe_context (tdep->wordsize, byte_order,
+				       n, &data.id, &data.npc);
+  if (spe_context && base <= spe_context && spe_context < backchain)
+    {
+      char annex[32];
+
+      /* Find gdbarch for SPU.  */
+      struct gdbarch_info info;
+      gdbarch_info_init (&info);
+      info.bfd_arch_info = bfd_lookup_arch (bfd_arch_spu, bfd_mach_spu);
+      info.byte_order = BFD_ENDIAN_BIG;
+      info.osabi = GDB_OSABI_LINUX;
+      info.tdep_info = (void *) &data.id;
+      data.gdbarch = gdbarch_find_by_info (info);
+      if (!data.gdbarch)
+	return 0;
+
+      /* Fetch the complete SPU register set for this context.  */
+      xsnprintf (annex, sizeof annex, "%d/regs", data.id);
+      if (target_read (&current_target, TARGET_OBJECT_SPU, annex,
+		       data.gprs, 0, sizeof data.gprs)
+	  == sizeof data.gprs)
+	{
+	  struct ppu2spu_cache *cache
+	    = FRAME_OBSTACK_CALLOC (1, struct ppu2spu_cache);
+
+	  struct address_space *aspace = get_frame_address_space (this_frame);
+	  struct regcache *regcache = regcache_xmalloc (data.gdbarch, aspace);
+	  struct cleanup *cleanups = make_cleanup_regcache_xfree (regcache);
+	  regcache_save (regcache, ppu2spu_unwind_register, &data);
+	  discard_cleanups (cleanups);
+
+	  cache->frame_id = frame_id_build (base, func);
+	  cache->regcache = regcache;
+	  *this_prologue_cache = cache;
+	  return 1;
+	}
+    }
+
+  return 0;
+}
+
+/* frame_unwind dealloc_cache callback: free the regcache owned by the
+   boundary frame's cache. */
+static void
+ppu2spu_dealloc_cache (struct frame_info *self, void *this_cache)
+{
+ struct ppu2spu_cache *cache = this_cache;
+ regcache_xfree (cache->regcache);
+}
+
+/* Unwinder crossing from a PowerPC frame into the active SPU context. */
+static const struct frame_unwind ppu2spu_unwind = {
+ ARCH_FRAME,
+ default_frame_unwind_stop_reason,
+ ppu2spu_this_id,
+ ppu2spu_prev_register,
+ NULL, /* unwind_data */
+ ppu2spu_sniffer,
+ ppu2spu_dealloc_cache,
+ ppu2spu_prev_arch,
+};
+
+