This patch cleans up some minor inconsistencies in target delegation.
Its primary purpose is to avoid confusion in the code.  A few spots
were checking whether the "beneath" target was NULL; however, it can
only be NULL for the dummy target, so such tests are not needed.  Some
other spots were iterating over the beneath targets, looking for a
method implementation.  This is not needed for methods handled by
make-target-delegates, as there is always an implementation.
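
To illustrate the shape of the change, here is a minimal sketch of the
pattern being removed and its replacement.  The target_ops layout and
the thread_alive functions below are simplified, illustrative stand-ins,
not GDB's actual declarations:

    #include <stddef.h>

    struct target_ops
    {
      /* The target beneath this one on the target stack; NULL only
         beneath the dummy target.  */
      struct target_ops *beneath;
      int (*to_thread_alive) (struct target_ops *ops, int ptid);
    };

    /* Before: walk the target stack, testing each slot for NULL.  */

    static int
    thread_alive_before (struct target_ops *ops, int ptid)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
        if (ops->to_thread_alive != NULL)
          return ops->to_thread_alive (ops, ptid);
      return 0;
    }

    /* After: for methods handled by make-target-delegates, every slot
       is filled in (with a generated delegator or a default), so a
       single unconditional call suffices.  */

    static int
    thread_alive_after (struct target_ops *ops, int ptid)
    {
      ops = ops->beneath;
      return ops->to_thread_alive (ops, ptid);
    }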
2014-07-18  Tom Tromey  <tromey@redhat.com>

	PR gdb/17130:
	* spu-multiarch.c (spu_region_ok_for_hw_watchpoint)
	(spu_fetch_registers, spu_store_registers, spu_xfer_partial)
	(spu_search_memory, spu_mourn_inferior): Simplify delegation.
	* linux-thread-db.c (thread_db_pid_to_str): Always delegate.
	* windows-nat.c (windows_xfer_partial): Always delegate.
	* record-btrace.c (record_btrace_xfer_partial): Simplify
	delegation.
	(record_btrace_fetch_registers, record_btrace_store_registers)
	(record_btrace_prepare_to_store, record_btrace_resume)
	(record_btrace_wait, record_btrace_find_new_threads)
	(record_btrace_thread_alive): Likewise.
	* procfs.c (procfs_xfer_partial): Always delegate.
	* corelow.c (core_xfer_partial): Always delegate.
	* sol-thread.c (sol_find_new_threads): Simplify delegation.
2014-07-18  Tom Tromey  <tromey@redhat.com>

+	PR gdb/17130:
+	* spu-multiarch.c (spu_region_ok_for_hw_watchpoint)
+	(spu_fetch_registers, spu_store_registers, spu_xfer_partial)
+	(spu_search_memory, spu_mourn_inferior): Simplify delegation.
+	* linux-thread-db.c (thread_db_pid_to_str): Always delegate.
+	* windows-nat.c (windows_xfer_partial): Always delegate.
+	* record-btrace.c (record_btrace_xfer_partial): Simplify
+	delegation.
+	(record_btrace_fetch_registers, record_btrace_store_registers)
+	(record_btrace_prepare_to_store, record_btrace_resume)
+	(record_btrace_wait, record_btrace_find_new_threads)
+	(record_btrace_thread_alive): Likewise.
+	* procfs.c (procfs_xfer_partial): Always delegate.
+	* corelow.c (core_xfer_partial): Always delegate.
+	* sol-thread.c (sol_find_new_threads): Simplify delegation.
+
+2014-07-18  Tom Tromey  <tromey@redhat.com>
+
	* exec.c (exec_make_note_section): Move earlier.

2014-07-17  Doug Evans  <dje@google.com>
return TARGET_XFER_E_IO;
default:
- if (ops->beneath != NULL)
- return ops->beneath->to_xfer_partial (ops->beneath, object,
- annex, readbuf,
- writebuf, offset, len,
- xfered_len);
- return TARGET_XFER_E_IO;
+ return ops->beneath->to_xfer_partial (ops->beneath, object,
+ annex, readbuf,
+ writebuf, offset, len,
+ xfered_len);
}
}
}
beneath = find_target_beneath (ops);
- if (beneath->to_pid_to_str (beneath, ptid))
- return beneath->to_pid_to_str (beneath, ptid);
-
- return normal_pid_to_str (ptid);
+ return beneath->to_pid_to_str (beneath, ptid);
}
/* Return a string describing the state of the thread specified by
#endif
default:
- if (ops->beneath != NULL)
- return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
- readbuf, writebuf, offset, len,
- xfered_len);
- return TARGET_XFER_E_IO;
+ return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
+ readbuf, writebuf, offset, len,
+ xfered_len);
}
}
}
/* Forward the request. */
- for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
- if (ops->to_xfer_partial != NULL)
- return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
- offset, len, xfered_len);
-
- *xfered_len = len;
- return TARGET_XFER_UNAVAILABLE;
+ ops = ops->beneath;
+ return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
+ offset, len, xfered_len);
}
/* The to_insert_breakpoint method of target record-btrace. */
}
else
{
- struct target_ops *t;
+ struct target_ops *t = ops->beneath;
- for (t = ops->beneath; t != NULL; t = t->beneath)
- if (t->to_fetch_registers != NULL)
- {
- t->to_fetch_registers (t, regcache, regno);
- break;
- }
+ t->to_fetch_registers (t, regcache, regno);
}
}
gdb_assert (may_write_registers != 0);
- for (t = ops->beneath; t != NULL; t = t->beneath)
- if (t->to_store_registers != NULL)
- {
- t->to_store_registers (t, regcache, regno);
- return;
- }
-
- noprocess ();
+ t = ops->beneath;
+ t->to_store_registers (t, regcache, regno);
}
/* The to_prepare_to_store method of target record-btrace. */
if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
return;
- for (t = ops->beneath; t != NULL; t = t->beneath)
- if (t->to_prepare_to_store != NULL)
- {
- t->to_prepare_to_store (t, regcache);
- return;
- }
+ t = ops->beneath;
+ t->to_prepare_to_store (t, regcache);
}
/* The branch trace frame cache. */
/* As long as we're not replaying, just forward the request. */
if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
{
- for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
- if (ops->to_resume != NULL)
- return ops->to_resume (ops, ptid, step, signal);
-
- error (_("Cannot find target for stepping."));
+ ops = ops->beneath;
+ return ops->to_resume (ops, ptid, step, signal);
}
/* Compute the btrace thread flag for the requested move. */
/* As long as we're not replaying, just forward the request. */
if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
{
- for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
- if (ops->to_wait != NULL)
- return ops->to_wait (ops, ptid, status, options);
-
- error (_("Cannot find target for waiting."));
+ ops = ops->beneath;
+ return ops->to_wait (ops, ptid, status, options);
}
/* Let's find a thread to move. */
return;
/* Forward the request. */
- for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
- if (ops->to_find_new_threads != NULL)
- {
- ops->to_find_new_threads (ops);
- break;
- }
+ ops = ops->beneath;
+ ops->to_find_new_threads (ops);
}
/* The to_thread_alive method of target record-btrace. */
return find_thread_ptid (ptid) != NULL;
/* Forward the request. */
- for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
- if (ops->to_thread_alive != NULL)
- return ops->to_thread_alive (ops, ptid);
-
- return 0;
+ ops = ops->beneath;
+ return ops->to_thread_alive (ops, ptid);
}
/* Set the replay branch trace instruction iterator. If IT is NULL, replay
struct target_ops *beneath = find_target_beneath (ops);
/* First Find any new LWP's. */
- if (beneath->to_find_new_threads != NULL)
- beneath->to_find_new_threads (beneath);
+ beneath->to_find_new_threads (beneath);
/* Then find any new user-level threads. */
p_td_ta_thr_iter (main_ta, sol_find_new_threads_callback, (void *) 0,
CORE_ADDR addr, int len)
{
struct target_ops *ops_beneath = find_target_beneath (&spu_ops);
- while (ops_beneath && !ops_beneath->to_region_ok_for_hw_watchpoint)
- ops_beneath = find_target_beneath (ops_beneath);
/* We cannot watch SPU local store. */
if (SPUADDR_SPU (addr) != -1)
return 0;
- if (ops_beneath)
- return ops_beneath->to_region_ok_for_hw_watchpoint (ops_beneath,
- addr, len);
-
- return 0;
+ return ops_beneath->to_region_ok_for_hw_watchpoint (ops_beneath, addr, len);
}
/* Override the to_fetch_registers routine. */
/* This version applies only if we're currently in spu_run. */
if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
{
- while (ops_beneath && !ops_beneath->to_fetch_registers)
- ops_beneath = find_target_beneath (ops_beneath);
-
- gdb_assert (ops_beneath);
ops_beneath->to_fetch_registers (ops_beneath, regcache, regno);
return;
}
/* This version applies only if we're currently in spu_run. */
if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
{
- while (ops_beneath && !ops_beneath->to_store_registers)
- ops_beneath = find_target_beneath (ops_beneath);
-
- gdb_assert (ops_beneath);
ops_beneath->to_store_registers (ops_beneath, regcache, regno);
return;
}
ULONGEST *xfered_len)
{
struct target_ops *ops_beneath = find_target_beneath (ops);
- while (ops_beneath && !ops_beneath->to_xfer_partial)
- ops_beneath = find_target_beneath (ops_beneath);
- gdb_assert (ops_beneath);
/* Use the "mem" spufs file to access SPU local store. */
if (object == TARGET_OBJECT_MEMORY)
CORE_ADDR *found_addrp)
{
struct target_ops *ops_beneath = find_target_beneath (ops);
- while (ops_beneath && !ops_beneath->to_search_memory)
- ops_beneath = find_target_beneath (ops_beneath);
- /* For SPU local store, always fall back to the simple method. Likewise
- if we do not have any target-specific special implementation. */
- if (!ops_beneath || SPUADDR_SPU (start_addr) >= 0)
+ /* For SPU local store, always fall back to the simple method. */
+ if (SPUADDR_SPU (start_addr) >= 0)
return simple_search_memory (ops,
start_addr, search_space_len,
pattern, pattern_len, found_addrp);
spu_mourn_inferior (struct target_ops *ops)
{
struct target_ops *ops_beneath = find_target_beneath (ops);
- while (ops_beneath && !ops_beneath->to_mourn_inferior)
- ops_beneath = find_target_beneath (ops_beneath);
- gdb_assert (ops_beneath);
ops_beneath->to_mourn_inferior (ops_beneath);
spu_multiarch_deactivate ();
}
writebuf, offset, len, xfered_len);
default:
- if (ops->beneath != NULL)
- return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
- readbuf, writebuf, offset, len,
- xfered_len);
- return TARGET_XFER_E_IO;
+ return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
+ readbuf, writebuf, offset, len,
+ xfered_len);
}
}