forked from Imagelibrary/binutils-gdb
clean up some target delegation cases
This patch cleans up some minor inconsistencies in target delegation. Its primary purpose is to avoid confusion in the code. A few spots were checking the "beneath" target; however, this can only be NULL for the dummy target, so such tests are not needed. Some other spots were iterating over the beneath targets, looking for a method implementation. This is not needed for methods handled by make-target-delegates, as there is always an implementation.

2014-07-18  Tom Tromey  <tromey@redhat.com>

	PR gdb/17130:
	* spu-multiarch.c (spu_region_ok_for_hw_watchpoint)
	(spu_fetch_registers, spu_store_registers, spu_xfer_partial)
	(spu_search_memory, spu_mourn_inferior): Simplify delegation.
	* linux-thread-db.c (thread_db_pid_to_str): Always delegate.
	* windows-nat.c (windows_xfer_partial): Always delegate.
	* record-btrace.c (record_btrace_xfer_partial): Simplify
	delegation.
	(record_btrace_fetch_registers, record_btrace_store_registers)
	(record_btrace_prepare_to_store, record_btrace_resume)
	(record_btrace_wait, record_btrace_find_new_threads)
	(record_btrace_thread_alive): Likewise.
	* procfs.c (procfs_xfer_partial): Always delegate.
	* corelow.c (core_xfer_partial): Always delegate.
	* sol-thread.c (sol_find_new_threads): Simplify delegation.
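To illustrate the second point, here is a minimal, self-contained sketch of the before/after delegation pattern. The types and function names below are simplified stand-ins, not GDB's real declarations; the actual hunks follow.

/* Sketch of the delegation cleanup; simplified stand-in types, not
   GDB's real target_ops.  */
#include <stdio.h>

struct target_ops
{
  struct target_ops *beneath;
  int (*to_thread_alive) (struct target_ops *ops, int pid);
};

/* Old pattern: walk the stack of beneath targets until one of them
   implements the method.  */
static int
thread_alive_old (struct target_ops *ops, int pid)
{
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_thread_alive != NULL)
      return ops->to_thread_alive (ops, pid);
  return 0;
}

/* New pattern: for methods handled by make-target-delegates every
   target provides an implementation, so delegating to the immediate
   beneath target is sufficient.  */
static int
thread_alive_new (struct target_ops *ops, int pid)
{
  ops = ops->beneath;
  return ops->to_thread_alive (ops, pid);
}

static int
base_thread_alive (struct target_ops *ops, int pid)
{
  (void) ops;			/* Unused in this toy base target.  */
  return pid == 42;
}

int
main (void)
{
  struct target_ops base = { NULL, base_thread_alive };
  struct target_ops top = { &base, NULL };

  /* Both patterns end up calling the base target's implementation.  */
  printf ("old: %d, new: %d\n",
	  thread_alive_old (&top, 42), thread_alive_new (&top, 42));
  return 0;
}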
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -896,13 +896,9 @@ record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
     }

   /* Forward the request.  */
-  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
-    if (ops->to_xfer_partial != NULL)
-      return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
-                                   offset, len, xfered_len);
-
-  *xfered_len = len;
-  return TARGET_XFER_UNAVAILABLE;
+  ops = ops->beneath;
+  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
+                               offset, len, xfered_len);
 }

 /* The to_insert_breakpoint method of target record-btrace.  */
@@ -996,14 +992,9 @@ record_btrace_fetch_registers (struct target_ops *ops,
     }
   else
     {
-      struct target_ops *t;
+      struct target_ops *t = ops->beneath;

-      for (t = ops->beneath; t != NULL; t = t->beneath)
-        if (t->to_fetch_registers != NULL)
-          {
-            t->to_fetch_registers (t, regcache, regno);
-            break;
-          }
+      t->to_fetch_registers (t, regcache, regno);
     }
 }

@@ -1020,14 +1011,8 @@ record_btrace_store_registers (struct target_ops *ops,

   gdb_assert (may_write_registers != 0);

-  for (t = ops->beneath; t != NULL; t = t->beneath)
-    if (t->to_store_registers != NULL)
-      {
-        t->to_store_registers (t, regcache, regno);
-        return;
-      }
-
-  noprocess ();
+  t = ops->beneath;
+  t->to_store_registers (t, regcache, regno);
 }

 /* The to_prepare_to_store method of target record-btrace.  */
@@ -1041,12 +1026,8 @@ record_btrace_prepare_to_store (struct target_ops *ops,
   if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
     return;

-  for (t = ops->beneath; t != NULL; t = t->beneath)
-    if (t->to_prepare_to_store != NULL)
-      {
-        t->to_prepare_to_store (t, regcache);
-        return;
-      }
+  t = ops->beneath;
+  t->to_prepare_to_store (t, regcache);
 }

 /* The branch trace frame cache.  */
@@ -1533,11 +1514,8 @@ record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
   /* As long as we're not replaying, just forward the request.  */
   if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
     {
-      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
-        if (ops->to_resume != NULL)
-          return ops->to_resume (ops, ptid, step, signal);
-
-      error (_("Cannot find target for stepping."));
+      ops = ops->beneath;
+      return ops->to_resume (ops, ptid, step, signal);
     }

   /* Compute the btrace thread flag for the requested move.  */
@@ -1760,11 +1738,8 @@ record_btrace_wait (struct target_ops *ops, ptid_t ptid,
   /* As long as we're not replaying, just forward the request.  */
   if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
     {
-      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
-        if (ops->to_wait != NULL)
-          return ops->to_wait (ops, ptid, status, options);
-
-      error (_("Cannot find target for waiting."));
+      ops = ops->beneath;
+      return ops->to_wait (ops, ptid, status, options);
     }

   /* Let's find a thread to move.  */
@@ -1826,12 +1801,8 @@ record_btrace_find_new_threads (struct target_ops *ops)
     return;

   /* Forward the request.  */
-  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
-    if (ops->to_find_new_threads != NULL)
-      {
-        ops->to_find_new_threads (ops);
-        break;
-      }
+  ops = ops->beneath;
+  ops->to_find_new_threads (ops);
 }

 /* The to_thread_alive method of target record-btrace.  */
@@ -1844,11 +1815,8 @@ record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
     return find_thread_ptid (ptid) != NULL;

   /* Forward the request.  */
-  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
-    if (ops->to_thread_alive != NULL)
-      return ops->to_thread_alive (ops, ptid);
-
-  return 0;
+  ops = ops->beneath;
+  return ops->to_thread_alive (ops, ptid);
 }

 /* Set the replay branch trace instruction iterator.  If IT is NULL, replay