gdb: change inf_threads_iterator to yield references
When I added reference_to_pointer_iterator, I intended it as a temporary measure, so that a codebase-wide change would not be needed right away. Remove it from inf_threads_iterator and adjust all the users. It is quite possible that I missed some spots in the files I cannot compile, but those will be easy to fix if it happens.

Change-Id: Iddc462fecfaafb6a9861d185b217bc714e7dc651
Approved-By: Tom Tromey <tom@tromey.com>
Committed by: Simon Marchi
Parent: 7f1cdb3e37
Commit: 1ad8737b3c
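The change boils down to range-for loops receiving a thread_info & instead of a thread_info *: member accesses switch from -> to ., and call sites that still expect a pointer gain an explicit &. The following is a minimal standalone sketch of that pattern; it uses a plain std::list and a simplified thread_info as stand-ins, not GDB's real thread_info, inf_threads_iterator, or intrusive list types.

#include <cstdio>
#include <list>
#include <string>

struct thread_info
{
  int per_inf_num;
  std::string name;
  bool exited;
};

/* Stand-in for a helper that still takes a pointer, like many of the
   GDB functions in the hunks below.  */
static void
notify (thread_info *tp)
{
  std::printf ("thread %d (%s)\n", tp->per_inf_num, tp->name.c_str ());
}

int
main ()
{
  std::list<thread_info> threads = { { 1, "main", false },
                                     { 2, "worker", false } };

  /* Before: the range yielded pointers.
     for (thread_info *tp : inf->non_exited_threads ())
       if (!tp->exited)
         notify (tp);  */

  /* After: the range yields references, so members are accessed with
     '.' and '&' is added where a pointer is still required.  */
  for (thread_info &tp : threads)
    if (!tp.exited)
      notify (&tp);
}

Call sites that merely pass the thread along (switch_to_thread, btrace_enable, and so on) therefore gain a leading &, which accounts for most of the mechanical churn in the diff below.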
@@ -293,10 +293,10 @@ void
 aarch64_notify_debug_reg_change (ptid_t ptid,
 int is_watchpoint, unsigned int idx)
 {
- for (thread_info *tp : current_inferior ()->non_exited_threads ())
+ for (thread_info &tp : current_inferior ()->non_exited_threads ())
 {
- if (tp->ptid.lwp_p ())
- aarch64_debug_pending_threads.emplace (tp->ptid.lwp ());
+ if (tp.ptid.lwp_p ())
+ aarch64_debug_pending_threads.emplace (tp.ptid.lwp ());
 }
 }

@@ -775,18 +775,18 @@ amd_dbgapi_target::resume (ptid_t scope_ptid, int step, enum gdb_signal signo)
 /* Disable forward progress requirement. */
 require_forward_progress (scope_ptid, proc_target, false);

- for (thread_info *thread : all_non_exited_threads (proc_target, scope_ptid))
+ for (thread_info &thread : all_non_exited_threads (proc_target, scope_ptid))
 {
- if (!ptid_is_gpu (thread->ptid))
+ if (!ptid_is_gpu (thread.ptid))
 continue;

- amd_dbgapi_wave_id_t wave_id = get_amd_dbgapi_wave_id (thread->ptid);
+ amd_dbgapi_wave_id_t wave_id = get_amd_dbgapi_wave_id (thread.ptid);
 amd_dbgapi_status_t status;

- wave_info &wi = get_thread_wave_info (thread);
+ wave_info &wi = get_thread_wave_info (&thread);
 amd_dbgapi_resume_mode_t &resume_mode = wi.last_resume_mode;
 amd_dbgapi_exceptions_t wave_exception;
- if (thread->ptid == inferior_ptid)
+ if (thread.ptid == inferior_ptid)
 {
 resume_mode = (step
 ? AMD_DBGAPI_RESUME_MODE_SINGLE_STEP

@@ -934,10 +934,10 @@ amd_dbgapi_target::stop (ptid_t ptid)
 for (auto *inf : all_inferiors (proc_target))
 /* Use the threads_safe iterator since stop_one_thread may delete the
 thread if it has exited. */
- for (auto *thread : inf->threads_safe ())
- if (thread->state != THREAD_EXITED && thread->ptid.matches (ptid)
- && ptid_is_gpu (thread->ptid))
- stop_one_thread (thread);
+ for (auto &thread : inf->threads_safe ())
+ if (thread.state != THREAD_EXITED && thread.ptid.matches (ptid)
+ && ptid_is_gpu (thread.ptid))
+ stop_one_thread (&thread);
 }

 /* Callback for our async event handler. */

@@ -1883,15 +1883,15 @@ amd_dbgapi_target::update_thread_list ()
 /* Prune the wave_ids that already have a thread_info. Any thread_info
 which does not have a corresponding wave_id represents a wave which
 is gone at this point and should be deleted. */
- for (thread_info *tp : inf->threads_safe ())
- if (ptid_is_gpu (tp->ptid) && tp->state != THREAD_EXITED)
+ for (thread_info &tp : inf->threads_safe ())
+ if (ptid_is_gpu (tp.ptid) && tp.state != THREAD_EXITED)
 {
- auto it = threads.find (tp->ptid.tid ());
+ auto it = threads.find (tp.ptid.tid ());

 if (it == threads.end ())
 {
- auto wave_id = get_amd_dbgapi_wave_id (tp->ptid);
- wave_info &wi = get_thread_wave_info (tp);
+ auto wave_id = get_amd_dbgapi_wave_id (tp.ptid);
+ wave_info &wi = get_thread_wave_info (&tp);

 /* Waves that were stepping or in progress of being
 stopped are guaranteed to report a

@@ -1912,7 +1912,7 @@ amd_dbgapi_target::update_thread_list ()
 {
 amd_dbgapi_debug_printf ("wave_%ld disappeared, deleting it",
 wave_id.handle);
- delete_thread_silent (tp);
+ delete_thread_silent (&tp);
 }
 }
 else

@@ -2117,7 +2117,7 @@ amd_dbgapi_inferior_forked (inferior *parent_inf, inferior *child_inf,
 if (fork_kind != TARGET_WAITKIND_VFORKED)
 {
 scoped_restore_current_thread restore_thread;
- switch_to_thread (*child_inf->threads ().begin ());
+ switch_to_thread (&*child_inf->threads ().begin ());
 attach_amd_dbgapi (child_inf);
 }
 }

@@ -624,8 +624,8 @@ breakpoints_should_be_inserted_now (void)

 /* Don't remove breakpoints yet if, even though all threads are
 stopped, we still have events to process. */
- for (thread_info *tp : all_non_exited_threads ())
- if (tp->resumed () && tp->has_pending_waitstatus ())
+ for (thread_info &tp : all_non_exited_threads ())
+ if (tp.resumed () && tp.has_pending_waitstatus ())
 return 1;
 }
 return 0;

@@ -2365,8 +2365,8 @@ btrace_free_objfile (struct objfile *objfile)
 {
 DEBUG ("free objfile");

- for (thread_info *tp : all_non_exited_threads ())
- btrace_clear (tp);
+ for (thread_info &tp : all_non_exited_threads ())
+ btrace_clear (&tp);
 }

 /* See btrace.h. */

@@ -1690,7 +1690,7 @@ darwin_attach_pid (struct inferior *inf)
 static struct thread_info *
 thread_info_from_private_thread_info (darwin_thread_info *pti)
 {
- for (struct thread_info *it : all_threads ())
+ for (struct thread_info &it : all_threads ())
 {
 darwin_thread_info *iter_pti = get_darwin_thread_info (it);

@@ -100,12 +100,12 @@ elf_none_make_corefile_notes (struct gdbarch *gdbarch, bfd *obfd,
 gcore_elf_build_thread_register_notes (gdbarch, signalled_thr,
 stop_signal, obfd, &note_data,
 note_size);
- for (thread_info *thr : current_inferior ()->non_exited_threads ())
+ for (thread_info &thr : current_inferior ()->non_exited_threads ())
 {
- if (thr == signalled_thr)
+ if (&thr == signalled_thr)
 continue;

- gcore_elf_build_thread_register_notes (gdbarch, thr, stop_signal, obfd,
+ gcore_elf_build_thread_register_notes (gdbarch, &thr, stop_signal, obfd,
 &note_data, note_size);
 }

@@ -1203,7 +1203,7 @@ fbsd_nat_target::resume_one_process (ptid_t ptid, int step,
 return;
 }

- for (thread_info *tp : inf->non_exited_threads ())
+ for (thread_info &tp : inf->non_exited_threads ())
 {
 /* If ptid is a specific LWP, suspend all other LWPs in the
 process, otherwise resume all LWPs in the process.. */

@@ -1887,7 +1887,7 @@ fbsd_nat_target::detach_fork_children (inferior *inf)
 {
 /* Detach any child processes associated with pending fork events in
 threads belonging to this process. */
- for (thread_info *tp : inf->non_exited_threads ())
+ for (thread_info &tp : inf->non_exited_threads ())
 detach_fork_children (tp);

 /* Unwind state associated with any pending events. Reset

@@ -722,12 +722,12 @@ fbsd_make_corefile_notes (struct gdbarch *gdbarch, bfd *obfd, int *note_size)
 enum gdb_signal stop_signal = signalled_thr->stop_signal ();
 gcore_elf_build_thread_register_notes (gdbarch, signalled_thr, stop_signal,
 obfd, &note_data, note_size);
- for (thread_info *thr : current_inferior ()->non_exited_threads ())
+ for (thread_info &thr : current_inferior ()->non_exited_threads ())
 {
- if (thr == signalled_thr)
+ if (&thr == signalled_thr)
 continue;

- gcore_elf_build_thread_register_notes (gdbarch, thr, stop_signal,
+ gcore_elf_build_thread_register_notes (gdbarch, &thr, stop_signal,
 obfd, &note_data, note_size);
 }

@@ -850,9 +850,9 @@ gcore_find_signalled_thread ()
 && curr_thr->stop_signal () != GDB_SIGNAL_0)
 return curr_thr;

- for (thread_info *thr : current_inferior ()->non_exited_threads ())
- if (thr->stop_signal () != GDB_SIGNAL_0)
- return thr;
+ for (thread_info &thr : current_inferior ()->non_exited_threads ())
+ if (thr.stop_signal () != GDB_SIGNAL_0)
+ return &thr;

 /* Default to the current thread, unless it has exited. */
 if (curr_thr->state != THREAD_EXITED)

@@ -35,6 +35,7 @@ struct symtab;
 #include "gdbsupport/forward-scope-exit.h"
 #include "displaced-stepping.h"
 #include "gdbsupport/intrusive_list.h"
 #include "gdbsupport/reference-to-pointer-iterator.h"
 #include "thread-fsm.h"
 #include "language.h"

@@ -747,7 +748,7 @@ extern struct thread_info *iterate_over_threads (thread_callback_func);
 Used like this, it walks over all threads of all inferiors of all
 targets:

- for (thread_info *thr : all_threads ())
+ for (thread_info &thr : all_threads ())
 { .... }

 FILTER_PTID can be used to filter out threads that don't match.
gdb/infcmd.c

@@ -1203,20 +1203,20 @@ signal_command (const char *signum_exp, int from_tty)

 thread_info *current = inferior_thread ();

- for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
+ for (thread_info &tp : all_non_exited_threads (resume_target, resume_ptid))
 {
- if (tp == current)
+ if (&tp == current)
 continue;

- if (tp->stop_signal () != GDB_SIGNAL_0
- && signal_pass_state (tp->stop_signal ()))
+ if (tp.stop_signal () != GDB_SIGNAL_0
+ && signal_pass_state (tp.stop_signal ()))
 {
 if (!must_confirm)
 gdb_printf (_("Note:\n"));
 gdb_printf (_(" Thread %s previously stopped with signal %s, %s.\n"),
- print_thread_id (tp),
- gdb_signal_to_name (tp->stop_signal ()),
- gdb_signal_to_string (tp->stop_signal ()));
+ print_thread_id (&tp),
+ gdb_signal_to_name (tp.stop_signal ()),
+ gdb_signal_to_string (tp.stop_signal ()));
 must_confirm = 1;
 }
 }

@@ -2479,12 +2479,12 @@ proceed_after_attach (inferior *inf)
 /* Backup current thread and selected frame. */
 scoped_restore_current_thread restore_thread;

- for (thread_info *thread : inf->non_exited_threads ())
- if (!thread->executing ()
- && !thread->stop_requested
- && thread->stop_signal () == GDB_SIGNAL_0)
+ for (thread_info &thread : inf->non_exited_threads ())
+ if (!thread.executing ()
+ && !thread.stop_requested
+ && thread.stop_signal () == GDB_SIGNAL_0)
 {
- switch_to_thread (thread);
+ switch_to_thread (&thread);
 clear_proceed_status (0);
 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
 }

@@ -2588,10 +2588,10 @@ attach_post_wait (int from_tty, enum attach_post_wait_mode mode)
 stop. For consistency, always select the thread with
 lowest GDB number, which should be the main thread, if it
 still exists. */
- for (thread_info *thread : current_inferior ()->non_exited_threads ())
- if (thread->inf->num < lowest->inf->num
- || thread->per_inf_num < lowest->per_inf_num)
- lowest = thread;
+ for (thread_info &thread : current_inferior ()->non_exited_threads ())
+ if (thread.inf->num < lowest->inf->num
+ || thread.per_inf_num < lowest->per_inf_num)
+ lowest = &thread;

 switch_to_thread (lowest);
 }

@@ -104,8 +104,8 @@ inferior::unpush_target (struct target_ops *t)
 {
 process_stratum_target *proc_target = as_process_stratum_target (t);

- for (thread_info *thread : this->non_exited_threads ())
- proc_target->maybe_remove_resumed_with_pending_wait_status (thread);
+ for (thread_info &thread : this->non_exited_threads ())
+ proc_target->maybe_remove_resumed_with_pending_wait_status (&thread);
 }

 return m_target_stack.unpush (t);

@@ -458,7 +458,7 @@ number_of_live_inferiors (process_stratum_target *proc_target)

 for (inferior *inf : all_non_exited_inferiors (proc_target))
 if (inf->has_execution ())
- for (thread_info *tp ATTRIBUTE_UNUSED : inf->non_exited_threads ())
+ for (thread_info &tp ATTRIBUTE_UNUSED : inf->non_exited_threads ())
 {
 /* Found a live thread in this inferior, go to the next
 inferior. */

@@ -469,7 +469,7 @@ public:
 /* Returns a range adapter covering the inferior's threads,
 including exited threads. Used like this:

- for (thread_info *thr : inf->threads ())
+ for (thread_info &thr : inf->threads ())
 { .... }
 */
 inf_threads_range threads ()

@@ -478,7 +478,7 @@ public:
 /* Returns a range adapter covering the inferior's non-exited
 threads. Used like this:

- for (thread_info *thr : inf->non_exited_threads ())
+ for (thread_info &thr : inf->non_exited_threads ())
 { .... }
 */
 inf_non_exited_threads_range non_exited_threads ()

@@ -488,9 +488,9 @@ public:
 used with range-for, safely. I.e., it is safe to delete the
 currently-iterated thread, like this:

- for (thread_info *t : inf->threads_safe ())
+ for (thread_info &t : inf->threads_safe ())
 if (some_condition ())
- delete f;
+ delete &f;
 */
 inline safe_inf_threads_range threads_safe ()
 { return safe_inf_threads_range (this->thread_list.begin ()); }
gdb/inflow.c

@@ -546,15 +546,15 @@ child_interrupt (struct target_ops *self)
 {
 /* Interrupt the first inferior that has a resumed thread. */
 thread_info *resumed = NULL;
- for (thread_info *thr : all_non_exited_threads ())
+ for (thread_info &thr : all_non_exited_threads ())
 {
- if (thr->executing ())
+ if (thr.executing ())
 {
- resumed = thr;
+ resumed = &thr;
 break;
 }
- if (thr->has_pending_waitstatus ())
- resumed = thr;
+ if (thr.has_pending_waitstatus ())
+ resumed = &thr;
 }

 if (resumed != NULL)
gdb/infrun.c

@@ -723,7 +723,7 @@ holding the child stopped. Try \"set %ps\" or \"%ps\".\n"),
 if (!follow_child && !sched_multi)
 maybe_restore.emplace ();

- switch_to_thread (*child_inf->threads ().begin ());
+ switch_to_thread (&*child_inf->threads ().begin ());

 post_create_inferior (0, child_has_new_pspace);
 }

@@ -778,23 +778,23 @@ follow_fork ()
 switch back to it, to tell the target to follow it (in either
 direction). We'll afterwards refuse to resume, and inform
 the user what happened. */
- for (thread_info *tp : all_non_exited_threads (resume_target,
+ for (thread_info &tp : all_non_exited_threads (resume_target,
 resume_ptid))
 {
- if (tp == cur_thr)
+ if (&tp == cur_thr)
 continue;

 /* follow_fork_inferior clears tp->pending_follow, and below
 we'll need the value after the follow_fork_inferior
 call. */
- target_waitkind kind = tp->pending_follow.kind ();
+ target_waitkind kind = tp.pending_follow.kind ();

 if (kind != TARGET_WAITKIND_SPURIOUS)
 {
 infrun_debug_printf ("need to follow-fork [%s] first",
- tp->ptid.to_string ().c_str ());
+ tp.ptid.to_string ().c_str ());

- switch_to_thread (tp);
+ switch_to_thread (&tp);

 /* Set up inferior(s) as specified by the caller, and
 tell the target to do whatever is necessary to follow

@@ -1144,8 +1144,8 @@ handle_vfork_child_exec_or_exit (int exec)
 infrun_debug_printf ("resuming vfork parent process %d",
 resume_parent->pid);

- for (thread_info *thread : resume_parent->threads ())
- proceed_after_vfork_done (thread);
+ for (thread_info &thread : resume_parent->threads ())
+ proceed_after_vfork_done (&thread);
 }
 }
 }

@@ -1668,8 +1668,8 @@ infrun_inferior_execd (inferior *exec_inf, inferior *follow_inf)
 stepping buffer bytes. */
 follow_inf->displaced_step_state.reset ();

- for (thread_info *thread : follow_inf->threads ())
- thread->displaced_step_state.reset ();
+ for (thread_info &thread : follow_inf->threads ())
+ thread.displaced_step_state.reset ();

 /* Since an in-line step is done with everything else stopped, if there was
 one in progress at the time of the exec, it must have been the exec'ing

@@ -1973,10 +1973,10 @@ static bool
 any_thread_needs_target_thread_events (process_stratum_target *target,
 ptid_t resume_ptid)
 {
- for (thread_info *tp : all_non_exited_threads (target, resume_ptid))
- if (displaced_step_in_progress_thread (tp)
- || schedlock_applies (tp)
- || tp->thread_fsm () != nullptr)
+ for (thread_info &tp : all_non_exited_threads (target, resume_ptid))
+ if (displaced_step_in_progress_thread (&tp)
+ || schedlock_applies (&tp)
+ || tp.thread_fsm () != nullptr)
 return true;
 return false;
 }

@@ -2617,10 +2617,10 @@ do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
 if (resume_ptid != inferior_ptid && target_supports_set_thread_options (0))
 {
 process_stratum_target *resume_target = tp->inf->process_target ();
- for (thread_info *thr_iter : all_non_exited_threads (resume_target,
+ for (thread_info &thr_iter : all_non_exited_threads (resume_target,
 resume_ptid))
- if (thr_iter != tp)
- thr_iter->set_thread_options (0);
+ if (&thr_iter != tp)
+ thr_iter.set_thread_options (0);
 }

 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",

@@ -3129,8 +3129,8 @@ clear_proceed_status (int step)

 /* In all-stop mode, delete the per-thread status of all threads
 we're about to resume, implicitly and explicitly. */
- for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
- clear_proceed_status_thread (tp);
+ for (thread_info &tp : all_non_exited_threads (resume_target, resume_ptid))
+ clear_proceed_status_thread (&tp);
 }

 if (inferior_ptid != null_ptid)

@@ -3711,25 +3711,25 @@ proceed (CORE_ADDR addr, enum gdb_signal siggnal)
 threads. */
 if (!non_stop && !schedlock_applies (cur_thr))
 {
- for (thread_info *tp : all_non_exited_threads (resume_target,
+ for (thread_info &tp : all_non_exited_threads (resume_target,
 resume_ptid))
 {
- switch_to_thread_no_regs (tp);
+ switch_to_thread_no_regs (&tp);

 /* Ignore the current thread here. It's handled
 afterwards. */
- if (tp == cur_thr)
+ if (&tp == cur_thr)
 continue;

- if (!thread_still_needs_step_over (tp))
+ if (!thread_still_needs_step_over (&tp))
 continue;

- gdb_assert (!thread_is_in_step_over_chain (tp));
+ gdb_assert (!thread_is_in_step_over_chain (&tp));

 infrun_debug_printf ("need to step-over [%s] first",
- tp->ptid.to_string ().c_str ());
+ tp.ptid.to_string ().c_str ());

- global_thread_step_over_chain_enqueue (tp);
+ global_thread_step_over_chain_enqueue (&tp);
 }

 switch_to_thread (cur_thr);

@@ -3769,11 +3769,11 @@ proceed (CORE_ADDR addr, enum gdb_signal siggnal)

 /* In all-stop, but the target is always in non-stop mode.
 Start all other threads that are implicitly resumed too. */
- for (thread_info *tp : all_non_exited_threads (resume_target,
+ for (thread_info &tp : all_non_exited_threads (resume_target,
 resume_ptid))
 {
- switch_to_thread_no_regs (tp);
- proceed_resume_thread_checked (tp);
+ switch_to_thread_no_regs (&tp);
+ proceed_resume_thread_checked (&tp);
 }
 }
 else

@@ -3876,33 +3876,33 @@ infrun_thread_stop_requested (ptid_t ptid)
 but the user/frontend doesn't know about that yet (e.g., the
 thread had been temporarily paused for some step-over), set up
 for reporting the stop now. */
- for (thread_info *tp : all_threads (curr_target, ptid))
+ for (thread_info &tp : all_threads (curr_target, ptid))
 {
- if (tp->state != THREAD_RUNNING)
+ if (tp.state != THREAD_RUNNING)
 continue;
- if (tp->executing ())
+ if (tp.executing ())
 continue;

 /* Remove matching threads from the step-over queue, so
 start_step_over doesn't try to resume them
 automatically. */
- if (thread_is_in_step_over_chain (tp))
- global_thread_step_over_chain_remove (tp);
+ if (thread_is_in_step_over_chain (&tp))
+ global_thread_step_over_chain_remove (&tp);

 /* If the thread is stopped, but the user/frontend doesn't
 know about that yet, queue a pending event, as if the
 thread had just stopped now. Unless the thread already had
 a pending event. */
- if (!tp->has_pending_waitstatus ())
+ if (!tp.has_pending_waitstatus ())
 {
 target_waitstatus ws;
 ws.set_stopped (GDB_SIGNAL_0);
- tp->set_pending_waitstatus (ws);
+ tp.set_pending_waitstatus (ws);
 }

 /* Clear the inline-frame state, since we're re-processing the
 stop. */
- clear_inline_frame_state (tp);
+ clear_inline_frame_state (&tp);

 /* If this thread was paused because some other thread was
 doing an inline-step over, let that finish first. Once

@@ -3914,7 +3914,7 @@ infrun_thread_stop_requested (ptid_t ptid)
 /* Otherwise we can process the (new) pending event now. Set
 it so this pending event is considered by
 do_target_wait. */
- tp->set_resumed (true);
+ tp.set_resumed (true);
 }
 }

@@ -3950,8 +3950,8 @@ for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
 else
 {
 /* In all-stop mode, all threads have stopped. */
- for (thread_info *tp : all_non_exited_threads ())
- func (tp);
+ for (thread_info &tp : all_non_exited_threads ())
+ func (&tp);
 }
 }

@@ -4327,20 +4327,20 @@ prepare_for_detach (void)

 /* Stop threads currently displaced stepping, aborting it. */

- for (thread_info *thr : inf->non_exited_threads ())
+ for (thread_info &thr : inf->non_exited_threads ())
 {
- if (thr->displaced_step_state.in_progress ())
+ if (thr.displaced_step_state.in_progress ())
 {
- if (thr->executing ())
+ if (thr.executing ())
 {
- if (!thr->stop_requested)
+ if (!thr.stop_requested)
 {
- target_stop (thr->ptid);
- thr->stop_requested = true;
+ target_stop (thr.ptid);
+ thr.stop_requested = true;
 }
 }
 else
- thr->set_resumed (false);
+ thr.set_resumed (false);
 }
 }

@@ -5458,9 +5458,9 @@ handle_one (const wait_one_event &event)
 {
 int pid = event.ptid.pid ();
 inferior *inf = find_inferior_pid (event.target, pid);
- for (thread_info *tp : inf->non_exited_threads ())
+ for (thread_info &tp : inf->non_exited_threads ())
 {
- t = tp;
+ t = &tp;
 break;
 }
@@ -5729,9 +5729,9 @@ stop_all_threads (const char *reason, inferior *inf)

 /* Go through all threads looking for threads that we need
 to tell the target to stop. */
- for (thread_info *t : all_non_exited_threads ())
+ for (thread_info &t : all_non_exited_threads ())
 {
- if (inf != nullptr && t->inf != inf)
+ if (inf != nullptr && t.inf != inf)
 continue;

 /* For a single-target setting with an all-stop target,

@@ -5741,38 +5741,38 @@ stop_all_threads (const char *reason, inferior *inf)
 targets' threads. This should be fine due to the
 protection of 'check_multi_target_resumption'. */

- switch_to_thread_no_regs (t);
+ switch_to_thread_no_regs (&t);
 if (!target_is_non_stop_p ())
 continue;

- if (t->executing ())
+ if (t.executing ())
 {
 /* If already stopping, don't request a stop again.
 We just haven't seen the notification yet. */
- if (!t->stop_requested)
+ if (!t.stop_requested)
 {
 infrun_debug_printf (" %s executing, need stop",
- t->ptid.to_string ().c_str ());
- target_stop (t->ptid);
- t->stop_requested = true;
+ t.ptid.to_string ().c_str ());
+ target_stop (t.ptid);
+ t.stop_requested = true;
 }
 else
 {
 infrun_debug_printf (" %s executing, already stopping",
- t->ptid.to_string ().c_str ());
+ t.ptid.to_string ().c_str ());
 }

- if (t->stop_requested)
+ if (t.stop_requested)
 waits_needed++;
 }
 else
 {
 infrun_debug_printf (" %s not executing",
- t->ptid.to_string ().c_str ());
+ t.ptid.to_string ().c_str ());

 /* The thread may be not executing, but still be
 resumed with a pending status to process. */
- t->set_resumed (false);
+ t.set_resumed (false);
 }
 }

@@ -5886,21 +5886,21 @@ handle_no_resumed (struct execution_control_state *ecs)
 whether to report it to the user. */
 bool ignore_event = false;

- for (thread_info *thread : all_non_exited_threads ())
+ for (thread_info &thread : all_non_exited_threads ())
 {
- if (swap_terminal && thread->executing ())
+ if (swap_terminal && thread.executing ())
 {
- if (thread->inf != curr_inf)
+ if (thread.inf != curr_inf)
 {
 target_terminal::ours ();

- switch_to_thread (thread);
+ switch_to_thread (&thread);
 target_terminal::inferior ();
 }
 swap_terminal = false;
 }

- if (!ignore_event && thread->resumed ())
+ if (!ignore_event && thread.resumed ())
 {
 /* Either there were no unwaited-for children left in the
 target at some point, but there are now, or some target

@@ -6059,8 +6059,8 @@ handle_thread_exited (execution_control_state *ecs)
 thread. */
 return handle_as_no_resumed ();
 }
- thread_info *non_exited_thread = *range.begin ();
- switch_to_thread (non_exited_thread);
+ thread_info &non_exited_thread = *range.begin ();
+ switch_to_thread (&non_exited_thread);
 insert_breakpoints ();
 resume (GDB_SIGNAL_0);
 }

@@ -6615,83 +6615,83 @@ restart_threads (struct thread_info *event_thread, inferior *inf)
 /* In case the instruction just stepped spawned a new thread. */
 update_thread_list ();

- for (thread_info *tp : all_non_exited_threads ())
+ for (thread_info &tp : all_non_exited_threads ())
 {
- if (inf != nullptr && tp->inf != inf)
+ if (inf != nullptr && tp.inf != inf)
 continue;

- if (tp->inf->detaching)
+ if (tp.inf->detaching)
 {
 infrun_debug_printf ("restart threads: [%s] inferior detaching",
- tp->ptid.to_string ().c_str ());
+ tp.ptid.to_string ().c_str ());
 continue;
 }

- switch_to_thread_no_regs (tp);
+ switch_to_thread_no_regs (&tp);

- if (tp == event_thread)
+ if (&tp == event_thread)
 {
 infrun_debug_printf ("restart threads: [%s] is event thread",
- tp->ptid.to_string ().c_str ());
+ tp.ptid.to_string ().c_str ());
 continue;
 }

- if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
+ if (!(tp.state == THREAD_RUNNING || tp.control.in_infcall))
 {
 infrun_debug_printf ("restart threads: [%s] not meant to be running",
- tp->ptid.to_string ().c_str ());
+ tp.ptid.to_string ().c_str ());
 continue;
 }

- if (tp->resumed ())
+ if (tp.resumed ())
 {
 infrun_debug_printf ("restart threads: [%s] resumed",
- tp->ptid.to_string ().c_str ());
- gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
+ tp.ptid.to_string ().c_str ());
+ gdb_assert (tp.executing () || tp.has_pending_waitstatus ());
 continue;
 }

- if (thread_is_in_step_over_chain (tp))
+ if (thread_is_in_step_over_chain (&tp))
 {
 infrun_debug_printf ("restart threads: [%s] needs step-over",
- tp->ptid.to_string ().c_str ());
- gdb_assert (!tp->resumed ());
+ tp.ptid.to_string ().c_str ());
+ gdb_assert (!tp.resumed ());
 continue;
 }

- if (tp->has_pending_waitstatus ())
+ if (tp.has_pending_waitstatus ())
 {
 infrun_debug_printf ("restart threads: [%s] has pending status",
- tp->ptid.to_string ().c_str ());
- tp->set_resumed (true);
+ tp.ptid.to_string ().c_str ());
+ tp.set_resumed (true);
 continue;
 }

- gdb_assert (!tp->stop_requested);
+ gdb_assert (!tp.stop_requested);

 /* If some thread needs to start a step-over at this point, it
 should still be in the step-over queue, and thus skipped
 above. */
- if (thread_still_needs_step_over (tp))
+ if (thread_still_needs_step_over (&tp))
 {
 internal_error ("thread [%s] needs a step-over, but not in "
 "step-over queue\n",
- tp->ptid.to_string ().c_str ());
+ tp.ptid.to_string ().c_str ());
 }

- if (currently_stepping (tp))
+ if (currently_stepping (&tp))
 {
 infrun_debug_printf ("restart threads: [%s] was stepping",
- tp->ptid.to_string ().c_str ());
- keep_going_stepped_thread (tp);
+ tp.ptid.to_string ().c_str ());
+ keep_going_stepped_thread (&tp);
 }
 else
 {
 infrun_debug_printf ("restart threads: [%s] continuing",
- tp->ptid.to_string ().c_str ());
- execution_control_state ecs (tp);
- switch_to_thread (tp);
+ tp.ptid.to_string ().c_str ());
+ execution_control_state ecs (&tp);
+ switch_to_thread (&tp);
 keep_going_pass_signal (&ecs);
 }
 }

@@ -8515,20 +8515,20 @@ restart_after_all_stop_detach (process_stratum_target *proc_target)
 resumed. With the remote target (in all-stop), it's even
 impossible to issue another resumption if the target is already
 resumed, until the target reports a stop. */
- for (thread_info *thr : all_threads (proc_target))
+ for (thread_info &thr : all_threads (proc_target))
 {
- if (thr->state != THREAD_RUNNING)
+ if (thr.state != THREAD_RUNNING)
 continue;

 /* If we have any thread that is already executing, then we
 don't need to resume the target -- it is already been
 resumed. */
- if (thr->executing ())
+ if (thr.executing ())
 return;

 /* If we have a pending event to process, skip resuming the
 target and go straight to processing it. */
- if (thr->resumed () && thr->has_pending_waitstatus ())
+ if (thr.resumed () && thr.has_pending_waitstatus ())
 return;
 }

@@ -8539,13 +8539,13 @@ restart_after_all_stop_detach (process_stratum_target *proc_target)

 /* Otherwise, find the first THREAD_RUNNING thread and resume
 it. */
- for (thread_info *thr : all_threads (proc_target))
+ for (thread_info &thr : all_threads (proc_target))
 {
- if (thr->state != THREAD_RUNNING)
+ if (thr.state != THREAD_RUNNING)
 continue;

- execution_control_state ecs (thr);
- switch_to_thread (thr);
+ execution_control_state ecs (&thr);
+ switch_to_thread (&thr);
 keep_going (&ecs);
 return;
 }
gdb/infrun.h

@@ -65,13 +65,13 @@ infrun_debug_show_threads (const char *title, ThreadRange threads)
 INFRUN_SCOPED_DEBUG_ENTER_EXIT;

 infrun_debug_printf ("%s:", title);
- for (thread_info *thread : threads)
+ for (thread_info &thread : threads)
 infrun_debug_printf (" thread %s, executing = %d, resumed = %d, "
 "state = %s",
- thread->ptid.to_string ().c_str (),
- thread->executing (),
- thread->resumed (),
- thread_state_string (thread->state));
+ thread.ptid.to_string ().c_str (),
+ thread.executing (),
+ thread.resumed (),
+ thread_state_string (thread.state));
 }
 }

@@ -970,7 +970,7 @@ inf_has_multiple_threads ()

 /* Return true as soon as we see the second thread of the current
 inferior. */
- for (thread_info *tp ATTRIBUTE_UNUSED : current_inferior ()->threads ())
+ for (thread_info &tp ATTRIBUTE_UNUSED : current_inferior ()->threads ())
 if (++count > 1)
 return true;

@@ -2377,9 +2377,9 @@ linux_make_corefile_notes (struct gdbarch *gdbarch, bfd *obfd, int *note_size)
 target_thread_architecture (signalled_thr->ptid),
 obfd, note_data, note_size, stop_signal);
 }
- for (thread_info *thr : current_inferior ()->non_exited_threads ())
+ for (thread_info &thr : current_inferior ()->non_exited_threads ())
 {
- if (thr == signalled_thr)
+ if (&thr == signalled_thr)
 continue;

 /* On some architectures, like AArch64, each thread can have a distinct

@@ -2388,7 +2388,7 @@ linux_make_corefile_notes (struct gdbarch *gdbarch, bfd *obfd, int *note_size)

 Fetch each thread's gdbarch and pass it down to the lower layers so
 we can dump the right set of registers. */
- linux_corefile_thread (thr, target_thread_architecture (thr->ptid),
+ linux_corefile_thread (&thr, target_thread_architecture (thr.ptid),
 obfd, note_data, note_size, stop_signal);
 }

@@ -1706,12 +1706,12 @@ thread_db_target::thread_handle_to_thread_info (const gdb_byte *thread_handle,
 error (_("Thread handle size mismatch: %d vs %zu (from libthread_db)"),
 handle_len, sizeof (handle_tid));

- for (thread_info *tp : inf->non_exited_threads ())
+ for (thread_info &tp : inf->non_exited_threads ())
 {
- thread_db_thread_info *priv = get_thread_db_thread_info (tp);
+ thread_db_thread_info *priv = get_thread_db_thread_info (&tp);

 if (priv != NULL && handle_tid == priv->tid)
- return tp;
+ return &tp;
 }

 return NULL;

@@ -682,8 +682,8 @@ mi_on_resume_1 (struct mi_interp *mi,
 && !multiple_inferiors_p ())
 gdb_printf (mi->raw_stdout, "*running,thread-id=\"all\"\n");
 else
- for (thread_info *tp : all_non_exited_threads (targ, ptid))
- mi_output_running (tp);
+ for (thread_info &tp : all_non_exited_threads (targ, ptid))
+ mi_output_running (&tp);

 if (!mi->running_result_record_printed && mi->mi_proceeded)
 {

@@ -556,13 +556,13 @@ mi_cmd_thread_list_ids (const char *command, const char *const *argv, int argc)
 {
 ui_out_emit_tuple tuple_emitter (current_uiout, "thread-ids");

- for (thread_info *tp : all_non_exited_threads ())
+ for (thread_info &tp : all_non_exited_threads ())
 {
- if (tp->ptid == inferior_ptid)
- current_thread = tp->global_num;
+ if (tp.ptid == inferior_ptid)
+ current_thread = tp.global_num;

 num++;
- current_uiout->field_signed ("thread-id", tp->global_num);
+ current_uiout->field_signed ("thread-id", tp.global_num);
 }
 }

@@ -476,7 +476,7 @@ nbsd_resume(nbsd_nat_target *target, ptid_t ptid, int step,
 /* If ptid is a specific LWP, suspend all other LWPs in the process. */
 inferior *inf = find_inferior_ptid (target, ptid);

- for (thread_info *tp : inf->non_exited_threads ())
+ for (thread_info &tp : inf->non_exited_threads ())
 {
 if (tp->ptid.lwp () == ptid.lwp ())
 request = PT_RESUME;

@@ -491,20 +491,20 @@ nbsd_resume(nbsd_nat_target *target, ptid_t ptid, int step,
 {
 /* If ptid is a wildcard, resume all matching threads (they won't run
 until the process is continued however). */
- for (thread_info *tp : all_non_exited_threads (target, ptid))
+ for (thread_info &tp : all_non_exited_threads (target, ptid))
 if (ptrace (PT_RESUME, tp->ptid.pid (), NULL, tp->ptid.lwp ()) == -1)
 perror_with_name (("ptrace"));
 }

 if (step)
 {
- for (thread_info *tp : all_non_exited_threads (target, ptid))
+ for (thread_info &tp : all_non_exited_threads (target, ptid))
 if (ptrace (PT_SETSTEP, tp->ptid.pid (), NULL, tp->ptid.lwp ()) == -1)
 perror_with_name (("ptrace"));
 }
 else
 {
- for (thread_info *tp : all_non_exited_threads (target, ptid))
+ for (thread_info &tp : all_non_exited_threads (target, ptid))
 if (ptrace (PT_CLEARSTEP, tp->ptid.pid (), NULL, tp->ptid.lwp ()) == -1)
 perror_with_name (("ptrace"));
 }

@@ -393,12 +393,12 @@ record_btrace_target_open (const char *args, int from_tty)
 if (!target_has_execution ())
 error (_("The program is not being run."));

- for (thread_info *tp : current_inferior ()->non_exited_threads ())
- if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
+ for (thread_info &tp : current_inferior ()->non_exited_threads ())
+ if (args == NULL || *args == 0 || number_is_in_list (args, tp.global_num))
 {
- btrace_enable (tp, &record_btrace_conf);
+ btrace_enable (&tp, &record_btrace_conf);

- btrace_disable.add_thread (tp);
+ btrace_disable.add_thread (&tp);
 }

 record_btrace_push_target ();

@@ -415,9 +415,9 @@ record_btrace_target::stop_recording ()

 record_btrace_auto_disable ();

- for (thread_info *tp : current_inferior ()->non_exited_threads ())
- if (tp->btrace.target != NULL)
- btrace_disable (tp);
+ for (thread_info &tp : current_inferior ()->non_exited_threads ())
+ if (tp.btrace.target != NULL)
+ btrace_disable (&tp);
 }

 /* The disconnect method of target record-btrace. */

@@ -449,8 +449,8 @@ record_btrace_target::close ()

 /* We should have already stopped recording.
 Tear down btrace in case we have not. */
- for (thread_info *tp : current_inferior ()->non_exited_threads ())
- btrace_teardown (tp);
+ for (thread_info &tp : current_inferior ()->non_exited_threads ())
+ btrace_teardown (&tp);
 }

 /* The async method of target record-btrace. */

@@ -1458,8 +1458,8 @@ bool
 record_btrace_target::record_is_replaying (ptid_t ptid)
 {
 process_stratum_target *proc_target = current_inferior ()->process_target ();
- for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
- if (btrace_is_replaying (tp))
+ for (thread_info &tp : all_non_exited_threads (proc_target, ptid))
+ if (btrace_is_replaying (&tp))
 return true;

 return false;

@@ -2219,18 +2219,18 @@ record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
 {
 gdb_assert (inferior_ptid.matches (ptid));

- for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
+ for (thread_info &tp : all_non_exited_threads (proc_target, ptid))
 {
- if (tp->ptid.matches (inferior_ptid))
- record_btrace_resume_thread (tp, flag);
+ if (tp.ptid.matches (inferior_ptid))
+ record_btrace_resume_thread (&tp, flag);
 else
- record_btrace_resume_thread (tp, cflag);
+ record_btrace_resume_thread (&tp, cflag);
 }
 }
 else
 {
- for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
- record_btrace_resume_thread (tp, flag);
+ for (thread_info &tp : all_non_exited_threads (proc_target, ptid))
+ record_btrace_resume_thread (&tp, flag);
 }

 /* Async support. */

@@ -2616,9 +2616,9 @@ record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,

 /* Keep a work list of moving threads. */
 process_stratum_target *proc_target = current_inferior ()->process_target ();
- for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
- if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
- moving.push_back (tp);
+ for (thread_info &tp : all_non_exited_threads (proc_target, ptid))
+ if ((tp.btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
+ moving.push_back (&tp);

 if (moving.empty ())
 {

@@ -2699,8 +2699,8 @@ record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
 /* Stop all other threads. */
 if (!target_is_non_stop_p ())
 {
- for (thread_info *tp : current_inferior ()->non_exited_threads ())
- record_btrace_cancel_resume (tp);
+ for (thread_info &tp : current_inferior ()->non_exited_threads ())
+ record_btrace_cancel_resume (&tp);
 }

 /* In async mode, we need to announce further events. */

@@ -2739,10 +2739,10 @@ record_btrace_target::stop (ptid_t ptid)
 process_stratum_target *proc_target
 = current_inferior ()->process_target ();

- for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
+ for (thread_info &tp : all_non_exited_threads (proc_target, ptid))
 {
- tp->btrace.flags &= ~BTHR_MOVE;
- tp->btrace.flags |= BTHR_STOP;
+ tp.btrace.flags &= ~BTHR_MOVE;
+ tp.btrace.flags |= BTHR_STOP;
 }
 }
 }

@@ -2918,8 +2918,8 @@ record_btrace_target::goto_record (ULONGEST insn_number)
 void
 record_btrace_target::record_stop_replaying ()
 {
- for (thread_info *tp : current_inferior ()->non_exited_threads ())
- record_btrace_stop_replaying (tp);
+ for (thread_info &tp : current_inferior ()->non_exited_threads ())
+ record_btrace_stop_replaying (&tp);
 }

 /* The execution_direction target method. */

@@ -1117,8 +1117,8 @@ record_full_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
 all executed instructions, so we can record them all. */
 process_stratum_target *proc_target
 = current_inferior ()->process_target ();
- for (thread_info *thread : all_non_exited_threads (proc_target, ptid))
- thread->control.may_range_step = 0;
+ for (thread_info &thread : all_non_exited_threads (proc_target, ptid))
+ thread.control.may_range_step = 0;

 this->beneath ()->resume (ptid, step, signal);
 }

@@ -1213,8 +1213,8 @@ record_full_wait_1 (struct target_ops *ops,
 return ret;
 }

- for (thread_info *tp : all_non_exited_threads ())
- delete_single_step_breakpoints (tp);
+ for (thread_info &tp : all_non_exited_threads ())
+ delete_single_step_breakpoints (&tp);

 if (record_full_resume_step)
 return ret;
gdb/remote.c

@@ -4446,7 +4446,7 @@ static bool
 has_single_non_exited_thread (inferior *inf)
 {
 int count = 0;
- for (thread_info *tp ATTRIBUTE_UNUSED : inf->non_exited_threads ())
+ for (thread_info &tp ATTRIBUTE_UNUSED : inf->non_exited_threads ())
 if (++count > 1)
 break;
 return count == 1;

@@ -5214,26 +5214,26 @@ remote_target::process_initial_stop_replies (int from_tty)
 /* Now go over all threads that are stopped, and print their current
 frame. If all-stop, then if there's a signalled thread, pick
 that as current. */
- for (thread_info *thread : all_non_exited_threads (this))
+ for (thread_info &thread : all_non_exited_threads (this))
 {
 if (first == NULL)
- first = thread;
+ first = &thread;

 if (!non_stop)
- thread->set_running (false);
- else if (thread->state != THREAD_STOPPED)
+ thread.set_running (false);
+ else if (thread.state != THREAD_STOPPED)
 continue;

- if (selected == nullptr && thread->has_pending_waitstatus ())
- selected = thread;
+ if (selected == nullptr && thread.has_pending_waitstatus ())
+ selected = &thread;

 if (lowest_stopped == NULL
- || thread->inf->num < lowest_stopped->inf->num
- || thread->per_inf_num < lowest_stopped->per_inf_num)
- lowest_stopped = thread;
+ || thread.inf->num < lowest_stopped->inf->num
+ || thread.per_inf_num < lowest_stopped->per_inf_num)
+ lowest_stopped = &thread;

 if (non_stop)
- print_one_stopped_thread (thread);
+ print_one_stopped_thread (&thread);
 }

 /* In all-stop, we only print the status of one thread, and leave

@@ -5623,10 +5623,10 @@ remote_target::start_remote_1 (int from_tty, int extended_p)
 remote_debug_printf ("warning: couldn't determine remote "
 "current thread; picking first in list.");

- for (thread_info *tp : all_non_exited_threads (this,
+ for (thread_info &tp : all_non_exited_threads (this,
 minus_one_ptid))
 {
- switch_to_thread (tp);
+ switch_to_thread (&tp);
 break;
 }
 }

@@ -6815,9 +6815,9 @@ remote_target::remote_detach_1 (inferior *inf, int from_tty)
 /* See if any thread of the inferior we are detaching has a pending fork
 status. In that case, we must detach from the child resulting from
 that fork. */
- for (thread_info *thread : inf->non_exited_threads ())
+ for (thread_info &thread : inf->non_exited_threads ())
 {
- const target_waitstatus *ws = thread_pending_fork_status (thread);
+ const target_waitstatus *ws = thread_pending_fork_status (&thread);

 if (ws == nullptr)
 continue;

@@ -7221,14 +7221,14 @@ char *
 remote_target::append_pending_thread_resumptions (char *p, char *endp,
 ptid_t ptid)
 {
- for (thread_info *thread : all_non_exited_threads (this, ptid))
- if (inferior_ptid != thread->ptid
- && thread->stop_signal () != GDB_SIGNAL_0)
+ for (thread_info &thread : all_non_exited_threads (this, ptid))
+ if (inferior_ptid != thread.ptid
+ && thread.stop_signal () != GDB_SIGNAL_0)
 {
- p = append_resumption (p, endp, thread->ptid,
- 0, thread->stop_signal ());
- thread->set_stop_signal (GDB_SIGNAL_0);
- resume_clear_thread_private_info (thread);
+ p = append_resumption (p, endp, thread.ptid,
+ 0, thread.stop_signal ());
+ thread.set_stop_signal (GDB_SIGNAL_0);
+ resume_clear_thread_private_info (&thread);
 }

 return p;

@@ -7254,8 +7254,8 @@ remote_target::remote_resume_with_hc (ptid_t ptid, int step,
 else
 set_continue_thread (ptid);

- for (thread_info *thread : all_non_exited_threads (this))
- resume_clear_thread_private_info (thread);
+ for (thread_info &thread : all_non_exited_threads (this))
+ resume_clear_thread_private_info (&thread);

 buf = rs->buf.data ();
 if (::execution_direction == EXEC_REVERSE)

@@ -7417,8 +7417,8 @@ remote_target::resume (ptid_t scope_ptid, int step, enum gdb_signal siggnal)
 remote_resume_with_hc (scope_ptid, step, siggnal);

 /* Update resumed state tracked by the remote target. */
- for (thread_info *tp : all_non_exited_threads (this, scope_ptid))
- get_remote_thread_info (tp)->set_resumed ();
+ for (thread_info &tp : all_non_exited_threads (this, scope_ptid))
+ get_remote_thread_info (&tp)->set_resumed ();

 /* We've just told the target to resume. The remote server will
 wait for the inferior to stop, and then send a stop reply. In

@@ -7630,15 +7630,15 @@ remote_target::commit_resumed ()

 bool any_pending_vcont_resume = false;

- for (thread_info *tp : all_non_exited_threads (this))
+ for (thread_info &tp : all_non_exited_threads (this))
 {
- remote_thread_info *priv = get_remote_thread_info (tp);
+ remote_thread_info *priv = get_remote_thread_info (&tp);

 /* If a thread of a process is not meant to be resumed, then we
 can't wildcard that process. */
 if (priv->get_resume_state () == resume_state::NOT_RESUMED)
 {
- get_remote_inferior (tp->inf)->may_wildcard_vcont = false;
+ get_remote_inferior (tp.inf)->may_wildcard_vcont = false;

 /* And if we can't wildcard a process, we can't wildcard
 everything either. */

@@ -7652,7 +7652,7 @@ remote_target::commit_resumed ()
 /* If a thread is the parent of an unfollowed fork/vfork/clone,
 then we can't do a global wildcard, as that would resume the
 pending child. */
- if (thread_pending_child_status (tp) != nullptr)
+ if (thread_pending_child_status (&tp) != nullptr)
 may_global_wildcard_vcont = false;
 }

@@ -7669,9 +7669,9 @@ remote_target::commit_resumed ()
 struct vcont_builder vcont_builder (this);

 /* Threads first. */
- for (thread_info *tp : all_non_exited_threads (this))
+ for (thread_info &tp : all_non_exited_threads (this))
 {
- remote_thread_info *remote_thr = get_remote_thread_info (tp);
+ remote_thread_info *remote_thr = get_remote_thread_info (&tp);

 /* If the thread was previously vCont-resumed, no need to send a specific
 action for it. If we didn't receive a resume request for it, don't

@@ -7679,14 +7679,14 @@ remote_target::commit_resumed ()
 if (remote_thr->get_resume_state () != resume_state::RESUMED_PENDING_VCONT)
 continue;

- gdb_assert (!thread_is_in_step_over_chain (tp));
+ gdb_assert (!thread_is_in_step_over_chain (&tp));

 /* We should never be commit-resuming a thread that has a stop reply.
 Otherwise, we would end up reporting a stop event for a thread while
 it is running on the remote target. */
 remote_state *rs = get_remote_state ();
 for (const auto &stop_reply : rs->stop_reply_queue)
- gdb_assert (stop_reply->ptid != tp->ptid);
+ gdb_assert (stop_reply->ptid != tp.ptid);

 const resumed_pending_vcont_info &info
 = remote_thr->resumed_pending_vcont_info ();

@@ -7694,8 +7694,8 @@ remote_target::commit_resumed ()
 /* Check if we need to send a specific action for this thread. If not,
 it will be included in a wildcard resume instead. */
 if (info.step || info.sig != GDB_SIGNAL_0
- || !get_remote_inferior (tp->inf)->may_wildcard_vcont)
- vcont_builder.push_action (tp->ptid, info.step, info.sig);
+ || !get_remote_inferior (tp.inf)->may_wildcard_vcont)
+ vcont_builder.push_action (tp.ptid, info.step, info.sig);

 remote_thr->set_resumed ();
 }

@@ -7778,9 +7778,9 @@ remote_target::remote_stop_ns (ptid_t ptid)
 whether the thread wasn't resumed with a signal. Generating a
 phony stop in that case would result in losing the signal. */
 bool needs_commit = false;
- for (thread_info *tp : all_non_exited_threads (this, ptid))
+ for (thread_info &tp : all_non_exited_threads (this, ptid))
 {
- remote_thread_info *remote_thr = get_remote_thread_info (tp);
+ remote_thread_info *remote_thr = get_remote_thread_info (&tp);

 if (remote_thr->get_resume_state ()
 == resume_state::RESUMED_PENDING_VCONT)

@@ -7801,17 +7801,17 @@ remote_target::remote_stop_ns (ptid_t ptid)
 if (needs_commit)
 commit_resumed ();
 else
- for (thread_info *tp : all_non_exited_threads (this, ptid))
+ for (thread_info &tp : all_non_exited_threads (this, ptid))
 {
- remote_thread_info *remote_thr = get_remote_thread_info (tp);
+ remote_thread_info *remote_thr = get_remote_thread_info (&tp);

 if (remote_thr->get_resume_state ()
 == resume_state::RESUMED_PENDING_VCONT)
 {
 remote_debug_printf ("Enqueueing phony stop reply for thread pending "
- "vCont-resume (%d, %ld, %s)", tp->ptid.pid(),
- tp->ptid.lwp (),
- pulongest (tp->ptid.tid ()));
+ "vCont-resume (%d, %ld, %s)", tp.ptid.pid(),
+ tp.ptid.lwp (),
+ pulongest (tp.ptid.tid ()));

 /* Check that the thread wasn't resumed with a signal.
 Generating a phony stop would result in losing the

@@ -7821,10 +7821,10 @@ remote_target::remote_stop_ns (ptid_t ptid)
 gdb_assert (info.sig == GDB_SIGNAL_0);

 stop_reply_up sr = std::make_unique<stop_reply> ();
- sr->ptid = tp->ptid;
+ sr->ptid = tp.ptid;
 sr->rs = rs;
 sr->ws.set_stopped (GDB_SIGNAL_0);
- sr->arch = tp->inf->arch ();
+ sr->arch = tp.inf->arch ();
 sr->stop_reason = TARGET_STOPPED_BY_NO_REASON;
 sr->watch_data_address = 0;
 sr->core = 0;

@@ -8120,9 +8120,9 @@ remote_target::remove_new_children (threads_listing_context *context)

 /* For any threads stopped at a (v)fork/clone event, remove the
 corresponding child threads from the CONTEXT list. */
- for (thread_info *thread : all_non_exited_threads (this))
+ for (thread_info &thread : all_non_exited_threads (this))
 {
- const target_waitstatus *ws = thread_pending_child_status (thread);
+ const target_waitstatus *ws = thread_pending_child_status (&thread);

 if (ws == nullptr)
 continue;
@@ -8817,17 +8817,17 @@ remote_target::select_thread_for_ambiguous_stop_reply
|
||||
|
||||
/* Consider all non-exited threads of the target, find the first resumed
|
||||
one. */
|
||||
for (thread_info *thr : all_non_exited_threads (this))
|
||||
for (thread_info &thr : all_non_exited_threads (this))
|
||||
{
|
||||
remote_thread_info *remote_thr = get_remote_thread_info (thr);
|
||||
remote_thread_info *remote_thr = get_remote_thread_info (&thr);
|
||||
|
||||
if (remote_thr->get_resume_state () != resume_state::RESUMED)
|
||||
continue;
|
||||
|
||||
if (first_resumed_thread == nullptr)
|
||||
first_resumed_thread = thr;
|
||||
first_resumed_thread = &thr;
|
||||
else if (!process_wide_stop
|
||||
|| first_resumed_thread->ptid.pid () != thr->ptid.pid ())
|
||||
|| first_resumed_thread->ptid.pid () != thr.ptid.pid ())
|
||||
ambiguous = true;
|
||||
}
|
||||
|
||||

@@ -8937,8 +8937,8 @@ remote_target::process_stop_reply (stop_reply_up stop_reply,
{
/* If the target works in all-stop mode, a stop-reply indicates that
all the target's threads stopped. */
for (thread_info *tp : all_non_exited_threads (this))
get_remote_thread_info (tp)->set_not_resumed ();
for (thread_info &tp : all_non_exited_threads (this))
get_remote_thread_info (&tp)->set_not_resumed ();
}
}

@@ -9006,9 +9006,9 @@ remote_target::wait_ns (ptid_t ptid, struct target_waitstatus *status,
static ptid_t
first_remote_resumed_thread (remote_target *target)
{
for (thread_info *tp : all_non_exited_threads (target, minus_one_ptid))
if (tp->resumed ())
return tp->ptid;
for (thread_info &tp : all_non_exited_threads (target, minus_one_ptid))
if (tp.resumed ())
return tp.ptid;
return null_ptid;
}

@@ -10933,9 +10933,9 @@ remote_target::kill_new_fork_children (inferior *inf)

/* Kill the fork child threads of any threads in inferior INF that are stopped
at a fork event. */
for (thread_info *thread : inf->non_exited_threads ())
for (thread_info &thread : inf->non_exited_threads ())
{
const target_waitstatus *ws = thread_pending_fork_status (thread);
const target_waitstatus *ws = thread_pending_fork_status (&thread);

if (ws == nullptr)
continue;

@@ -15463,10 +15463,10 @@ remote_target::remote_btrace_maybe_reopen ()
if (m_features.packet_support (PACKET_qXfer_btrace_conf) != PACKET_ENABLE)
return;

for (thread_info *tp : all_non_exited_threads (this))
for (thread_info &tp : all_non_exited_threads (this))
{
memset (&rs->btrace_config, 0x00, sizeof (struct btrace_config));
btrace_read_config (tp, &rs->btrace_config);
btrace_read_config (&tp, &rs->btrace_config);

if (rs->btrace_config.format == BTRACE_FORMAT_NONE)
continue;

@@ -15496,8 +15496,7 @@ remote_target::remote_btrace_maybe_reopen ()
btrace_format_string (rs->btrace_config.format));
}

tp->btrace.target
= new btrace_target_info { tp->ptid, rs->btrace_config };
tp.btrace.target = new btrace_target_info { tp.ptid, rs->btrace_config };
}
}

@@ -15733,18 +15732,18 @@ remote_target::thread_handle_to_thread_info (const gdb_byte *thread_handle,
int handle_len,
inferior *inf)
{
for (thread_info *tp : all_non_exited_threads (this))
for (thread_info &tp : all_non_exited_threads (this))
{
remote_thread_info *priv = get_remote_thread_info (tp);
remote_thread_info *priv = get_remote_thread_info (&tp);

if (tp->inf == inf && priv != NULL)
if (tp.inf == inf && priv != NULL)
{
if (handle_len != priv->thread_handle.size ())
error (_("Thread handle size mismatch: %d vs %zu (from remote)"),
handle_len, priv->thread_handle.size ());
if (memcmp (thread_handle, priv->thread_handle.data (),
handle_len) == 0)
return tp;
return &tp;
}
}

@@ -15931,9 +15930,9 @@ remote_target::commit_requested_thread_options ()
/* Now set non-zero options for threads that need them. We don't
bother with the case of all threads of a process wanting the same
non-zero options as that's not an expected scenario. */
for (thread_info *tp : all_non_exited_threads (this))
for (thread_info &tp : all_non_exited_threads (this))
{
gdb_thread_options options = tp->thread_options ();
gdb_thread_options options = tp.thread_options ();

if (options == 0)
continue;

@@ -15950,10 +15949,10 @@ remote_target::commit_requested_thread_options ()
*obuf_p++ = ';';
obuf_p += xsnprintf (obuf_p, obuf_endp - obuf_p, "%s",
phex_nz (options));
if (tp->ptid != magic_null_ptid)
if (tp.ptid != magic_null_ptid)
{
*obuf_p++ = ':';
obuf_p = write_ptid (obuf_p, obuf_endp, tp->ptid);
obuf_p = write_ptid (obuf_p, obuf_endp, tp.ptid);
}

size_t osize = obuf_p - obuf;

@@ -3812,11 +3812,11 @@ target_pass_ctrlc (void)
if (proc_target == NULL)
continue;

for (thread_info *thr : inf->non_exited_threads ())
for (thread_info &thr : inf->non_exited_threads ())
{
/* A thread can be THREAD_STOPPED and executing, while
running an infcall. */
if (thr->state == THREAD_RUNNING || thr->executing ())
if (thr.state == THREAD_RUNNING || thr.executing ())
{
/* We can get here quite deep in target layers. Avoid
switching thread context or anything that would

@@ -21,15 +21,12 @@

#include "gdbsupport/filtered-iterator.h"
#include "gdbsupport/iterator-range.h"
#include "gdbsupport/next-iterator.h"
#include "gdbsupport/reference-to-pointer-iterator.h"
#include "gdbsupport/safe-iterator.h"

/* A forward iterator that iterates over a given inferior's
threads. */

using inf_threads_iterator
= reference_to_pointer_iterator<intrusive_list<thread_info>::iterator>;
using inf_threads_iterator = intrusive_list<thread_info>::iterator;

/* A forward iterator that iterates over all threads of all
inferiors. */

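This is the core of the change: inf_threads_iterator stops being a reference_to_pointer_iterator wrapper and becomes the plain intrusive_list iterator, so range-for over an inferior's threads now yields thread_info & instead of thread_info *. A small self-contained sketch of the difference, with a hypothetical ref_to_ptr_iterator modeled loosely on the idea of the removed wrapper and std::list standing in for intrusive_list (not the GDB implementation):

  #include <iostream>
  #include <list>

  struct Thread { int num; };

  /* Toy version of the removed wrapper: adapt an iterator whose operator*
     yields T & so that it yields T * instead.  */
  template<typename It>
  struct ref_to_ptr_iterator
  {
    It it;

    auto *operator* () const { return &*it; }
    ref_to_ptr_iterator &operator++ () { ++it; return *this; }
    bool operator!= (const ref_to_ptr_iterator &other) const
    { return it != other.it; }
  };

  template<typename It>
  struct ref_to_ptr_range
  {
    It b, e;
    ref_to_ptr_iterator<It> begin () const { return { b }; }
    ref_to_ptr_iterator<It> end () const { return { e }; }
  };

  int
  main ()
  {
    std::list<Thread> threads = { { 1 }, { 2 }, { 3 } };

    /* Old shape: the wrapper makes the loop variable a pointer.  */
    ref_to_ptr_range<std::list<Thread>::iterator> wrapped
      { threads.begin (), threads.end () };
    for (Thread *tp : wrapped)
      std::cout << tp->num << ' ';
    std::cout << '\n';

    /* New shape: the plain iterator yields references directly.  */
    for (Thread &tp : threads)
      std::cout << tp.num << ' ';
    std::cout << '\n';
  }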

@@ -87,9 +84,9 @@ class all_matching_threads_iterator
{
public:
typedef all_matching_threads_iterator self_type;
typedef struct thread_info *value_type;
typedef struct thread_info *&reference;
typedef struct thread_info **pointer;
typedef struct thread_info value_type;
typedef struct thread_info &reference;
typedef struct thread_info *pointer;
typedef std::forward_iterator_tag iterator_category;
typedef int difference_type;

@@ -101,7 +98,7 @@ public:
/* Create a one-past-end iterator. */
all_matching_threads_iterator () = default;

thread_info *operator* () const { return m_thr; }
reference operator* () const { return *m_thr; }

all_matching_threads_iterator &operator++ ()
{

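With the wrapper gone, the hand-written iterator's traits change in step: value_type is the object itself, reference is thread_info &, pointer is thread_info *, and operator* returns a reference. A minimal sketch of a forward iterator with that trait shape over a toy intrusive list (hypothetical thread_iterator and thread_range names, not the GDB classes):

  #include <iostream>
  #include <iterator>

  struct Thread { int num; Thread *next; };

  class thread_iterator
  {
  public:
    typedef Thread value_type;
    typedef Thread &reference;
    typedef Thread *pointer;
    typedef std::forward_iterator_tag iterator_category;
    typedef int difference_type;

    explicit thread_iterator (Thread *thr = nullptr) : m_thr (thr) {}

    /* operator* yields a reference; call sites take &*it (or &t in a
       range-for) when they still need a pointer.  */
    reference operator* () const { return *m_thr; }
    pointer operator-> () const { return m_thr; }

    thread_iterator &operator++ () { m_thr = m_thr->next; return *this; }

    bool operator== (const thread_iterator &other) const
    { return m_thr == other.m_thr; }
    bool operator!= (const thread_iterator &other) const
    { return m_thr != other.m_thr; }

  private:
    Thread *m_thr;
  };

  struct thread_range
  {
    Thread *head;
    thread_iterator begin () const { return thread_iterator (head); }
    thread_iterator end () const { return thread_iterator (); }
  };

  int
  main ()
  {
    Thread t3 { 3, nullptr }, t2 { 2, &t3 }, t1 { 1, &t2 };

    for (Thread &t : thread_range { &t1 })
      std::cout << t.num << ' ';
    std::cout << '\n';
  }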

@@ -149,9 +146,9 @@ private:

struct non_exited_thread_filter
{
bool operator() (struct thread_info *thr) const
bool operator() (struct thread_info &thr) const
{
return thr->state != THREAD_EXITED;
return thr.state != THREAD_EXITED;
}
};

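The filter predicate used for the non-exited ranges follows the same convention: it now receives a reference rather than a pointer, matching an iterator whose operator* yields thread_info &. A small sketch under those assumptions, with a hypothetical non_exited_filter over a toy Thread type:

  #include <iostream>
  #include <vector>

  enum thread_state { THREAD_RUNNING, THREAD_EXITED };

  struct Thread { int num; thread_state state; };

  /* Toy counterpart of non_exited_thread_filter: the call operator takes a
     reference, so it composes with a reference-yielding iterator.  */
  struct non_exited_filter
  {
    bool operator() (Thread &thr) const
    { return thr.state != THREAD_EXITED; }
  };

  int
  main ()
  {
    std::vector<Thread> threads
      = { { 1, THREAD_RUNNING }, { 2, THREAD_EXITED }, { 3, THREAD_RUNNING } };

    non_exited_filter keep;
    for (Thread &thr : threads)
      if (keep (thr))
        std::cout << "live thread " << thr.num << '\n';
  }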

gdb/thread.c

@@ -558,9 +558,9 @@ delete_thread_silent (thread_info *thread)
struct thread_info *
find_thread_global_id (int global_id)
{
for (thread_info *tp : all_threads ())
if (tp->global_num == global_id)
return tp;
for (thread_info &tp : all_threads ())
if (tp.global_num == global_id)
return &tp;

return NULL;
}

@@ -568,9 +568,9 @@ find_thread_global_id (int global_id)
static struct thread_info *
find_thread_id (struct inferior *inf, int thr_num)
{
for (thread_info *tp : inf->threads ())
if (tp->per_inf_num == thr_num)
return tp;
for (thread_info &tp : inf->threads ())
if (tp.per_inf_num == thr_num)
return &tp;

return NULL;
}

@@ -615,7 +615,7 @@ iterate_over_threads (gdb::function_view<bool (struct thread_info *)> callback)
bool
any_thread_p ()
{
for (thread_info *tp ATTRIBUTE_UNUSED : all_threads ())
for (thread_info &tp ATTRIBUTE_UNUSED : all_threads ())
return true;
return false;
}

@@ -639,8 +639,8 @@ live_threads_count (void)
int
valid_global_thread_id (int global_id)
{
for (thread_info *tp : all_threads ())
if (tp->global_num == global_id)
for (thread_info &tp : all_threads ())
if (tp.global_num == global_id)
return 1;

return 0;

@@ -672,8 +672,8 @@ any_thread_of_inferior (inferior *inf)
if (inf == current_inferior () && inferior_ptid != null_ptid)
return inferior_thread ();

for (thread_info *tp : inf->non_exited_threads ())
return tp;
for (thread_info &tp : inf->non_exited_threads ())
return &tp;

return NULL;
}

@@ -699,12 +699,12 @@ any_live_thread_of_inferior (inferior *inf)
return curr_tp;
}

for (thread_info *tp : inf->non_exited_threads ())
for (thread_info &tp : inf->non_exited_threads ())
{
if (!tp->executing ())
return tp;
if (!tp.executing ())
return &tp;

tp_executing = tp;
tp_executing = &tp;
}

/* If both the current thread and all live threads are executing,

@@ -856,8 +856,8 @@ thread_change_ptid (process_stratum_target *targ,
void
set_resumed (process_stratum_target *targ, ptid_t ptid, bool resumed)
{
for (thread_info *tp : all_non_exited_threads (targ, ptid))
tp->set_resumed (resumed);
for (thread_info &tp : all_non_exited_threads (targ, ptid))
tp.set_resumed (resumed);
}

/* Helper for set_running, that marks one thread either running or

@@ -920,8 +920,8 @@ set_running (process_stratum_target *targ, ptid_t ptid, bool running)
multiple *running notifications just fine. */
bool any_started = false;

for (thread_info *tp : all_non_exited_threads (targ, ptid))
if (set_running_thread (tp, running))
for (thread_info &tp : all_non_exited_threads (targ, ptid))
if (set_running_thread (&tp, running))
any_started = true;

if (any_started)

@@ -931,8 +931,8 @@ set_running (process_stratum_target *targ, ptid_t ptid, bool running)
void
set_executing (process_stratum_target *targ, ptid_t ptid, bool executing)
{
for (thread_info *tp : all_non_exited_threads (targ, ptid))
tp->set_executing (executing);
for (thread_info &tp : all_non_exited_threads (targ, ptid))
tp.set_executing (executing);

/* It only takes one running thread to spawn more threads. */
if (executing)

@@ -954,8 +954,8 @@ threads_are_executing (process_stratum_target *target)
void
set_stop_requested (process_stratum_target *targ, ptid_t ptid, bool stop)
{
for (thread_info *tp : all_non_exited_threads (targ, ptid))
tp->stop_requested = stop;
for (thread_info &tp : all_non_exited_threads (targ, ptid))
tp.stop_requested = stop;

/* Call the stop requested observer so other components of GDB can
react to this request. */

@@ -968,8 +968,8 @@ finish_thread_state (process_stratum_target *targ, ptid_t ptid)
{
bool any_started = false;

for (thread_info *tp : all_non_exited_threads (targ, ptid))
if (set_running_thread (tp, tp->executing ()))
for (thread_info &tp : all_non_exited_threads (targ, ptid))
if (set_running_thread (&tp, tp.executing ()))
any_started = true;

if (any_started)

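The set_resumed, set_running, set_executing, set_stop_requested and finish_thread_state hunks all follow one bulk-update shape: walk the matching non-exited threads by reference and flip a flag, taking &tp only where a helper still wants a pointer. A standalone sketch of that shape, with a plain int pid filter standing in for ptid_t/minus_one_ptid (hypothetical names, not the GDB API):

  #include <iostream>
  #include <list>

  struct Thread
  {
    int pid;
    int num;
    bool exited;
    bool resumed;
  };

  /* Toy counterpart of the set_resumed family: -1 stands in for "match every
     process", the way minus_one_ptid does in the real code.  */
  static void
  set_resumed (std::list<Thread> &threads, int pid, bool resumed)
  {
    for (Thread &tp : threads)
      {
        if (tp.exited || (pid != -1 && tp.pid != pid))
          continue;
        tp.resumed = resumed;
      }
  }

  int
  main ()
  {
    std::list<Thread> threads
      = { { 10, 1, false, false }, { 10, 2, true, false },
          { 11, 3, false, false } };

    set_resumed (threads, 10, true);

    for (const Thread &tp : threads)
      std::cout << tp.num << (tp.resumed ? " resumed" : " not resumed") << '\n';
  }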

@@ -1281,7 +1281,7 @@ print_thread_info_1 (struct ui_out *uiout, const char *requested_threads,
accommodate the largest entry. */
size_t target_id_col_width = 17;

for (thread_info *tp : all_threads ())
for (thread_info &tp : all_threads ())
{
any_thread = true;

@@ -1290,16 +1290,16 @@ print_thread_info_1 (struct ui_out *uiout, const char *requested_threads,
switch_to_thread (current_thread);

if (!should_print_thread (requested_threads, opts,
default_inf_num, global_ids, pid, tp))
default_inf_num, global_ids, pid, &tp))
continue;

/* Switch inferiors so we're looking at the right
target stack. */
switch_to_inferior_no_thread (tp->inf);
switch_to_inferior_no_thread (tp.inf);

target_id_col_width
= std::max (target_id_col_width,
thread_target_id_str (tp).size ());
thread_target_id_str (&tp).size ());

++n_matching_threads;
}

@@ -1327,13 +1327,13 @@ print_thread_info_1 (struct ui_out *uiout, const char *requested_threads,
}

for (inferior *inf : all_inferiors ())
for (thread_info *tp : inf->threads ())
for (thread_info &tp : inf->threads ())
{
if (tp == current_thread && tp->state == THREAD_EXITED)
if (&tp == current_thread && tp.state == THREAD_EXITED)
current_exited = true;

print_thread (uiout, requested_threads, opts, global_ids, pid,
default_inf_num, tp, current_thread);
default_inf_num, &tp, current_thread);
}

/* This end scope restores the current thread and the frame

@@ -1746,8 +1746,8 @@ thread_apply_all_command (const char *cmd, int from_tty)
std::vector<thread_info_ref> thr_list_cpy;
thr_list_cpy.reserve (tc);

for (thread_info *tp : all_non_exited_threads ())
thr_list_cpy.push_back (thread_info_ref::new_reference (tp));
for (thread_info &tp : all_non_exited_threads ())
thr_list_cpy.push_back (thread_info_ref::new_reference (&tp));
gdb_assert (thr_list_cpy.size () == tc);

auto *sorter = (ascending

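thread_apply_all_command copies the current threads into a local vector before applying the command; with a reference-yielding range, each element's address is taken with &tp (and wrapped in thread_info_ref in the real code). A minimal sketch of that copy step, using plain pointers and a hypothetical Thread type:

  #include <iostream>
  #include <list>
  #include <vector>

  struct Thread { int num; };

  int
  main ()
  {
    std::list<Thread> threads = { { 1 }, { 2 }, { 3 } };

    /* Snapshot the thread list up front; each entry's address is captured
       with &tp, the pointer-taking counterpart of the reference loop
       variable.  std::list nodes keep their addresses even if other
       elements are later inserted or erased.  */
    std::vector<Thread *> snapshot;
    snapshot.reserve (threads.size ());
    for (Thread &tp : threads)
      snapshot.push_back (&tp);

    for (Thread *tp : snapshot)
      std::cout << "visiting thread " << tp->num << '\n';
  }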

@@ -2031,38 +2031,38 @@ thread_find_command (const char *arg, int from_tty)

update_thread_list ();

for (thread_info *tp : all_threads ())
for (thread_info &tp : all_threads ())
{
switch_to_inferior_no_thread (tp->inf);
switch_to_inferior_no_thread (tp.inf);

if (tp->name () != nullptr && re_exec (tp->name ()))
if (tp.name () != nullptr && re_exec (tp.name ()))
{
gdb_printf (_("Thread %s has name '%s'\n"),
print_thread_id (tp), tp->name ());
print_thread_id (&tp), tp.name ());
match++;
}

tmp = target_thread_name (tp);
tmp = target_thread_name (&tp);
if (tmp != NULL && re_exec (tmp))
{
gdb_printf (_("Thread %s has target name '%s'\n"),
print_thread_id (tp), tmp);
print_thread_id (&tp), tmp);
match++;
}

std::string name = target_pid_to_str (tp->ptid);
std::string name = target_pid_to_str (tp.ptid);
if (!name.empty () && re_exec (name.c_str ()))
{
gdb_printf (_("Thread %s has target id '%s'\n"),
print_thread_id (tp), name.c_str ());
print_thread_id (&tp), name.c_str ());
match++;
}

tmp = target_extra_thread_info (tp);
tmp = target_extra_thread_info (&tp);
if (tmp != NULL && re_exec (tmp))
{
gdb_printf (_("Thread %s has extra info '%s'\n"),
print_thread_id (tp), tmp);
print_thread_id (&tp), tmp);
match++;
}
}

@@ -2164,9 +2164,9 @@ update_threads_executing (void)
return;
}

for (thread_info *tp : inf->non_exited_threads ())
for (thread_info &tp : inf->non_exited_threads ())
{
if (tp->executing ())
if (tp.executing ())
{
targ->threads_executing = true;
return;

@@ -117,10 +117,10 @@ parse_thread_id (const char *tidstr, const char **end)
inf = current_inferior ();

thread_info *tp = nullptr;
for (thread_info *it : inf->threads ())
if (it->per_inf_num == thr_num)
for (thread_info &it : inf->threads ())
if (it.per_inf_num == thr_num)
{
tp = it;
tp = &it;
break;
}

@@ -86,9 +86,9 @@ x86bsd_dr_set (ptid_t ptid, int regnum, unsigned long value)

DBREG_DRX ((&dbregs), regnum) = value;

for (thread_info *thread : current_inferior ()->non_exited_threads ())
for (thread_info &thread : current_inferior ()->non_exited_threads ())
{
if (gdb_ptrace (PT_SETDBREGS, thread->ptid,
if (gdb_ptrace (PT_SETDBREGS, thread.ptid,
(PTRACE_TYPE_ARG3) &dbregs) == -1)
perror_with_name (_("Couldn't write debug registers"));
}