Fix regression on aarch64-linux gdbserver

Commit 9a03f218 ("Fix gdb.base/watchpoint-unaligned.exp on aarch64")
fixed a watchpoint bug in gdb -- but did not touch the corresponding
code in gdbserver.

This patch moves the gdb code into gdb/nat, so that it can be shared
with gdbserver, and then changes gdbserver to use it, fixing the bug.

This is yet another case where having a single back end would prevent
bugs.

I tested this using the AdaCore internal gdb testsuite.

Bug: https://sourceware.org/bugzilla/show_bug.cgi?id=29423
Approved-By: Luis Machado <luis.machado@arm.com>
Author: Tom Tromey
Date: 2024-04-19 07:54:19 -06:00
Parent: 635d05b88f
Commit: 0ee25f97d2

5 changed files with 126 additions and 158 deletions
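For context, here is a minimal, hypothetical C snippet -- not taken from the patch or from gdb.base/watchpoint-unaligned.exp -- showing the kind of access the shared code has to handle: a single 16-byte store that begins 8 bytes below the watched location, so the trap address reported by the kernel may fall outside the watched range.

/* Hypothetical illustration only; the names buf and pair are made up.
   A 16-byte copy into BUF may be emitted as a single stp on aarch64.
   With a watchpoint on buf[1] (address buf + 8), the reported trap
   address can be either buf or buf + 8 depending on the CPU, which is
   the situation aarch64_stopped_data_address now handles in both gdb
   and gdbserver.  */

#include <stdint.h>
#include <string.h>

static uint64_t buf[2];

int
main (void)
{
  uint64_t pair[2] = { 1, 2 };

  /* In gdb: "watch buf[1]", then continue past this store.  */
  memcpy (buf, pair, sizeof (buf));
  return 0;
}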

gdb/aarch64-nat.c

@@ -224,121 +224,6 @@ aarch64_remove_watchpoint (CORE_ADDR addr, int len, enum target_hw_bp_type type,
  return ret;
}

/* See aarch64-nat.h.  */

bool
aarch64_stopped_data_address (const struct aarch64_debug_reg_state *state,
                              CORE_ADDR addr_trap, CORE_ADDR *addr_p)
{
  bool found = false;
  for (int phase = 0; phase <= 1; ++phase)
    for (int i = aarch64_num_wp_regs - 1; i >= 0; --i)
      {
        if (!(state->dr_ref_count_wp[i]
              && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])))
          {
            /* Watchpoint disabled.  */
            continue;
          }

        const enum target_hw_bp_type type
          = aarch64_watchpoint_type (state->dr_ctrl_wp[i]);
        if (type == hw_execute)
          {
            /* Watchpoint disabled.  */
            continue;
          }

        if (phase == 0)
          {
            /* Phase 0: No hw_write.  */
            if (type == hw_write)
              continue;
          }
        else
          {
            /* Phase 1: Only hw_write.  */
            if (type != hw_write)
              continue;
          }

        const unsigned int offset
          = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
        const unsigned int len
          = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
        const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
        const CORE_ADDR addr_watch_aligned
          = align_down (state->dr_addr_wp[i], AARCH64_HWP_MAX_LEN_PER_REG);
        const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

        /* ADDR_TRAP reports the first address of the memory range
           accessed by the CPU, regardless of what was the memory
           range watched.  Thus, a large CPU access that straddles
           the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
           ADDR_TRAP that is lower than the
           ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

           addr: | 4 | 5 | 6 | 7 | 8 |
                 |---- range watched ----|
                 |----------- range accessed ------------|

           In this case, ADDR_TRAP will be 4.

           The access size also can be larger than that of the watchpoint
           itself.  For instance, the access size of an stp instruction is 16.
           So, if we use stp to store to address p, and set a watchpoint on
           address p + 8, the reported ADDR_TRAP can be p + 8 (observed on
           RK3399 SOC).  But it also can be p (observed on M1 SOC).  Checking
           for this situation introduces the possibility of false positives,
           so we only do this for hw_write watchpoints.  */

        const CORE_ADDR max_access_size = type == hw_write ? 16 : 8;
        const CORE_ADDR addr_watch_base = addr_watch_aligned -
          (max_access_size - AARCH64_HWP_MAX_LEN_PER_REG);
        if (!(addr_trap >= addr_watch_base
              && addr_trap < addr_watch + len))
          {
            /* Not a match.  */
            continue;
          }

        /* To match a watchpoint known to GDB core, we must never
           report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
           range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
           positive on kernels older than 4.10.  See PR
           external/20207.  */
        if (addr_p != nullptr)
          *addr_p = addr_orig;

        if (phase == 0)
          {
            /* Phase 0: Return first match.  */
            return true;
          }

        /* Phase 1.  */
        if (addr_p == nullptr)
          {
            /* First match, and we don't need to report an address.  No need
               to look for other matches.  */
            return true;
          }

        if (!found)
          {
            /* First match, and we need to report an address.  Look for other
               matches.  */
            found = true;
            continue;
          }

        /* More than one match, and we need to return an address.  No need to
           look for further matches.  */
        return false;
      }

  return found;
}

/* Define AArch64 maintenance commands.  */

static void

gdb/aarch64-nat.h

@@ -45,14 +45,6 @@ struct aarch64_debug_reg_state *aarch64_get_debug_reg_state (pid_t pid);
void aarch64_remove_debug_reg_state (pid_t pid);

/* Helper for the "stopped_data_address" target method.  Returns TRUE
   if a hardware watchpoint trap at ADDR_TRAP matches a set
   watchpoint.  The address of the matched watchpoint is returned in
   *ADDR_P.  */

bool aarch64_stopped_data_address (const struct aarch64_debug_reg_state *state,
                                   CORE_ADDR addr_trap, CORE_ADDR *addr_p);

/* Helper functions used by aarch64_nat_target below.  See their
   definitions.  */

gdb/nat/aarch64-hw-point.c

@@ -645,3 +645,118 @@ aarch64_region_ok_for_watchpoint (CORE_ADDR addr, int len)
     the checking is costly.  */
  return 1;
}

/* See nat/aarch64-hw-point.h.  */

bool
aarch64_stopped_data_address (const struct aarch64_debug_reg_state *state,
                              CORE_ADDR addr_trap, CORE_ADDR *addr_p)
{
  bool found = false;
  for (int phase = 0; phase <= 1; ++phase)
    for (int i = aarch64_num_wp_regs - 1; i >= 0; --i)
      {
        if (!(state->dr_ref_count_wp[i]
              && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])))
          {
            /* Watchpoint disabled.  */
            continue;
          }

        const enum target_hw_bp_type type
          = aarch64_watchpoint_type (state->dr_ctrl_wp[i]);
        if (type == hw_execute)
          {
            /* Watchpoint disabled.  */
            continue;
          }

        if (phase == 0)
          {
            /* Phase 0: No hw_write.  */
            if (type == hw_write)
              continue;
          }
        else
          {
            /* Phase 1: Only hw_write.  */
            if (type != hw_write)
              continue;
          }

        const unsigned int offset
          = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
        const unsigned int len
          = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
        const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
        const CORE_ADDR addr_watch_aligned
          = align_down (state->dr_addr_wp[i], AARCH64_HWP_MAX_LEN_PER_REG);
        const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

        /* ADDR_TRAP reports the first address of the memory range
           accessed by the CPU, regardless of what was the memory
           range watched.  Thus, a large CPU access that straddles
           the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
           ADDR_TRAP that is lower than the
           ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

           addr: | 4 | 5 | 6 | 7 | 8 |
                 |---- range watched ----|
                 |----------- range accessed ------------|

           In this case, ADDR_TRAP will be 4.

           The access size also can be larger than that of the watchpoint
           itself.  For instance, the access size of an stp instruction is 16.
           So, if we use stp to store to address p, and set a watchpoint on
           address p + 8, the reported ADDR_TRAP can be p + 8 (observed on
           RK3399 SOC).  But it also can be p (observed on M1 SOC).  Checking
           for this situation introduces the possibility of false positives,
           so we only do this for hw_write watchpoints.  */

        const CORE_ADDR max_access_size = type == hw_write ? 16 : 8;
        const CORE_ADDR addr_watch_base = addr_watch_aligned -
          (max_access_size - AARCH64_HWP_MAX_LEN_PER_REG);
        if (!(addr_trap >= addr_watch_base
              && addr_trap < addr_watch + len))
          {
            /* Not a match.  */
            continue;
          }

        /* To match a watchpoint known to GDB core, we must never
           report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
           range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
           positive on kernels older than 4.10.  See PR
           external/20207.  */
        if (addr_p != nullptr)
          *addr_p = addr_orig;

        if (phase == 0)
          {
            /* Phase 0: Return first match.  */
            return true;
          }

        /* Phase 1.  */
        if (addr_p == nullptr)
          {
            /* First match, and we don't need to report an address.  No need
               to look for other matches.  */
            return true;
          }

        if (!found)
          {
            /* First match, and we need to report an address.  Look for other
               matches.  */
            found = true;
            continue;
          }

        /* More than one match, and we need to return an address.  No need to
           look for further matches.  */
        return false;
      }

  return found;
}

gdb/nat/aarch64-hw-point.h

@@ -110,6 +110,14 @@ unsigned int aarch64_watchpoint_offset (unsigned int ctrl);
unsigned int aarch64_watchpoint_length (unsigned int ctrl);
enum target_hw_bp_type aarch64_watchpoint_type (unsigned int ctrl);

/* Helper for the "stopped_data_address" target method.  Returns TRUE
   if a hardware watchpoint trap at ADDR_TRAP matches a set
   watchpoint.  The address of the matched watchpoint is returned in
   *ADDR_P.  */

bool aarch64_stopped_data_address (const struct aarch64_debug_reg_state *state,
                                   CORE_ADDR addr_trap, CORE_ADDR *addr_p);

int aarch64_handle_breakpoint (enum target_hw_bp_type type, CORE_ADDR addr,
                               int len, int is_insert, ptid_t ptid,
                               struct aarch64_debug_reg_state *state);

gdbserver/linux-aarch64-low.cc

@@ -576,41 +576,9 @@ aarch64_target::low_stopped_data_address ()
  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));

  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
        = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch_aligned
          && addr_trap < addr_watch + len)
        {
          /* ADDR_TRAP reports the first address of the memory range
             accessed by the CPU, regardless of what was the memory
             range watched.  Thus, a large CPU access that straddles
             the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
             ADDR_TRAP that is lower than the
             ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

             addr: | 4 | 5 | 6 | 7 | 8 |
                   |---- range watched ----|
                   |----------- range accessed ------------|

             In this case, ADDR_TRAP will be 4.

             To match a watchpoint known to GDB core, we must never
             report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
             range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
             positive on kernels older than 4.10.  See PR
             external/20207.  */
          return addr_orig;
        }
    }

  CORE_ADDR result;
  if (aarch64_stopped_data_address (state, addr_trap, &result))
    return result;

  return (CORE_ADDR) 0;
}