libdebugger: Add SMP support for ARM

Make the aarch64 SMP support a separate file and share it between
aarch64 and arm.
Chris Johns
2025-09-20 10:05:12 +10:00
committed by Gedare Bloom
parent 75e7f5cb80
commit f30157f988
5 changed files with 250 additions and 156 deletions


@@ -51,6 +51,7 @@ extern char bsp_section_fast_text_end[];
#include <rtems/debugger/rtems-debugger-bsp.h>
#include "rtems-debugger-smp.h"
#include "rtems-debugger-target.h"
#include "rtems-debugger-threads.h"
@@ -58,155 +59,6 @@ extern char bsp_section_fast_text_end[];
#include <rtems/bspIo.h>
#endif
/*
 * Structure used to manage a task executing a function on available cores on
 * a scheduler.
 */
typedef struct {
  rtems_id allCPUsBarrier;
  rtems_task_entry work_function;
  rtems_task_argument arg;
  rtems_status_code sc;
} run_across_cpus_context;

/*
 * The function that runs as the body of the task which moves itself among the
 * various cores registered to a scheduler.
 */
static rtems_task run_across_cpus_task( rtems_task_argument arg )
{
  uint32_t released = 0;
  rtems_status_code sc;
  run_across_cpus_context *ctx = (run_across_cpus_context *) arg;
  cpu_set_t set;
  cpu_set_t scheduler_set;
  rtems_id scheduler_id;

  sc = rtems_task_get_scheduler( RTEMS_SELF, &scheduler_id );
  if ( sc != RTEMS_SUCCESSFUL ) {
    ctx->sc = sc;
    rtems_task_exit();
  }

  CPU_ZERO( &scheduler_set );
  sc = rtems_scheduler_get_processor_set(
    scheduler_id,
    sizeof( scheduler_set ),
    &scheduler_set
  );
  if ( sc != RTEMS_SUCCESSFUL ) {
    ctx->sc = sc;
    rtems_task_exit();
  }

  for (
    int cpu_index = 0;
    cpu_index < rtems_scheduler_get_processor_maximum();
    cpu_index++
  ) {
    if ( !CPU_ISSET( cpu_index, &scheduler_set ) ) {
      continue;
    }

    CPU_ZERO( &set );
    CPU_SET( cpu_index, &set );
    sc = rtems_task_set_affinity( RTEMS_SELF, sizeof( set ), &set );
    if ( sc != RTEMS_SUCCESSFUL ) {
      ctx->sc = sc;
      rtems_task_exit();
    }

    /* execute task on selected CPU */
    ctx->work_function( ctx->arg );
  }

  sc = rtems_barrier_release( ctx->allCPUsBarrier, &released );
  if ( sc != RTEMS_SUCCESSFUL ) {
    ctx->sc = sc;
  }

  rtems_task_exit();
}

/*
 * The function used to run a provided function with arbitrary argument across
 * all cores registered to the current scheduler. This is similar to the Linux
 * kernel's on_each_cpu() call and always waits for the task to complete before
 * returning.
 */
static rtems_status_code run_across_cpus(
  rtems_task_entry task_entry,
  rtems_task_argument arg
)
{
  rtems_status_code sc;
  rtems_id Task_id;
  run_across_cpus_context ctx;

  ctx.work_function = task_entry;
  ctx.arg = arg;
  ctx.sc = RTEMS_SUCCESSFUL;
  memset( &ctx.allCPUsBarrier, 0, sizeof( ctx.allCPUsBarrier ) );

  sc = rtems_barrier_create(
    rtems_build_name( 'B', 'c', 'p', 'u' ),
    RTEMS_BARRIER_MANUAL_RELEASE,
    2,
    &ctx.allCPUsBarrier
  );
  if ( sc != RTEMS_SUCCESSFUL ) {
    return sc;
  }

  sc = rtems_task_create(
    rtems_build_name( 'T', 'c', 'p', 'u' ),
    1,
    RTEMS_MINIMUM_STACK_SIZE * 2,
    RTEMS_DEFAULT_MODES,
    RTEMS_FLOATING_POINT | RTEMS_DEFAULT_ATTRIBUTES,
    &Task_id
  );
  if ( sc != RTEMS_SUCCESSFUL ) {
    rtems_barrier_delete( ctx.allCPUsBarrier );
    return sc;
  }

  sc = rtems_task_start(
    Task_id,
    run_across_cpus_task,
    ( rtems_task_argument ) &ctx
  );
  if ( sc != RTEMS_SUCCESSFUL ) {
    rtems_task_delete( Task_id );
    rtems_barrier_delete( ctx.allCPUsBarrier );
    return sc;
  }

  /* wait on task */
  sc = rtems_barrier_wait( ctx.allCPUsBarrier, RTEMS_NO_TIMEOUT );
  if ( sc != RTEMS_SUCCESSFUL ) {
    rtems_task_delete( Task_id );
    rtems_barrier_delete( ctx.allCPUsBarrier );
    return sc;
  }

  rtems_barrier_delete( ctx.allCPUsBarrier );

  if ( ctx.sc != RTEMS_SUCCESSFUL ) {
    return ctx.sc;
  }

  return sc;
}

/*
 * Number of registers.
 */
@@ -1338,7 +1190,7 @@ int rtems_debugger_target_enable( void )
  aarch64_debug_break_clear();
#endif
  aarch64_debug_disable_debug_exceptions();
  sc = run_across_cpus(
  sc = rtems_debugger_cpu_run_all(
    setup_debugger_on_cpu,
    ( rtems_task_argument ) &init_error
  );
@@ -1384,7 +1236,7 @@ int rtems_debugger_target_disable( void )
  aarch64_debug_break_unload();
  aarch64_debug_break_clear();
#endif
  sc = run_across_cpus(
  sc = rtems_debugger_cpu_run_all(
    teardown_debugger_on_cpu,
    ( rtems_task_argument ) &deinit_error
  );


@@ -41,6 +41,7 @@
#include <rtems/debugger/rtems-debugger-bsp.h>
#include "rtems-debugger-smp.h"
#include "rtems-debugger-target.h"
#include "rtems-debugger-threads.h"
@@ -1975,16 +1976,29 @@ rtems_debugger_get_int_reg(rtems_debugger_thread* thread, size_t reg)
  return value;
}

static rtems_task
rtems_debugger_setup_on_cpu(rtems_task_argument arg)
{
  (void) arg;
  rtems_debugger_target_set_mmu();
  rtems_debugger_target_set_vectors();
}

int
rtems_debugger_target_enable(void)
{
  rtems_interrupt_lock_context lock_context;
  rtems_status_code sc;
  rtems_status_code error = RTEMS_SUCCESSFUL;
  arm_debug_break_unload();
  arm_debug_break_clear_all();
  rtems_interrupt_lock_acquire(&target_lock, &lock_context);
  rtems_debugger_target_set_mmu();
  rtems_debugger_target_set_vectors();
  rtems_interrupt_lock_release(&target_lock, &lock_context);
  sc = rtems_debugger_cpu_run_all(
    rtems_debugger_setup_on_cpu, (rtems_task_argument) &error);
  if (sc != RTEMS_SUCCESSFUL) {
    return -1;
  }
  if (error != RTEMS_SUCCESSFUL) {
    return -1;
  }
  debug_session_active = true;
  return 0;
}


@@ -0,0 +1,175 @@
/*
* Copyright (c) 2021 Kinsey Moore
* Copyright (c) 2025 Chris Johns
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <rtems.h>
#include <rtems/score/cpu.h>
#include <rtems/score/threadimpl.h>
#include "rtems-debugger-smp.h"
#include "rtems-debugger-target.h"
#include "rtems-debugger-threads.h"
/*
 * Structure used to manage a task executing a function on available
 * cores on a scheduler.
 */
typedef struct {
  rtems_id all_cpus_barrier;
  rtems_task_entry worker;
  rtems_task_argument arg;
  rtems_status_code sc;
} rtems_debugger_cpu_run_context;

/*
 * The function that runs as the body of the task which moves itself
 * among the various cores registered to a scheduler.
 */
static rtems_task
rtems_debugger_cpu_run_body(rtems_task_argument arg)
{
  uint32_t released = 0;
  rtems_status_code sc;
  rtems_debugger_cpu_run_context *ctx = (rtems_debugger_cpu_run_context *) arg;
  cpu_set_t set;
  cpu_set_t scheduler_set;
  rtems_id scheduler_id;

  sc = rtems_task_get_scheduler(RTEMS_SELF, &scheduler_id);
  if (sc != RTEMS_SUCCESSFUL) {
    ctx->sc = sc;
    rtems_task_exit();
  }

  CPU_ZERO(&scheduler_set);
  sc = rtems_scheduler_get_processor_set(
    scheduler_id, sizeof(scheduler_set), &scheduler_set);
  if (sc != RTEMS_SUCCESSFUL) {
    ctx->sc = sc;
    rtems_task_exit();
  }

  for (int cpu_index = 0;
       cpu_index < rtems_scheduler_get_processor_maximum();
       cpu_index++) {
    if (!CPU_ISSET(cpu_index, &scheduler_set)) {
      continue;
    }

    CPU_ZERO(&set);
    CPU_SET(cpu_index, &set);
    sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(set), &set);
    if (sc != RTEMS_SUCCESSFUL) {
      ctx->sc = sc;
      rtems_task_exit();
    }

    /* execute task on selected CPU */
    ctx->worker(ctx->arg);
  }

  sc = rtems_barrier_release(ctx->all_cpus_barrier, &released);
  if (sc != RTEMS_SUCCESSFUL) {
    ctx->sc = sc;
  }

  rtems_task_exit();
}

/*
 * The function used to run a provided function with arbitrary argument across
 * all cores registered to the current scheduler. This is similar to the Linux
 * kernel's on_each_cpu() call and always waits for the task to complete before
 * returning.
 */
rtems_status_code
rtems_debugger_cpu_run_all(rtems_task_entry task_entry, rtems_task_argument arg)
{
  rtems_status_code sc;
  rtems_id task_id;
  rtems_debugger_cpu_run_context ctx;

  memset(&ctx, 0, sizeof(ctx));
  ctx.worker = task_entry;
  ctx.arg = arg;
  ctx.sc = RTEMS_SUCCESSFUL;

  sc = rtems_barrier_create(
    rtems_build_name('D', 'B', 'b', 'r'), RTEMS_BARRIER_MANUAL_RELEASE, 2,
    &ctx.all_cpus_barrier);
  if (sc != RTEMS_SUCCESSFUL) {
    return sc;
  }

  sc = rtems_task_create(
    rtems_build_name('D', 'B', 't', 'k'), 1, RTEMS_MINIMUM_STACK_SIZE * 2,
    RTEMS_DEFAULT_MODES, RTEMS_FLOATING_POINT | RTEMS_DEFAULT_ATTRIBUTES,
    &task_id);
  if (sc != RTEMS_SUCCESSFUL) {
    rtems_barrier_delete(ctx.all_cpus_barrier);
    return sc;
  }

  sc = rtems_task_start(
    task_id, rtems_debugger_cpu_run_body, (rtems_task_argument) &ctx);
  if (sc != RTEMS_SUCCESSFUL) {
    rtems_task_delete(task_id);
    rtems_barrier_delete(ctx.all_cpus_barrier);
    return sc;
  }

  /* wait on task */
  sc = rtems_barrier_wait(ctx.all_cpus_barrier, RTEMS_NO_TIMEOUT);
  if (sc != RTEMS_SUCCESSFUL) {
    rtems_task_delete(task_id);
    rtems_barrier_delete(ctx.all_cpus_barrier);
    return sc;
  }

  rtems_barrier_delete(ctx.all_cpus_barrier);

  if (ctx.sc != RTEMS_SUCCESSFUL) {
    return ctx.sc;
  }

  return sc;
}


@@ -0,0 +1,51 @@
/*
* Copyright (c) 2025 Chris Johns <chrisj@rtems.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
 * Debugger for RTEMS.
 */

#ifndef _RTEMS_DEBUGGER_SMP_h
#define _RTEMS_DEBUGGER_SMP_h

#include <rtems.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/*
 * The function used to run a provided function with arbitrary argument across
 * all cores registered to the current scheduler. This is similar to the Linux
 * kernel's on_each_cpu() call and always waits for the task to complete before
 * returning.
 */
rtems_status_code rtems_debugger_cpu_run_all(
  rtems_task_entry task_entry, rtems_task_argument arg);

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_DEBUGGER_SMP_h */
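
For reference, a minimal usage sketch of this API: the caller hands rtems_debugger_cpu_run_all a worker with the rtems_task_entry signature and collects per-CPU failures through the argument, mirroring the arm and aarch64 call sites above. The worker and caller names below are illustrative only and are not part of this commit.

#include <rtems.h>
#include <rtems/bspIo.h>

#include "rtems-debugger-smp.h"

/* Hypothetical worker: run once on each processor of the current scheduler.
 * Failures are reported back to the caller through the argument. */
static rtems_task example_debug_worker(rtems_task_argument arg)
{
  rtems_status_code *error = (rtems_status_code *) arg;
  printk("debugger: per-CPU setup on CPU %u\n",
         (unsigned) rtems_scheduler_get_processor());
  (void) error; /* on failure: *error = RTEMS_UNSATISFIED; */
}

/* Hypothetical caller: fails if the run machinery or any per-CPU worker failed. */
static int example_target_enable(void)
{
  rtems_status_code error = RTEMS_SUCCESSFUL;
  rtems_status_code sc;

  sc = rtems_debugger_cpu_run_all(
    example_debug_worker, (rtems_task_argument) &error);
  if (sc != RTEMS_SUCCESSFUL || error != RTEMS_SUCCESSFUL) {
    return -1;
  }
  return 0;
}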


@@ -4,6 +4,7 @@ cflags:
- ${COVERAGE_COMPILER_FLAGS}
copyrights:
- Copyright (C) 2020 embedded brains GmbH & Co. KG
- Copyright (C) 2025 Chris Johns
cppflags: []
cxxflags:
- ${COVERAGE_COMPILER_FLAGS}
@@ -29,6 +30,7 @@ source:
- cpukit/libdebugger/rtems-debugger-remote-tcp.c
- cpukit/libdebugger/rtems-debugger-remote.c
- cpukit/libdebugger/rtems-debugger-server.c
- cpukit/libdebugger/rtems-debugger-smp.c
- cpukit/libdebugger/rtems-debugger-target.c
- cpukit/libdebugger/rtems-debugger-threads.c
target: debugger