Implemented signal fastpath on AARCH64 (#793)

The signal fastpath aims to optimize the
seL4_Signal operation. In this commit, it is
implemented for MCS AARCH64 (SMP and non-SMP).
The fastpath does not cover the case where
signalling unblocks a higher-priority thread
and makes it available for scheduling (on any
core). Nor does it fastpath the case where
the signalled thread is donated a scheduling
context while its FPU state is resident in
the FPU of a core.

Co-authored-by: Shane Kadish <shane.kadish@csiro.au>
Signed-off-by: Alwin Joshy <joshyalwin@gmail.com>
This commit is contained in:
alwin-joshy
2023-01-10 10:15:39 +11:00
committed by GitHub
parent 41a1399348
commit 069c937272
11 changed files with 277 additions and 16 deletions

View File

@@ -287,6 +287,13 @@ config_string(
UNQUOTE UNQUOTE
) )
# Fastpath for seL4_Signal on notifications. Requires MCS and the generic
# fastpath, is AArch64-only, and is excluded from verification builds.
config_option(
    KernelSignalFastpath SIGNAL_FASTPATH "Enable notification signal fastpath"
    DEFAULT OFF
    DEPENDS "KernelIsMCS; KernelFastpath; KernelSel4ArchAarch64; NOT KernelVerificationBuild"
    DEFAULT_DISABLED OFF
)
find_file( find_file(
KernelDomainSchedule default_domain.c KernelDomainSchedule default_domain.c
PATHS src/config PATHS src/config

View File

@@ -14,6 +14,12 @@
void slowpath(syscall_t syscall) void slowpath(syscall_t syscall)
NORETURN; NORETURN;
#ifdef CONFIG_SIGNAL_FASTPATH
static inline
void fastpath_signal(word_t cptr, word_t msgInfo)
NORETURN;
#endif
static inline static inline
void fastpath_call(word_t cptr, word_t r_msgInfo) void fastpath_call(word_t cptr, word_t r_msgInfo)
NORETURN; NORETURN;

View File

@@ -29,6 +29,9 @@ VISIBLE SECTION(".vectors.text");
void c_handle_fastpath_call(word_t cptr, word_t msgInfo) void c_handle_fastpath_call(word_t cptr, word_t msgInfo)
VISIBLE SECTION(".vectors.text"); VISIBLE SECTION(".vectors.text");
void c_handle_fastpath_signal(word_t cptr, word_t msgInfo)
VISIBLE SECTION(".vectors.text");
#ifdef CONFIG_KERNEL_MCS #ifdef CONFIG_KERNEL_MCS
void c_handle_fastpath_reply_recv(word_t cptr, word_t msgInfo, word_t reply) void c_handle_fastpath_reply_recv(word_t cptr, word_t msgInfo, word_t reply)
#else #else

View File

@@ -6,6 +6,70 @@
#pragma once #pragma once
#ifdef CONFIG_KERNEL_MCS
#include <object/reply.h>
#include <object/notification.h>
#endif
#ifdef CONFIG_SIGNAL_FASTPATH
/* Equivalent to schedContext_donate without migrateTCB().
 *
 * Binds sc to dest only if dest has no scheduling context yet; the caller
 * (fastpath_signal) passes dest's own SC when one is already bound, so the
 * conditional is then a no-op. On SMP the affinity update runs
 * unconditionally, mirroring migrateTCB() minus the FPU save, which the
 * caller has already ruled out by slowpathing FPU-resident threads. */
static inline void maybeDonateSchedContext_fp(tcb_t *dest, sched_context_t *sc)
{
    if (!dest->tcbSchedContext) {
        sc->scTcb = dest;
        dest->tcbSchedContext = sc;
    }
#ifdef ENABLE_SMP_SUPPORT
#ifdef CONFIG_DEBUG_BUILD
    /* Keep the per-core debug TCB list consistent across the affinity change. */
    tcbDebugRemove(dest);
#endif
    /* The part of migrateTCB() that doesn't involve the slowpathed FPU save */
    dest->tcbAffinity = sc->scCore;
#ifdef CONFIG_DEBUG_BUILD
    tcbDebugAppend(dest);
#endif
#endif
}
/* Remove dest from the endpoint queue it is blocked on and unlink any reply
 * object associated with its thread state. Assumes dest is in
 * ThreadState_BlockedOnReceive (checked by the caller). */
static inline void cancelIPC_fp(tcb_t *dest)
{
    endpoint_t *epptr = EP_PTR(thread_state_get_blockingObject(dest->tcbState));
    tcb_queue_t q = tcbEPDequeue(dest, ep_ptr_get_queue(epptr));

    ep_ptr_set_queue(epptr, q);
    if (q.head == NULL) {
        /* Queue drained: the endpoint goes back to idle. */
        endpoint_ptr_set_state(epptr, EPState_Idle);
    }

    reply_t *r = REPLY_PTR(thread_state_get_replyObject(dest->tcbState));
    if (r) {
        reply_unlink(r, dest);
    }
}
/* Remove dest from a notification's waiting queue, marking the notification
 * idle when the queue becomes empty. */
static inline void ntfn_queue_dequeue_fp(tcb_t *dest, notification_t *ntfn_ptr)
{
    tcb_queue_t q = {
        .head = (tcb_t *)notification_ptr_get_ntfnQueue_head(ntfn_ptr),
        .end = (tcb_t *)notification_ptr_get_ntfnQueue_tail(ntfn_ptr),
    };

    q = tcbEPDequeue(dest, q);
    notification_ptr_set_ntfnQueue_head(ntfn_ptr, (word_t)q.head);
    notification_ptr_set_ntfnQueue_tail(ntfn_ptr, (word_t)q.end);

    if (q.head == NULL) {
        notification_ptr_set_state(ntfn_ptr, NtfnState_Idle);
    }
}
#endif
/* Fastpath cap lookup. Returns a null_cap on failure. */ /* Fastpath cap lookup. Returns a null_cap on failure. */
static inline cap_t FORCE_INLINE lookup_fp(cap_t cap, cptr_t cptr) static inline cap_t FORCE_INLINE lookup_fp(cap_t cap, cptr_t cptr)
{ {

View File

@@ -19,6 +19,12 @@ static inline tcb_queue_t PURE ep_ptr_get_queue(endpoint_t *epptr)
return queue; return queue;
} }
/* Write a tcb_queue_t back into an endpoint's head/tail queue fields
 * (inverse of ep_ptr_get_queue). */
static inline void ep_ptr_set_queue(endpoint_t *epptr, tcb_queue_t queue)
{
    endpoint_ptr_set_epQueue_head(epptr, (word_t)queue.head);
    endpoint_ptr_set_epQueue_tail(epptr, (word_t)queue.end);
}
#ifdef CONFIG_KERNEL_MCS #ifdef CONFIG_KERNEL_MCS
void sendIPC(bool_t blocking, bool_t do_call, word_t badge, void sendIPC(bool_t blocking, bool_t do_call, word_t badge,
bool_t canGrant, bool_t canGrantReply, bool_t canDonate, tcb_t *thread, bool_t canGrant, bool_t canGrantReply, bool_t canDonate, tcb_t *thread,

View File

@@ -36,4 +36,8 @@ static inline void maybeReturnSchedContext(notification_t *ntfnPtr, tcb_t *tcb)
} }
#endif #endif
/* Mark a notification active, recording badge as its message identifier.
 * Callers that need to accumulate badges OR them in before calling. */
static inline void ntfn_set_active(notification_t *ntfnPtr, word_t badge)
{
    notification_ptr_set_state(ntfnPtr, NtfnState_Active);
    notification_ptr_set_ntfnMsgIdentifier(ntfnPtr, badge);
}

View File

@@ -200,6 +200,10 @@ lel_syscall:
#ifdef CONFIG_FASTPATH #ifdef CONFIG_FASTPATH
cmp x7, #SYSCALL_CALL cmp x7, #SYSCALL_CALL
b.eq c_handle_fastpath_call b.eq c_handle_fastpath_call
#ifdef CONFIG_SIGNAL_FASTPATH
cmp x7, #SYSCALL_SEND
b.eq c_handle_fastpath_signal
#endif /* CONFIG_SIGNAL_FASTPATH */
cmp x7, #SYSCALL_REPLY_RECV cmp x7, #SYSCALL_REPLY_RECV
#ifdef CONFIG_KERNEL_MCS #ifdef CONFIG_KERNEL_MCS
mov x2, x6 mov x2, x6

View File

@@ -154,6 +154,24 @@ void VISIBLE c_handle_fastpath_call(word_t cptr, word_t msgInfo)
UNREACHABLE(); UNREACHABLE();
} }
#ifdef CONFIG_KERNEL_MCS
#ifdef CONFIG_SIGNAL_FASTPATH
/* C entry point for the seL4_Signal fastpath. The syscall vector branches
 * here directly when the syscall number is SYSCALL_SEND; fastpath_signal()
 * either completes the signal and restores to userspace or falls back to
 * the slowpath, so this function never returns. */
ALIGN(L1_CACHE_LINE_SIZE)
void VISIBLE c_handle_fastpath_signal(word_t cptr, word_t msgInfo)
{
    NODE_LOCK_SYS;

    c_entry_hook();
#ifdef TRACK_KERNEL_ENTRIES
    /* This entry is dispatched on SYSCALL_SEND, so record SysSend (the
     * original recorded SysCall, mislabelling the entry in benchmarks). */
    benchmark_debug_syscall_start(cptr, msgInfo, SysSend);
    ksKernelEntry.is_fastpath = 1;
#endif /* TRACK_KERNEL_ENTRIES */
    fastpath_signal(cptr, msgInfo);
    UNREACHABLE();
}
#endif /* CONFIG_SIGNAL_FASTPATH */
#endif /* CONFIG_KERNEL_MCS */
ALIGN(L1_CACHE_LINE_SIZE) ALIGN(L1_CACHE_LINE_SIZE)
#ifdef CONFIG_KERNEL_MCS #ifdef CONFIG_KERNEL_MCS
void VISIBLE c_handle_fastpath_reply_recv(word_t cptr, word_t msgInfo, word_t reply) void VISIBLE c_handle_fastpath_reply_recv(word_t cptr, word_t msgInfo, word_t reply)

View File

@@ -6,9 +6,6 @@
#include <config.h> #include <config.h>
#include <fastpath/fastpath.h> #include <fastpath/fastpath.h>
#ifdef CONFIG_KERNEL_MCS
#include <object/reply.h>
#endif
#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES #ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
#include <benchmark/benchmark_track.h> #include <benchmark/benchmark_track.h>
@@ -521,3 +518,167 @@ void NORETURN fastpath_reply_recv(word_t cptr, word_t msgInfo)
fastpath_restore(badge, msgInfo, NODE_STATE(ksCurThread)); fastpath_restore(badge, msgInfo, NODE_STATE(ksCurThread));
} }
#ifdef CONFIG_SIGNAL_FASTPATH
#ifdef CONFIG_ARCH_ARM
static inline
FORCE_INLINE
#endif
/* Fastpath for seL4_Signal on a notification capability. Never returns:
 * it either restores directly to userspace or falls back to
 * slowpath(SysSend). msgInfo is unused here but kept to match the fastpath
 * entry calling convention. */
void NORETURN fastpath_signal(word_t cptr, word_t msgInfo)
{
    word_t fault_type;
    sched_context_t *sc = NULL;
    bool_t schedulable = false;
    bool_t crossnode = false;
    bool_t idle = false;
    tcb_t *dest = NULL;

    /* Get fault type. */
    fault_type = seL4_Fault_get_seL4_FaultType(NODE_STATE(ksCurThread)->tcbFault);

    /* Check there's no saved fault. Can be removed if the current thread can't
     * have a fault while invoking the fastpath */
    if (unlikely(fault_type != seL4_Fault_NullFault)) {
        slowpath(SysSend);
    }

    /* Lookup the cap */
    cap_t cap = lookup_fp(TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCTable)->cap, cptr);

    /* Check it's a notification */
    if (unlikely(!cap_capType_equals(cap, cap_notification_cap))) {
        slowpath(SysSend);
    }

    /* Check that we are allowed to send to this cap */
    if (unlikely(!cap_notification_cap_get_capNtfnCanSend(cap))) {
        slowpath(SysSend);
    }

    /* Check that the current domain hasn't expired */
    if (unlikely(isCurDomainExpired())) {
        slowpath(SysSend);
    }

    /* Get the notification address */
    notification_t *ntfnPtr = NTFN_PTR(cap_notification_cap_get_capNtfnPtr(cap));

    /* Get the notification state */
    uint32_t ntfnState = notification_ptr_get_state(ntfnPtr);

    /* Get the notification badge */
    word_t badge = cap_notification_cap_get_capNtfnBadge(cap);

    switch (ntfnState) {
    case NtfnState_Active:
        /* Already active and nobody waiting: just OR the badge in. */
#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
        ksKernelEntry.is_fastpath = true;
#endif
        ntfn_set_active(ntfnPtr, badge | notification_ptr_get_ntfnMsgIdentifier(ntfnPtr));
        restore_user_context();
        UNREACHABLE();
    case NtfnState_Idle:
        dest = (tcb_t *) notification_ptr_get_ntfnBoundTCB(ntfnPtr);

        /* No bound TCB blocked on an endpoint: make the notification active
         * and return; there is no thread to wake. */
        if (!dest || thread_state_ptr_get_tsType(&dest->tcbState) != ThreadState_BlockedOnReceive) {
#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
            ksKernelEntry.is_fastpath = true;
#endif
            ntfn_set_active(ntfnPtr, badge);
            restore_user_context();
            UNREACHABLE();
        }
        idle = true;
        break;
    case NtfnState_Waiting:
        /* Wake the head of the notification's waiting queue. */
        dest = TCB_PTR(notification_ptr_get_ntfnQueue_head(ntfnPtr));
        break;
    default:
        fail("Invalid notification state");
    }

    /* Get the bound SC of the signalled thread */
    sc = dest->tcbSchedContext;

    /* If the signalled thread doesn't have a bound SC, check if one can be
     * donated from the notification. If not, go to the slowpath */
    if (!sc) {
        sc = SC_PTR(notification_ptr_get_ntfnSchedContext(ntfnPtr));
        if (sc == NULL || sc->scTcb != NULL) {
            slowpath(SysSend);
        }
        /* Slowpath the case where dest has its FPU context in the FPU of a core */
#if defined(ENABLE_SMP_SUPPORT) && defined(CONFIG_HAVE_FPU)
        if (nativeThreadUsingFPU(dest)) {
            slowpath(SysSend);
        }
#endif
    }

    /* Only fastpath signal to threads which will not become the new highest prio thread on the
     * core of their SC, even if the currently running thread on the core is the idle thread. */
    if (NODE_STATE_ON_CORE(ksCurThread, sc->scCore)->tcbPriority < dest->tcbPriority) {
        slowpath(SysSend);
    }

    /* Simplified schedContext_resume that does not change state and reverts to the
     * slowpath in cases where the SC does not have sufficient budget, as this case
     * adds extra scheduler logic. Normally, this is done after donation of SC
     * but after tweaking it, I don't see anything executed in schedContext_donate
     * that will affect the conditions of this check */
    if (sc->scRefillMax > 0) {
        if (!(refill_ready(sc) && refill_sufficient(sc, 0))) {
            slowpath(SysSend);
        }
        schedulable = true;
    }

    /* Check if signal is cross-core or cross-domain */
    if (ksCurDomain != dest->tcbDomain SMP_COND_STATEMENT( || sc->scCore != getCurrentCPUIndex())) {
        crossnode = true;
    }

    /* Point of no return: from here on every path must complete the signal;
     * no further fallback to the slowpath is possible. */
#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.is_fastpath = true;
#endif

    if (idle) {
        /* Cancel the IPC that the signalled thread is waiting on */
        cancelIPC_fp(dest);
    } else {
        /* Dequeue dest from the notification queue */
        ntfn_queue_dequeue_fp(dest, ntfnPtr);
    }

    /* Wake up the signalled thread and transfer badge */
    setRegister(dest, badgeRegister, badge);
    thread_state_ptr_set_tsType_np(&dest->tcbState, ThreadState_Running);

    /* Donate SC if necessary. The checks for this were already done before
     * the point of no return */
    maybeDonateSchedContext_fp(dest, sc);

    /* Left this in the same form as the slowpath. Not sure if optimal */
    if (sc_sporadic(dest->tcbSchedContext)) {
        assert(dest->tcbSchedContext != NODE_STATE(ksCurSC));
        if (dest->tcbSchedContext != NODE_STATE(ksCurSC)) {
            refill_unblock_check(dest->tcbSchedContext);
        }
    }

    /* If dest was already not schedulable prior to the budget check,
     * the slowpath doesn't seem to do anything special besides just
     * not scheduling the dest thread. */
    if (schedulable) {
        if (NODE_STATE(ksCurThread)->tcbPriority > dest->tcbPriority || crossnode) {
            SCHED_ENQUEUE(dest);
        } else {
            SCHED_APPEND(dest);
        }
    }

    restore_user_context();
}
#endif

View File

@@ -16,12 +16,6 @@
#include <object/endpoint.h> #include <object/endpoint.h>
#include <object/tcb.h> #include <object/tcb.h>
static inline void ep_ptr_set_queue(endpoint_t *epptr, tcb_queue_t queue)
{
endpoint_ptr_set_epQueue_head(epptr, (word_t)queue.head);
endpoint_ptr_set_epQueue_tail(epptr, (word_t)queue.end);
}
#ifdef CONFIG_KERNEL_MCS #ifdef CONFIG_KERNEL_MCS
void sendIPC(bool_t blocking, bool_t do_call, word_t badge, void sendIPC(bool_t blocking, bool_t do_call, word_t badge,
bool_t canGrant, bool_t canGrantReply, bool_t canDonate, tcb_t *thread, endpoint_t *epptr) bool_t canGrant, bool_t canGrantReply, bool_t canDonate, tcb_t *thread, endpoint_t *epptr)

View File

@@ -32,12 +32,6 @@ static inline void ntfn_ptr_set_queue(notification_t *ntfnPtr, tcb_queue_t ntfn_
notification_ptr_set_ntfnQueue_tail(ntfnPtr, (word_t)ntfn_queue.end); notification_ptr_set_ntfnQueue_tail(ntfnPtr, (word_t)ntfn_queue.end);
} }
static inline void ntfn_set_active(notification_t *ntfnPtr, word_t badge)
{
notification_ptr_set_state(ntfnPtr, NtfnState_Active);
notification_ptr_set_ntfnMsgIdentifier(ntfnPtr, badge);
}
#ifdef CONFIG_KERNEL_MCS #ifdef CONFIG_KERNEL_MCS
static inline void maybeDonateSchedContext(tcb_t *tcb, notification_t *ntfnPtr) static inline void maybeDonateSchedContext(tcb_t *tcb, notification_t *ntfnPtr)
{ {