FPU: Save and restore FPU state based on TCB flag

Remove fault-based FPU state saving and loading.

Signed-off-by: Indan Zupancic <indan@nul.nu>
Signed-off-by: Gerwin Klein <gerwin.klein@proofcraft.systems>
Author: Indan Zupancic, 2024-09-08 21:28:47 +01:00
Committed by: Gerwin Klein
Parent commit: 1415cac443
This commit: 5035def0b9
13 changed files with 12 additions and 130 deletions

View File

@@ -240,19 +240,6 @@ config_string(
UNQUOTE UNQUOTE
) )
config_string(
KernelFPUMaxRestoresSinceSwitch
FPU_MAX_RESTORES_SINCE_SWITCH
"This option is a heuristic to attempt to detect when the FPU is no longer in use,\
allowing the kernel to save the FPU state out so that the FPU does not have to be\
enabled/disabled every thread switch. Every time we restore a thread and there is\
active FPU state, we increment this setting and if it exceeds this threshold we\
switch to the NULL state."
DEFAULT 64
DEPENDS "KernelHaveFPU"
UNDEF_DISABLED UNQUOTE
)
config_option( config_option(
KernelVerificationBuild KernelVerificationBuild
VERIFICATION_BUILD VERIFICATION_BUILD

View File

@@ -57,8 +57,3 @@ void c_handle_vcpu_fault(word_t hsr)
VISIBLE SECTION(".vectors.text"); VISIBLE SECTION(".vectors.text");
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */ #endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
#ifdef CONFIG_HAVE_FPU
void c_handle_enfp(void)
VISIBLE SECTION(".vectors.text");
#endif /* CONFIG_HAVE_FPU */

View File

@@ -703,9 +703,8 @@ static inline void armv_vcpu_save(vcpu_t *vcpu, bool_t active)
#endif #endif
isb(); isb();
#ifdef CONFIG_HAVE_FPU #ifdef CONFIG_HAVE_FPU
/* Other FPU registers are still lazily saved and restored when /* Other FPU registers are still lazily saved and restored.
* handleFPUFault is called. See the comments in vcpu_enable * See the comments in vcpu_enable for more information.
* for more information.
*/ */
if (active && nativeThreadUsingFPU(vcpu->vcpuTCB)) { if (active && nativeThreadUsingFPU(vcpu->vcpuTCB)) {
access_fpexc(vcpu, false); access_fpexc(vcpu, false);
@@ -849,15 +848,6 @@ static inline void armv_vcpu_init(vcpu_t *vcpu)
static inline bool_t armv_handleVCPUFault(word_t hsr) static inline bool_t armv_handleVCPUFault(word_t hsr)
{ {
#ifdef CONFIG_HAVE_FPU
if (hsr == HSR_FPU_FAULT || hsr == HSR_TASE_FAULT) {
assert(!isFpuEnable());
handleFPUFault();
setNextPC(NODE_STATE(ksCurThread), getRestartPC(NODE_STATE(ksCurThread)));
return true;
}
#endif
return false; return false;
} }

View File

@@ -714,12 +714,6 @@ static inline void armv_vcpu_init(vcpu_t *vcpu)
static inline bool_t armv_handleVCPUFault(word_t hsr) static inline bool_t armv_handleVCPUFault(word_t hsr)
{ {
if ((ESR_EC(hsr) == ESR_EC_TFP || ESR_EC(hsr) == ESR_EC_CPACR) && !isFpuEnable()) {
handleFPUFault();
setNextPC(NODE_STATE(ksCurThread), getRestartPC(NODE_STATE(ksCurThread)));
return true;
}
#ifdef CONFIG_HARDWARE_DEBUG_API #ifdef CONFIG_HARDWARE_DEBUG_API
if (isDebugFault(hsr)) { if (isDebugFault(hsr)) {
handleDebugFaultEvent(hsr); handleDebugFaultEvent(hsr);

View File

@@ -16,9 +16,6 @@
/* Perform any actions required for the deletion of the given thread. */ /* Perform any actions required for the deletion of the given thread. */
void fpuThreadDelete(tcb_t *thread); void fpuThreadDelete(tcb_t *thread);
/* Handle an FPU exception. */
exception_t handleFPUFault(void);
void switchLocalFpuOwner(user_fpu_state_t *new_owner); void switchLocalFpuOwner(user_fpu_state_t *new_owner);
/* Switch the current owner of the FPU state on the core specified by 'cpu'. */ /* Switch the current owner of the FPU state on the core specified by 'cpu'. */
@@ -31,28 +28,15 @@ static inline bool_t nativeThreadUsingFPU(tcb_t *thread)
NODE_STATE_ON_CORE(ksActiveFPUState, thread->tcbAffinity); NODE_STATE_ON_CORE(ksActiveFPUState, thread->tcbAffinity);
} }
/* Called without global lock held! */
static inline void FORCE_INLINE lazyFPURestore(tcb_t *thread) static inline void FORCE_INLINE lazyFPURestore(tcb_t *thread)
{ {
if (unlikely(NODE_STATE(ksActiveFPUState))) { if (thread->tcbFlags & seL4_TCBFlag_fpuDisabled) {
/* If we have enabled/disabled the FPU too many times without disableFpu();
* someone else trying to use it, we assume it is no longer } else if (nativeThreadUsingFPU(thread)) {
* in use and switch out its state. */
if (unlikely(NODE_STATE(ksFPURestoresSinceSwitch) > CONFIG_FPU_MAX_RESTORES_SINCE_SWITCH)) {
switchLocalFpuOwner(NULL);
NODE_STATE(ksFPURestoresSinceSwitch) = 0;
} else {
if (likely(nativeThreadUsingFPU(thread))) {
/* We are using the FPU, make sure it is enabled */
enableFpu(); enableFpu();
} else { } else {
/* Someone is using the FPU and it might be enabled */ switchLocalFpuOwner(&thread->tcbArch.tcbContext.fpuState);
disableFpu();
}
NODE_STATE(ksFPURestoresSinceSwitch)++;
}
} else {
/* No-one (including us) is using the FPU, so we assume it
* is currently disabled */
} }
} }

View File

@@ -75,9 +75,8 @@ NODE_STATE_DECLARE(sched_context_t, *ksIdleSC);
#ifdef CONFIG_HAVE_FPU #ifdef CONFIG_HAVE_FPU
/* Current state installed in the FPU, or NULL if the FPU is currently invalid */ /* Current state installed in the FPU, or NULL if the FPU is currently invalid */
NODE_STATE_DECLARE(user_fpu_state_t *, ksActiveFPUState); NODE_STATE_DECLARE(user_fpu_state_t *, ksActiveFPUState);
/* Number of times we have restored a user context with an active FPU without switching it */
NODE_STATE_DECLARE(word_t, ksFPURestoresSinceSwitch);
#endif /* CONFIG_HAVE_FPU */ #endif /* CONFIG_HAVE_FPU */
#ifdef CONFIG_DEBUG_BUILD #ifdef CONFIG_DEBUG_BUILD
NODE_STATE_DECLARE(tcb_t *, ksDebugTCBs); NODE_STATE_DECLARE(tcb_t *, ksDebugTCBs);
#endif /* CONFIG_DEBUG_BUILD */ #endif /* CONFIG_DEBUG_BUILD */

View File

@@ -171,8 +171,6 @@ BEGIN_FUNC(lower_el_sync)
mov x0, x25 mov x0, x25
b c_handle_vcpu_fault b c_handle_vcpu_fault
#else #else
cmp x24, #ESR_EL1_EC_ENFP
b.eq el0_enfp
b el0_user b el0_user
#endif #endif
@@ -214,12 +212,6 @@ lel_syscall:
mov x2, x7 mov x2, x7
b c_handle_syscall b c_handle_syscall
el0_enfp:
#ifdef CONFIG_HAVE_FPU
lsp_i x19
b c_handle_enfp
#endif /* CONFIG_HAVE_FPU */
el0_user: el0_user:
mrs x20, ELR mrs x20, ELR
str x20, [sp, #PT_FaultIP] str x20, [sp, #PT_FaultIP]

View File

@@ -27,21 +27,6 @@ void VISIBLE NORETURN c_handle_undefined_instruction(void)
ksKernelEntry.word = getRegister(NODE_STATE(ksCurThread), NextIP); ksKernelEntry.word = getRegister(NODE_STATE(ksCurThread), NextIP);
#endif #endif
#if defined(CONFIG_HAVE_FPU) && defined(CONFIG_ARCH_AARCH32)
/* We assume the first fault is a FP exception and enable FPU, if not already enabled */
if (!isFpuEnable()) {
handleFPUFault();
/* Restart the FP instruction that cause the fault */
setNextPC(NODE_STATE(ksCurThread), getRestartPC(NODE_STATE(ksCurThread)));
} else {
handleUserLevelFault(0, 0);
}
restore_user_context();
UNREACHABLE();
#endif
/* There's only one user-level fault on ARM, and the code is (0,0) */ /* There's only one user-level fault on ARM, and the code is (0,0) */
#ifdef CONFIG_ARCH_AARCH32 #ifdef CONFIG_ARCH_AARCH32
handleUserLevelFault(0, 0); handleUserLevelFault(0, 0);
@@ -60,17 +45,6 @@ void VISIBLE NORETURN c_handle_undefined_instruction(void)
UNREACHABLE(); UNREACHABLE();
} }
#if defined(CONFIG_HAVE_FPU) && defined(CONFIG_ARCH_AARCH64)
void VISIBLE NORETURN c_handle_enfp(void)
{
c_entry_hook();
handleFPUFault();
restore_user_context();
UNREACHABLE();
}
#endif /* CONFIG_HAVE_FPU */
#ifdef CONFIG_EXCEPTION_FASTPATH #ifdef CONFIG_EXCEPTION_FASTPATH
void NORETURN vm_fault_slowpath(vm_fault_type_t type) void NORETURN vm_fault_slowpath(vm_fault_type_t type)
{ {

View File

@@ -133,14 +133,6 @@ void VISIBLE NORETURN c_handle_exception(void)
handleVMFaultEvent(scause); handleVMFaultEvent(scause);
break; break;
default: default:
#ifdef CONFIG_HAVE_FPU
if (!isFpuEnable()) {
/* we assume the illegal instruction is caused by FPU first */
handleFPUFault();
setNextPC(NODE_STATE(ksCurThread), getRestartPC(NODE_STATE(ksCurThread)));
break;
}
#endif
handleUserLevelFault(scause, 0); handleUserLevelFault(scause, 0);
break; break;
} }

View File

@@ -42,13 +42,7 @@ void VISIBLE NORETURN c_handle_interrupt(int irq, int syscall)
c_entry_hook(); c_entry_hook();
if (irq == int_unimpl_dev) { if (irq == int_page_fault) {
handleFPUFault();
#ifdef TRACK_KERNEL_ENTRIES
ksKernelEntry.path = Entry_UnimplementedDevice;
ksKernelEntry.word = irq;
#endif
} else if (irq == int_page_fault) {
/* Error code is in Error. Pull out bit 5, which is whether it was instruction or data */ /* Error code is in Error. Pull out bit 5, which is whether it was instruction or data */
vm_fault_type_t type = (NODE_STATE(ksCurThread)->tcbArch.tcbContext.registers[Error] >> 4u) & 1u; vm_fault_type_t type = (NODE_STATE(ksCurThread)->tcbArch.tcbContext.registers[Error] >> 4u) & 1u;
#ifdef TRACK_KERNEL_ENTRIES #ifdef TRACK_KERNEL_ENTRIES

View File

@@ -29,6 +29,7 @@ transferCaps(seL4_MessageInfo_t info,
BOOT_CODE void configureIdleThread(tcb_t *tcb) BOOT_CODE void configureIdleThread(tcb_t *tcb)
{ {
tcb->tcbFlags = seL4_TCBFlag_fpuDisabled;
Arch_configureIdleThread(tcb); Arch_configureIdleThread(tcb);
setThreadState(tcb, ThreadState_IdleThreadState); setThreadState(tcb, ThreadState_IdleThreadState);
} }

View File

@@ -19,7 +19,6 @@ void switchLocalFpuOwner(user_fpu_state_t *new_owner)
saveFpuState(NODE_STATE(ksActiveFPUState)); saveFpuState(NODE_STATE(ksActiveFPUState));
} }
if (new_owner) { if (new_owner) {
NODE_STATE(ksFPURestoresSinceSwitch) = 0;
loadFpuState(new_owner); loadFpuState(new_owner);
} else { } else {
disableFpu(); disableFpu();
@@ -39,24 +38,6 @@ void switchFpuOwner(user_fpu_state_t *new_owner, word_t cpu)
} }
} }
/* Handle an FPU fault.
*
* This CPU exception is thrown when userspace attempts to use the FPU while
* it is disabled. We need to save the current state of the FPU, and hand
* it over. */
exception_t handleFPUFault(void)
{
/* If we have already given the FPU to the user, we should not reach here.
* This should only be able to occur on CPUs without an FPU at all, which
* we presumably are happy to assume will not be running seL4. */
assert(!nativeThreadUsingFPU(NODE_STATE(ksCurThread)));
/* Otherwise, lazily switch over the FPU. */
switchLocalFpuOwner(&NODE_STATE(ksCurThread)->tcbArch.tcbContext.fpuState);
return EXCEPTION_NONE;
}
/* Prepare for the deletion of the given thread. */ /* Prepare for the deletion of the given thread. */
void fpuThreadDelete(tcb_t *thread) void fpuThreadDelete(tcb_t *thread)
{ {

View File

@@ -44,9 +44,8 @@ UP_STATE_DEFINE(tcb_t *, ksSchedulerAction);
#ifdef CONFIG_HAVE_FPU #ifdef CONFIG_HAVE_FPU
/* Currently active FPU state, or NULL if there is no active FPU state */ /* Currently active FPU state, or NULL if there is no active FPU state */
UP_STATE_DEFINE(user_fpu_state_t *, ksActiveFPUState); UP_STATE_DEFINE(user_fpu_state_t *, ksActiveFPUState);
UP_STATE_DEFINE(word_t, ksFPURestoresSinceSwitch);
#endif /* CONFIG_HAVE_FPU */ #endif /* CONFIG_HAVE_FPU */
#ifdef CONFIG_KERNEL_MCS #ifdef CONFIG_KERNEL_MCS
/* the amount of time passed since the kernel time was last updated */ /* the amount of time passed since the kernel time was last updated */
UP_STATE_DEFINE(ticks_t, ksConsumed); UP_STATE_DEFINE(ticks_t, ksConsumed);