diff --git a/config.cmake b/config.cmake index 4f7d43a79..4f825a6f9 100644 --- a/config.cmake +++ b/config.cmake @@ -209,7 +209,8 @@ config_string( config_string( KernelTimerTickMS TIMER_TICK_MS "Timer tick period in milliseconds" DEFAULT 2 - UNQUOTE UNDEF_DISABLED + UNQUOTE + DEPENDS "NOT KernelIsMCS" UNDEF_DISABLED ) config_string( KernelTimeSlice TIME_SLICE "Number of timer ticks until a thread is preempted." diff --git a/include/api/syscall.h b/include/api/syscall.h index e947ddb8e..f3384b453 100644 --- a/include/api/syscall.h +++ b/include/api/syscall.h @@ -21,6 +21,19 @@ #define TIME_ARG_SIZE (sizeof(ticks_t) / sizeof(word_t)) +#ifdef CONFIG_KERNEL_MCS +#define MCS_DO_IF_BUDGET(_block) \ + updateTimestamp(); \ + if (likely(checkBudgetRestart())) { \ + _block \ + } +#else +#define MCS_DO_IF_BUDGET(_block) \ + { \ + _block \ + } +#endif + exception_t handleSyscall(syscall_t syscall); exception_t handleInterruptEntry(void); exception_t handleUnknownSyscall(word_t w); diff --git a/include/api/types.h b/include/api/types.h index f5033d7e9..d24cc7ac4 100644 --- a/include/api/types.h +++ b/include/api/types.h @@ -26,6 +26,7 @@ typedef word_t prio_t; typedef uint64_t ticks_t; +typedef uint64_t time_t; enum domainConstants { minDom = 0, diff --git a/include/config.h b/include/config.h index a88d58f29..56c738657 100644 --- a/include/config.h +++ b/include/config.h @@ -72,10 +72,12 @@ #define CONFIG_MAX_NUM_BOOTINFO_UNTYPED_CAPS 166 #endif +#ifndef CONFIG_KERNEL_MCS /* length of a timer tick in ms */ #ifndef CONFIG_TIMER_TICK_MS #define CONFIG_TIMER_TICK_MS 2 #endif +#endif /* maximum number of different tracepoints which can be placed in the kernel */ #ifndef CONFIG_MAX_NUM_TRACE_POINTS diff --git a/include/kernel/thread.h b/include/kernel/thread.h index 404cb3015..f3f2cfe05 100644 --- a/include/kernel/thread.h +++ b/include/kernel/thread.h @@ -15,6 +15,10 @@ #include #include #include +#ifdef CONFIG_KERNEL_MCS +#include +#include +#endif static inline CONST word_t 
ready_queues_index(word_t dom, word_t prio) { @@ -80,6 +84,47 @@ static inline bool_t isHighestPrio(word_t dom, prio_t prio) prio >= getHighestPrio(dom); } +#ifdef CONFIG_KERNEL_MCS +static inline bool_t isCurThreadExpired(void) +{ + return NODE_STATE(ksCurThread)->tcbSchedContext->scRemaining < + (NODE_STATE(ksConsumed) + getKernelWcetTicks()); +} + +static inline bool_t isCurDomainExpired(void) +{ + return CONFIG_NUM_DOMAINS > 1 && + ksDomainTime < (NODE_STATE(ksConsumed) + getKernelWcetTicks()); +} + +static inline void commitTime(sched_context_t *sc) +{ + assert(sc->scCore == SMP_TERNARY(getCurrentCPUIndex(), 0)); + if (unlikely(sc->scRemaining < NODE_STATE(ksConsumed))) { + /* avoid underflow */ + sc->scRemaining = 0; + } else { + sc->scRemaining -= NODE_STATE(ksConsumed); + } + + if (CONFIG_NUM_DOMAINS > 1) { + if (unlikely(ksDomainTime < NODE_STATE(ksConsumed))) { + ksDomainTime = 0; + } else { + ksDomainTime -= NODE_STATE(ksConsumed); + } + } + + NODE_STATE(ksConsumed) = 0llu; +} + +static inline void rollbackTime(void) +{ + NODE_STATE(ksCurTime) -= NODE_STATE(ksConsumed); + NODE_STATE(ksConsumed) = 0llu; +} +#endif + void configureIdleThread(tcb_t *tcb); void activateThread(void); void suspend(tcb_t *target); @@ -103,7 +148,9 @@ void setMCPriority(tcb_t *tptr, prio_t mcp); void scheduleTCB(tcb_t *tptr); void possibleSwitchTo(tcb_t *tptr); void setThreadState(tcb_t *tptr, _thread_state_t ts); +#ifndef CONFIG_KERNEL_MCS void timerTick(void); +#endif void rescheduleRequired(void); /* declare that the thread has had its registers (in its user_context_t) modified and it @@ -120,4 +167,58 @@ static inline void updateRestartPC(tcb_t *tcb) setRegister(tcb, FaultIP, getRegister(tcb, NextIP)); } +#ifdef CONFIG_KERNEL_MCS +/* Update the kernel's timestamp and store it in ksCurTime. + * The difference between the previous kernel timestamp and the one just read + * is stored in ksConsumed. 
 + * + * Should be called on every kernel entry + * where threads can be billed. + * + * @pre NODE_STATE(ksConsumed) == 0 + */ +static inline void updateTimestamp(void) +{ + time_t prev = NODE_STATE(ksCurTime); + NODE_STATE(ksCurTime) = getCurrentTime(); + NODE_STATE(ksConsumed) = NODE_STATE(ksCurTime) - prev; +} + +/* Check if the current thread/domain budget has expired. + * If it has, bill the thread, add it to the scheduler and + * set up a reschedule. + * + * @return true if the thread/domain has enough budget to + * get through the current kernel operation. + */ +bool_t checkBudget(void); + +/* Everything checkBudget does, but also set the thread + * state to ThreadState_Restart. To be called from kernel entries + * where the operation should be restarted once the current thread + * has budget again. + */ +bool_t checkBudgetRestart(void); + +/* Set the next kernel tick, which is either the end of the current + * domain's timeslice OR the end of the current thread's timeslice. + */ +void setNextInterrupt(void); + +/* End the timeslice for the current thread. + * This will recharge the thread's timeslice and place it at the + * end of the scheduling queue for its priority. + */ +void endTimeslice(void); + +static inline void checkReschedule(void) +{ + if (isCurThreadExpired()) { + endTimeslice(); + } else if (isCurDomainExpired()) { + rescheduleRequired(); + } +} +#endif + #endif diff --git a/include/machine/timer.h b/include/machine/timer.h index 2d10c8186..d13d96f61 100644 --- a/include/machine/timer.h +++ b/include/machine/timer.h @@ -1,17 +1,40 @@ /* - * Copyright 2014, General Dynamics C4 Systems + * Copyright 2018, Data61 + * Commonwealth Scientific and Industrial Research Organisation (CSIRO) + * ABN 41 687 119 230. * * This software may be distributed and modified according to the terms of * the GNU General Public License version 2. Note that NO WARRANTY is provided. * See "LICENSE_GPLv2.txt" for details. 
* - * @TAG(GD_GPL) + * @TAG(DATA61_GPL) */ -#ifndef __TIMER_H -#define __TIMER_H +#ifndef __MACHINE_TIMER_H +#define __MACHINE_TIMER_H +#include #include -static inline void resetTimer(void); +#ifdef CONFIG_KERNEL_MCS +#include +#include -#endif +/* Read the current time from the timer. */ +/** MODIFIES: [*] */ +static inline ticks_t getCurrentTime(void); +/* set the next deadline irq - deadline is absolute */ +/** MODIFIES: [*] */ +static inline void setDeadline(ticks_t deadline); +/* ack previous deadline irq */ +/** MODIFIES: [*] */ +static inline void ackDeadlineIRQ(void); + +/* get the expected wcet of the kernel for this platform */ +static PURE inline ticks_t getKernelWcetTicks(void) +{ + return usToTicks(getKernelWcetUs()); +} +#else /* CONFIG_KERNEL_MCS */ +static inline void resetTimer(void); +#endif /* !CONFIG_KERNEL_MCS */ +#endif /* __MACHINE_TIMER_H */ diff --git a/include/model/statedata.h b/include/model/statedata.h index d515e8cdb..2d0fffb86 100644 --- a/include/model/statedata.h +++ b/include/model/statedata.h @@ -70,6 +70,13 @@ NODE_STATE_DECLARE(tcb_t, *ksCurThread); NODE_STATE_DECLARE(tcb_t, *ksIdleThread); NODE_STATE_DECLARE(tcb_t, *ksSchedulerAction); +#ifdef CONFIG_KERNEL_MCS +NODE_STATE_DECLARE(time_t, ksConsumed); +NODE_STATE_DECLARE(time_t, ksCurTime); +NODE_STATE_DECLARE(bool_t, ksReprogram); +NODE_STATE_DECLARE(sched_context_t, *ksCurSC); +#endif + #ifdef CONFIG_HAVE_FPU /* Current state installed in the FPU, or NULL if the FPU is currently invalid */ NODE_STATE_DECLARE(user_fpu_state_t *, ksActiveFPUState); @@ -97,7 +104,11 @@ extern const dschedule_t ksDomSchedule[]; extern const word_t ksDomScheduleLength; extern word_t ksDomScheduleIdx; extern dom_t ksCurDomain; +#ifdef CONFIG_KERNEL_MCS +extern ticks_t ksDomainTime; +#else extern word_t ksDomainTime; +#endif extern word_t tlbLockCount VISIBLE; extern char ksIdleThreadTCB[CONFIG_MAX_NUM_NODES][BIT(seL4_TCBBits)]; diff --git a/include/util.h b/include/util.h index 
5eb340b73..2a96512fa 100644 --- a/include/util.h +++ b/include/util.h @@ -24,6 +24,10 @@ /* time constants */ #define MS_IN_S 1000llu +#define US_IN_MS 1000llu +#define HZ_IN_KHZ 1000llu +#define KHZ_IN_MHZ 1000llu +#define HZ_IN_MHZ 1000000llu #ifndef __ASSEMBLER__ diff --git a/src/api/syscall.c b/src/api/syscall.c index 83efb8b78..7481affbe 100644 --- a/src/api/syscall.c +++ b/src/api/syscall.c @@ -237,20 +237,22 @@ exception_t handleUnknownSyscall(word_t w) } #endif /* CONFIG_ENABLE_BENCHMARKS */ + MCS_DO_IF_BUDGET({ #ifdef CONFIG_SET_TLS_BASE_SELF - if (w == SysSetTLSBase) { - word_t tls_base = getRegister(NODE_STATE(ksCurThread), capRegister); - /* - * This updates the real register as opposed to the thread state - * value. For many architectures, the TLS variables only get - * updated on a thread switch. - */ - return Arch_setTLSRegister(tls_base); - } + if (w == SysSetTLSBase) + { + word_t tls_base = getRegister(NODE_STATE(ksCurThread), capRegister); + /* + * This updates the real register as opposed to the thread state + * value. For many architectures, the TLS variables only get + * updated on a thread switch. 
+ */ + return Arch_setTLSRegister(tls_base); + } #endif - - current_fault = seL4_Fault_UnknownSyscall_new(w); - handleFault(NODE_STATE(ksCurThread)); + current_fault = seL4_Fault_UnknownSyscall_new(w); + handleFault(NODE_STATE(ksCurThread)); + }) schedule(); activateThread(); @@ -260,9 +262,10 @@ exception_t handleUnknownSyscall(word_t w) exception_t handleUserLevelFault(word_t w_a, word_t w_b) { - current_fault = seL4_Fault_UserException_new(w_a, w_b); - handleFault(NODE_STATE(ksCurThread)); - + MCS_DO_IF_BUDGET({ + current_fault = seL4_Fault_UserException_new(w_a, w_b); + handleFault(NODE_STATE(ksCurThread)); + }) schedule(); activateThread(); @@ -271,12 +274,14 @@ exception_t handleUserLevelFault(word_t w_a, word_t w_b) exception_t handleVMFaultEvent(vm_fault_type_t vm_faultType) { - exception_t status; + MCS_DO_IF_BUDGET({ - status = handleVMFault(NODE_STATE(ksCurThread), vm_faultType); - if (status != EXCEPTION_NONE) { - handleFault(NODE_STATE(ksCurThread)); - } + exception_t status = handleVMFault(NODE_STATE(ksCurThread), vm_faultType); + if (status != EXCEPTION_NONE) + { + handleFault(NODE_STATE(ksCurThread)); + } + }) schedule(); activateThread(); @@ -446,6 +451,17 @@ static void handleRecv(bool_t isBlocking) } } +#ifdef CONFIG_KERNEL_MCS +static inline void mcsIRQ(irq_t irq) +{ + commitTime(ksCurSC); + checkReschedule(); +} +#else +#define mcsIRQ(irq) +#endif + + static void handleYield(void) { tcbSchedDequeue(NODE_STATE(ksCurThread)); @@ -457,65 +473,72 @@ exception_t handleSyscall(syscall_t syscall) { exception_t ret; irq_t irq; - - switch (syscall) { - case SysSend: - ret = handleInvocation(false, true); - if (unlikely(ret != EXCEPTION_NONE)) { - irq = getActiveIRQ(); - if (irq != irqInvalid) { - handleInterrupt(irq); - Arch_finaliseInterrupt(); + MCS_DO_IF_BUDGET({ + switch (syscall) + { + case SysSend: + ret = handleInvocation(false, true); + if (unlikely(ret != EXCEPTION_NONE)) { + irq = getActiveIRQ(); + if (irq != irqInvalid) { + mcsIRQ(irq); + 
handleInterrupt(irq); + Arch_finaliseInterrupt(); + } } - } - break; - case SysNBSend: - ret = handleInvocation(false, false); - if (unlikely(ret != EXCEPTION_NONE)) { - irq = getActiveIRQ(); - if (irq != irqInvalid) { - handleInterrupt(irq); - Arch_finaliseInterrupt(); + break; + + case SysNBSend: + ret = handleInvocation(false, false); + if (unlikely(ret != EXCEPTION_NONE)) { + irq = getActiveIRQ(); + if (irq != irqInvalid) { + mcsIRQ(irq); + handleInterrupt(irq); + Arch_finaliseInterrupt(); + } } - } - break; + break; - case SysCall: - ret = handleInvocation(true, true); - if (unlikely(ret != EXCEPTION_NONE)) { - irq = getActiveIRQ(); - if (irq != irqInvalid) { - handleInterrupt(irq); - Arch_finaliseInterrupt(); + case SysCall: + ret = handleInvocation(true, true); + if (unlikely(ret != EXCEPTION_NONE)) { + irq = getActiveIRQ(); + if (irq != irqInvalid) { + mcsIRQ(irq); + handleInterrupt(irq); + Arch_finaliseInterrupt(); + } } + break; + + case SysRecv: + handleRecv(true); + break; + + case SysReply: + handleReply(); + break; + + case SysReplyRecv: + handleReply(); + handleRecv(true); + break; + + case SysNBRecv: + handleRecv(false); + break; + + case SysYield: + handleYield(); + break; + + default: + fail("Invalid syscall"); } - break; - case SysRecv: - handleRecv(true); - break; - - case SysReply: - handleReply(); - break; - - case SysReplyRecv: - handleReply(); - handleRecv(true); - break; - - case SysNBRecv: - handleRecv(false); - break; - - case SysYield: - handleYield(); - break; - - default: - fail("Invalid syscall"); - } + }) schedule(); activateThread(); diff --git a/src/arch/arm/object/vcpu.c b/src/arch/arm/object/vcpu.c index 77b170371..0ad7625e1 100644 --- a/src/arch/arm/object/vcpu.c +++ b/src/arch/arm/object/vcpu.c @@ -742,21 +742,25 @@ exception_t invokeVCPUSetTCB(vcpu_t *vcpu, tcb_t *tcb) void handleVCPUFault(word_t hsr) { + MCS_DO_IF_BUDGET({ #ifdef CONFIG_ARCH_AARCH64 - if (armv_handleVCPUFault(hsr)) { - return; - } + if 
(armv_handleVCPUFault(hsr)) + { + return; + } #endif #ifdef CONFIG_HAVE_FPU - if (hsr == HSR_FPU_FAULT || hsr == HSR_TASE_FAULT) { - assert(!isFpuEnable()); - handleFPUFault(); - setNextPC(NODE_STATE(ksCurThread), getRestartPC(NODE_STATE(ksCurThread))); - return; - } + if (hsr == HSR_FPU_FAULT || hsr == HSR_TASE_FAULT) + { + assert(!isFpuEnable()); + handleFPUFault(); + setNextPC(NODE_STATE(ksCurThread), getRestartPC(NODE_STATE(ksCurThread))); + return; + } #endif - current_fault = seL4_Fault_VCPUFault_new(hsr); - handleFault(NODE_STATE(ksCurThread)); + current_fault = seL4_Fault_VCPUFault_new(hsr); + handleFault(NODE_STATE(ksCurThread)); + }) schedule(); activateThread(); } diff --git a/src/kernel/boot.c b/src/kernel/boot.c index 9e73cab91..2c8ba71d7 100644 --- a/src/kernel/boot.c +++ b/src/kernel/boot.c @@ -371,7 +371,7 @@ BOOT_CODE bool_t create_idle_thread(void) #ifdef CONFIG_KERNEL_MCS bool_t result = configure_sched_context(NODE_STATE_ON_CORE(ksIdleThread, i), SC_PTR(&ksIdleThreadSC[SMP_TERNARY(i, 0)]), - CONFIG_BOOT_THREAD_TIME_SLICE); + usToTicks(CONFIG_BOOT_THREAD_TIME_SLICE * US_IN_MS)); SMP_COND_STATEMENT(NODE_STATE_ON_CORE(ksIdleThread, i)->tcbSchedContext->scCore = i;) if (!result) { printf("Kernel init failed: Unable to allocate sc for idle thread\n"); @@ -424,9 +424,12 @@ BOOT_CODE tcb_t *create_initial_thread(cap_t root_cnode_cap, cap_t it_pd_cap, vp /* initialise TCB */ #ifdef CONFIG_KERNEL_MCS - if (!configure_sched_context(tcb, SC_PTR(rootserver.sc), CONFIG_BOOT_THREAD_TIME_SLICE)) { + if (!configure_sched_context(tcb, SC_PTR(rootserver.sc), usToTicks(CONFIG_BOOT_THREAD_TIME_SLICE * US_IN_MS))) { return NULL; } + + NODE_STATE(ksConsumed) = 0; + NODE_STATE(ksReprogram) = true; #endif tcb->tcbPriority = seL4_MaxPrio; @@ -435,7 +438,11 @@ BOOT_CODE tcb_t *create_initial_thread(cap_t root_cnode_cap, cap_t it_pd_cap, vp setThreadState(tcb, ThreadState_Running); ksCurDomain = ksDomSchedule[ksDomScheduleIdx].domain; +#ifdef CONFIG_KERNEL_MCS + 
ksDomainTime = usToTicks(ksDomSchedule[ksDomScheduleIdx].length * US_IN_MS); +#else ksDomainTime = ksDomSchedule[ksDomScheduleIdx].length; +#endif assert(ksCurDomain < CONFIG_NUM_DOMAINS && ksDomainTime > 0); #ifndef CONFIG_KERNEL_MCS @@ -473,6 +480,9 @@ BOOT_CODE void init_core_state(tcb_t *scheduler_action) #endif NODE_STATE(ksSchedulerAction) = scheduler_action; NODE_STATE(ksCurThread) = NODE_STATE(ksIdleThread); +#ifdef CONFIG_KERNEL_MCS + NODE_STATE(ksCurSC) = NODE_STATE(ksCurThread)->tcbSchedContext; +#endif } BOOT_CODE static bool_t provide_untyped_cap( diff --git a/src/kernel/thread.c b/src/kernel/thread.c index c59a69c03..f5abea3b6 100644 --- a/src/kernel/thread.c +++ b/src/kernel/thread.c @@ -277,11 +277,31 @@ static void nextDomain(void) if (ksDomScheduleIdx >= ksDomScheduleLength) { ksDomScheduleIdx = 0; } +#ifdef CONFIG_KERNEL_MCS + NODE_STATE(ksReprogram) = true; +#endif ksWorkUnitsCompleted = 0; ksCurDomain = ksDomSchedule[ksDomScheduleIdx].domain; +#ifdef CONFIG_KERNEL_MCS + ksDomainTime = usToTicks(ksDomSchedule[ksDomScheduleIdx].length * US_IN_MS); +#else ksDomainTime = ksDomSchedule[ksDomScheduleIdx].length; +#endif } +#ifdef CONFIG_KERNEL_MCS +static void switchSchedContext(void) +{ + if (unlikely(NODE_STATE(ksCurSC) != NODE_STATE(ksCurThread)->tcbSchedContext)) { + NODE_STATE(ksReprogram) = true; + commitTime(NODE_STATE(ksCurSC)); + } else { + rollbackTime(); + } + NODE_STATE(ksCurSC) = NODE_STATE(ksCurThread)->tcbSchedContext; +} +#endif + static void scheduleChooseNewThread(void) { if (ksDomainTime == 0) { @@ -337,6 +357,15 @@ void schedule(void) doMaskReschedule(ARCH_NODE_STATE(ipiReschedulePending)); ARCH_NODE_STATE(ipiReschedulePending) = 0; #endif /* ENABLE_SMP_SUPPORT */ + +#ifdef CONFIG_KERNEL_MCS + switchSchedContext(); + + if (NODE_STATE(ksReprogram)) { + setNextInterrupt(); + NODE_STATE(ksReprogram) = false; + } +#endif } void chooseThread(void) { @@ -356,6 +385,9 @@ void chooseThread(void) thread = 
NODE_STATE(ksReadyQueues)[ready_queues_index(dom, prio)].head; assert(thread); assert(isSchedulable(thread)); +#ifdef CONFIG_KERNEL_MCS + assert(thread->tcbSchedContext->scRemaining > getKernelWcetTicks()); +#endif switchToThread(thread); } else { switchToIdleThread(); @@ -364,6 +396,11 @@ void switchToThread(tcb_t *thread) { +#ifdef CONFIG_KERNEL_MCS + assert(thread->tcbSchedContext != NULL); + assert(thread->tcbSchedContext->scRemaining >= getKernelWcetTicks()); +#endif + #ifdef CONFIG_BENCHMARK_TRACK_UTILISATION benchmark_utilisation_switch(NODE_STATE(ksCurThread), thread); #endif @@ -465,7 +502,54 @@ static void recharge(sched_context_t *sc) sc->scRemaining = sc->scBudget; assert(sc->scBudget > 0); } -#endif + +void setNextInterrupt(void) +{ + time_t next_thread = NODE_STATE(ksCurTime) + NODE_STATE(ksCurThread)->tcbSchedContext->scRemaining; + + if (CONFIG_NUM_DOMAINS > 1) { + time_t next_domain = NODE_STATE(ksCurTime) + ksDomainTime; + setDeadline(MIN(next_thread, next_domain) - getTimerPrecision()); + } else { + setDeadline(next_thread - getTimerPrecision()); + } +} + +bool_t checkBudget(void) +{ + if (unlikely(isCurThreadExpired())) { + commitTime(NODE_STATE(ksCurSC)); + endTimeslice(); + return false; + } else if (unlikely(isCurDomainExpired())) { + commitTime(NODE_STATE(ksCurSC)); + rescheduleRequired(); + return false; + } else { + return true; + } } + +bool_t checkBudgetRestart(void) +{ + assert(isRunnable(NODE_STATE(ksCurThread))); + bool_t result = checkBudget(); + if (!result) { + setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart); + } + return result; +} + +void endTimeslice(void) +{ + recharge(NODE_STATE(ksCurThread)->tcbSchedContext); + if (likely(thread_state_get_tsType(NODE_STATE(ksCurThread)->tcbState) == + ThreadState_Running)) { + SCHED_APPEND_CURRENT_TCB; + } + rescheduleRequired(); +} +#else void timerTick(void) { @@ -476,16 +560,6 @@ void timerTick(void) ThreadState_RunningVM #endif ) { -#ifdef CONFIG_KERNEL_MCS - 
assert(NODE_STATE(ksCurThread)->tcbSchedContext != NULL); - if (NODE_STATE(ksCurThread)->tcbSchedContext->scRemaining > 1) { - NODE_STATE(ksCurThread)->tcbSchedContext->scRemaining--; - } else { - recharge(NODE_STATE(ksCurThread)->tcbSchedContext); - SCHED_APPEND_CURRENT_TCB; - rescheduleRequired(); - } -#else if (NODE_STATE(ksCurThread)->tcbTimeSlice > 1) { NODE_STATE(ksCurThread)->tcbTimeSlice--; } else { @@ -493,7 +567,6 @@ void timerTick(void) SCHED_APPEND_CURRENT_TCB; rescheduleRequired(); } -#endif } if (CONFIG_NUM_DOMAINS > 1) { @@ -503,6 +576,7 @@ void timerTick(void) } } } +#endif void rescheduleRequired(void) { @@ -515,5 +589,8 @@ void rescheduleRequired(void) SCHED_ENQUEUE(NODE_STATE(ksSchedulerAction)); } NODE_STATE(ksSchedulerAction) = SchedulerAction_ChooseNewThread; +#ifdef CONFIG_KERNEL_MCS + NODE_STATE(ksReprogram) = true; +#endif } diff --git a/src/model/statedata.c b/src/model/statedata.c index 3656ec70b..2a8906a9b 100644 --- a/src/model/statedata.c +++ b/src/model/statedata.c @@ -47,6 +47,16 @@ UP_STATE_DEFINE(user_fpu_state_t *, ksActiveFPUState); UP_STATE_DEFINE(word_t, ksFPURestoresSinceSwitch); #endif /* CONFIG_HAVE_FPU */ +#ifdef CONFIG_KERNEL_MCS +/* the amount of time passed since the kernel time was last updated */ +UP_STATE_DEFINE(ticks_t, ksConsumed); +/* whether we need to reprogram the timer before exiting the kernel */ +UP_STATE_DEFINE(bool_t, ksReprogram); +/* the current kernel time (recorded on kernel entry) */ +UP_STATE_DEFINE(ticks_t, ksCurTime); +/* current scheduling context pointer */ +UP_STATE_DEFINE(sched_context_t *, ksCurSC); +#endif #ifdef CONFIG_DEBUG_BUILD UP_STATE_DEFINE(tcb_t *, ksDebugTCBs); @@ -66,7 +76,11 @@ compile_assert(irqCNodeSize, sizeof(intStateIRQNode) >= ((INT_STATE_ARRAY_SIZE) dom_t ksCurDomain; /* Domain timeslice remaining */ +#ifdef CONFIG_KERNEL_MCS +ticks_t ksDomainTime; +#else word_t ksDomainTime; +#endif /* An index into ksDomSchedule for active domain and length. 
*/ word_t ksDomScheduleIdx; diff --git a/src/object/interrupt.c b/src/object/interrupt.c index 8031466ff..3ff85f708 100644 --- a/src/object/interrupt.c +++ b/src/object/interrupt.c @@ -201,6 +201,9 @@ void handleInterrupt(irq_t irq) case IRQSignal: { cap_t cap; +#ifdef CONFIG_KERNEL_MCS + updateTimestamp(); +#endif cap = intStateIRQNode[irq].cap; if (cap_get_capType(cap) == cap_notification_cap && @@ -214,17 +217,33 @@ } #ifndef CONFIG_ARCH_RISCV maskInterrupt(true, irq); +#endif +#ifdef CONFIG_KERNEL_MCS + /* Bill the current thread. We know it has enough budget, as otherwise we would be + * dealing with a timer interrupt not a signal interrupt */ + commitTime(NODE_STATE(ksCurSC)); + checkReschedule(); #endif break; } case IRQTimer: +#ifdef CONFIG_KERNEL_MCS + updateTimestamp(); + ackDeadlineIRQ(); + commitTime(NODE_STATE(ksCurSC)); + checkBudget(); +#else timerTick(); resetTimer(); +#endif break; #ifdef ENABLE_SMP_SUPPORT case IRQIPI: +#ifdef CONFIG_KERNEL_MCS + updateTimestamp(); +#endif handleIPI(irq, true); break; #endif /* ENABLE_SMP_SUPPORT */ diff --git a/src/object/schedcontrol.c b/src/object/schedcontrol.c index efae416ad..3a3049b34 100644 --- a/src/object/schedcontrol.c +++ b/src/object/schedcontrol.c @@ -10,6 +10,7 @@ * @TAG(DATA61_GPL) */ +#include #include #include #include @@ -46,7 +47,7 @@ static exception_t decodeSchedControl_Configure(word_t length, cap_t cap, extra_ return EXCEPTION_SYSCALL_ERROR; } - ticks_t budget = mode_parseTimeArg(0, buffer); + time_t budget_us = mode_parseTimeArg(0, buffer); cap_t targetCap = extraCaps.excaprefs[0]->cap; if (unlikely(cap_get_capType(targetCap) != cap_sched_context_cap)) { @@ -56,10 +57,17 @@ return EXCEPTION_SYSCALL_ERROR; } + if (budget_us > getMaxUsToTicks() || budget_us < getKernelWcetUs()) { + userError("SchedControl_Configure: budget out of range."); + current_syscall_error.type = seL4_RangeError; + 
current_syscall_error.rangeErrorMin = getKernelWcetUs(); + current_syscall_error.rangeErrorMax = getMaxUsToTicks(); + return EXCEPTION_SYSCALL_ERROR; + } + setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart); return invokeSchedControl_Configure(SC_PTR(cap_sched_context_cap_get_capSCPtr(targetCap)), - budget, - cap_sched_control_cap_get_core(cap)); + usToTicks(budget_us), cap_sched_control_cap_get_core(cap)); } exception_t decodeSchedControlInvocation(word_t label, cap_t cap, word_t length, extra_caps_t extraCaps, diff --git a/src/object/tcb.c b/src/object/tcb.c index 436f57afe..efa86997e 100644 --- a/src/object/tcb.c +++ b/src/object/tcb.c @@ -87,6 +87,7 @@ void tcbSchedEnqueue(tcb_t *tcb) { #ifdef CONFIG_KERNEL_MCS assert(isSchedulable(tcb)); + assert(tcb->tcbSchedContext->scRemaining > getKernelWcetTicks()); #endif if (!thread_state_get_tcbQueued(tcb->tcbState)) { @@ -121,6 +122,7 @@ void tcbSchedAppend(tcb_t *tcb) { #ifdef CONFIG_KERNEL_MCS assert(isSchedulable(tcb)); + assert(tcb->tcbSchedContext->scRemaining > getKernelWcetTicks()); #endif if (!thread_state_get_tcbQueued(tcb->tcbState)) { tcb_queue_t queue;