arm: New virtual PPI event fault type

This commit introduces a new fault type, seL4_Fault_VPPIEvent.

This change means the kernel can reserve PPI interrupts and virtualise
them by delivering the IRQ to the active VCPU through a
specific fault. This enables multiplexing PPI IRQs across multiple VCPUs,
which requires correctly masking and unmasking the IRQ depending on
which VCPU is running.

A new VCPU invocation, seL4_ARM_VCPU_AckVPPI, is also added for
acknowledging the handling of the IRQ. It takes an IRQ as a parameter
but only accepts IRQ numbers that are delivered as VPPIEvent faults.

Co-authored-by: Rafal Kolanski <rafal.kolanski@data61.csiro.au>
Co-authored-by: Kent McLeod <Kent.Mcleod@data61.csiro.au>
This commit is contained in:
Alison Felizzi
2019-06-24 10:25:15 +10:00
committed by Kent Mcleod
parent 4cb3414def
commit f795e7c0ec
15 changed files with 131 additions and 0 deletions

View File

@@ -103,6 +103,11 @@ Upcoming release: BINARY COMPATIBLE
- Ram memory is part of this reservation and is instead handed out as regular Untypeds.
- Memory reserved for use by the kernel or other reserved regions are not accessible via any untypeds.
- Devices used by the kernel are also not accessible via any untypeds.
* New fault type when running in Arm hypervisor mode: seL4_Fault_VPPIEvent
- The kernel can keep track of IRQ state for each VCPU for a reduced set of PPI IRQs and deliver IRQ events as
VCPU faults for these interrupt numbers.
- Additionally a new VCPU invocation is introduced: seL4_ARM_VCPU_AckVPPI.
This is used to acknowledge a virtual PPI that was delivered as a fault.
## Upgrade Notes

View File

@@ -215,6 +215,12 @@ block VCPUFault {
padding 28
field seL4_FaultType 4
}
block VPPIEvent {
field irq 32
padding 28
field seL4_FaultType 4
}
#endif
---- ARM-specific object types

View File

@@ -170,6 +170,13 @@ block VCPUFault {
padding 28
field seL4_FaultType 4
}
block VPPIEvent {
field irq 64
padding 32
padding 28
field seL4_FaultType 4
}
#endif
-- VM attributes

View File

@@ -37,6 +37,11 @@ static inline void handleReservedIRQ(irq_t irq)
VGICMaintenance();
return;
}
if (irqVPPIEventIndex(irq) != VPPIEventIRQ_invalid) {
VPPIEvent(irq);
return;
}
#endif
#ifdef CONFIG_ARM_SMMU

View File

@@ -61,17 +61,25 @@ struct gicVCpuIface {
virq_t lr[GIC_VCPU_MAX_NUM_LR];
};
enum VPPIEventIRQ {
n_VPPIEventIRQ,
VPPIEventIRQ_invalid = n_VPPIEventIRQ,
};
typedef word_t VPPIEventIRQ_t;
struct vcpu {
/* TCB associated with this VCPU. */
struct tcb *vcpuTCB;
struct gicVCpuIface vgic;
word_t regs[seL4_VCPUReg_Num];
bool_t vppi_masked[n_VPPIEventIRQ];
};
typedef struct vcpu vcpu_t;
compile_assert(vcpu_size_correct, sizeof(struct vcpu) <= BIT(VCPU_SIZE_BITS))
void VGICMaintenance(void);
void handleVCPUFault(word_t hsr);
void VPPIEvent(irq_t irq);
void vcpu_init(vcpu_t *vcpu);
@@ -103,11 +111,13 @@ exception_t decodeVCPUWriteReg(cap_t cap, unsigned int length, word_t *buffer);
exception_t decodeVCPUReadReg(cap_t cap, unsigned int length, bool_t call, word_t *buffer);
exception_t decodeVCPUInjectIRQ(cap_t cap, unsigned int length, word_t *buffer);
exception_t decodeVCPUSetTCB(cap_t cap, extra_caps_t extraCaps);
exception_t decodeVCPUAckVPPI(cap_t cap, unsigned int length, word_t *buffer);
exception_t invokeVCPUWriteReg(vcpu_t *vcpu, word_t field, word_t value);
exception_t invokeVCPUReadReg(vcpu_t *vcpu, word_t field, bool_t call);
exception_t invokeVCPUInjectIRQ(vcpu_t *vcpu, unsigned long index, virq_t virq);
exception_t invokeVCPUSetTCB(vcpu_t *vcpu, tcb_t *tcb);
exception_t invokeVCPUAckVPPI(vcpu_t *vcpu, VPPIEventIRQ_t vppi);
static word_t vcpu_hw_read_reg(word_t reg_index);
static void vcpu_hw_write_reg(word_t reg_index, word_t reg);
@@ -161,6 +171,14 @@ static inline void vcpu_write_reg(vcpu_t *vcpu, word_t reg, word_t value)
vcpu->regs[reg] = value;
}
static inline VPPIEventIRQ_t irqVPPIEventIndex(irq_t irq)
{
switch (IRQT_TO_IRQ(irq)) {
default:
return VPPIEventIRQ_invalid;
}
}
#else /* end of CONFIG_ARM_HYPERVISOR_SUPPORT */
/* used in boot.c with a guard, use a marco to avoid exposing vcpu_t */

View File

@@ -264,6 +264,19 @@
<param dir="in" name="value" type="seL4_Word"
description="Value to be written to the VCPU register"/>
</method>
<method id="ARMVCPUAckVPPI" name="AckVPPI" condition="defined(CONFIG_ARM_HYPERVISOR_SUPPORT)"
manual_name="Acknowledge Virtual PPI IRQ">
<brief>
Acknowledge a PPI IRQ previously forwarded from a VPPIEvent fault
</brief>
<description>
Acknowledge and unmask the PPI interrupt so that further interrupts can be forwarded
through VPPIEvent faults.
</description>
<param dir="in" name="irq" type="seL4_Word"
    description="IRQ to acknowledge. Must be an IRQ number that is delivered as a VPPIEvent fault."/>
</method>
</interface>
<interface name="seL4_IRQControl" manual_name="IRQ Control" cap_description="An IRQControl capability. This gives you the authority to make this call.">

View File

@@ -30,6 +30,7 @@ tagged_union seL4_Fault seL4_FaultType {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
tag VGICMaintenance 7
tag VCPUFault 8
tag VPPIEvent 9
#endif
#else
-- arch specific faults
@@ -38,6 +39,7 @@ tagged_union seL4_Fault seL4_FaultType {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
tag VGICMaintenance 6
tag VCPUFault 7
tag VPPIEvent 8
#endif
#endif

View File

@@ -73,6 +73,11 @@ enum {
SEL4_FORCE_LONG_ENUM(seL4_VGICMaintenance_Msg),
} seL4_VGICMaintenance_Msg;
enum {
seL4_VPPIEvent_IRQ,
SEL4_FORCE_LONG_ENUM(seL4_VPPIEvent_Msg),
} seL4_VPPIEvent_Msg;
enum {
seL4_VCPUFault_HSR,
seL4_VCPUFault_Length,

View File

@@ -49,6 +49,8 @@ LIBSEL4_INLINE_FUNC seL4_Fault_t seL4_getArchFault(seL4_MessageInfo_t tag)
return seL4_Fault_VGICMaintenance_new(seL4_GetMR(seL4_VGICMaintenance_IDX));
case seL4_Fault_VCPUFault:
return seL4_Fault_VCPUFault_new(seL4_GetMR(seL4_VCPUFault_HSR));
case seL4_Fault_VPPIEvent:
return seL4_Fault_VPPIEvent_new(seL4_GetMR(seL4_VPPIEvent_IRQ));
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
#ifdef CONFIG_KERNEL_MCS
case seL4_Fault_Timeout:

View File

@@ -87,6 +87,14 @@ block VCPUFault {
padding 28
field seL4_FaultType 4
}
block VPPIEvent {
padding 384
field irq 32
padding 28
field seL4_FaultType 4
}
#endif
#ifdef CONFIG_HARDWARE_DEBUG_API

View File

@@ -68,6 +68,12 @@ enum {
SEL4_FORCE_LONG_ENUM(seL4_VGICMaintenance_Msg),
} seL4_VGICMaintenance_Msg;
enum {
seL4_VPPIEvent_IRQ,
SEL4_FORCE_LONG_ENUM(seL4_VPPIEvent_Msg),
} seL4_VPPIEvent_Msg;
enum {
seL4_VCPUFault_HSR,
seL4_VCPUFault_Length,

View File

@@ -51,6 +51,8 @@ LIBSEL4_INLINE_FUNC seL4_Fault_t seL4_getArchFault(seL4_MessageInfo_t tag)
return seL4_Fault_VGICMaintenance_new(seL4_GetMR(seL4_VGICMaintenance_IDX));
case seL4_Fault_VCPUFault:
return seL4_Fault_VCPUFault_new(seL4_GetMR(seL4_VCPUFault_HSR));
case seL4_Fault_VPPIEvent:
return seL4_Fault_VPPIEvent_new(seL4_GetMR(seL4_VPPIEvent_IRQ));
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
#ifdef CONFIG_KERNEL_MCS
case seL4_Fault_Timeout:

View File

@@ -88,6 +88,13 @@ block VCPUFault {
padding 60
field seL4_FaultType 4
}
block VPPIEvent {
padding 768
field irq 64
padding 60
field seL4_FaultType 4
}
#endif
#ifdef CONFIG_HARDWARE_DEBUG_API

View File

@@ -28,6 +28,8 @@ bool_t Arch_handleFaultReply(tcb_t *receiver, tcb_t *sender, word_t faultType)
return true;
case seL4_Fault_VCPUFault:
return true;
case seL4_Fault_VPPIEvent:
return true;
#endif
default:
fail("Invalid fault");
@@ -64,6 +66,8 @@ word_t Arch_setMRs_fault(tcb_t *sender, tcb_t *receiver, word_t *receiveIPCBuffe
}
case seL4_Fault_VCPUFault:
return setMR(receiver, receiveIPCBuffer, seL4_VCPUFault_HSR, seL4_Fault_VCPUFault_get_hsr(sender->tcbFault));
case seL4_Fault_VPPIEvent:
return 0;
#endif
default:

View File

@@ -124,6 +124,17 @@ void vcpu_restore(vcpu_t *vcpu)
vcpu_enable(vcpu);
}
void VPPIEvent(irq_t irq)
{
if (ARCH_NODE_STATE(armHSVCPUActive)) {
maskInterrupt(true, irq);
assert(irqVPPIEventIndex(irq) != VPPIEventIRQ_invalid);
ARCH_NODE_STATE(armHSCurVCPU)->vppi_masked[irqVPPIEventIndex(irq)] = true;
current_fault = seL4_Fault_VPPIEvent_new(IRQT_TO_IRQ(irq));
handleFault(NODE_STATE(ksCurThread));
}
}
void VGICMaintenance(void)
{
uint32_t eisr0, eisr1;
@@ -450,6 +461,8 @@ exception_t decodeARMVCPUInvocation(
return decodeVCPUWriteReg(cap, length, buffer);
case ARMVCPUInjectIRQ:
return decodeVCPUInjectIRQ(cap, length, buffer);
case ARMVCPUAckVPPI:
return decodeVCPUAckVPPI(cap, length, buffer);
default:
userError("VCPU: Illegal operation.");
current_syscall_error.type = seL4_IllegalOperation;
@@ -457,6 +470,34 @@ exception_t decodeARMVCPUInvocation(
}
}
exception_t decodeVCPUAckVPPI(cap_t cap, unsigned int length, word_t *buffer)
{
vcpu_t *vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));
if (length < 1) {
current_syscall_error.type = seL4_TruncatedMessage;
return EXCEPTION_SYSCALL_ERROR;
}
irq_t irq = getSyscallArg(0, buffer);
VPPIEventIRQ_t vppi = irqVPPIEventIndex(irq);
if (vppi == VPPIEventIRQ_invalid) {
userError("VCPUAckVPPI: Invalid irq number.");
current_syscall_error.type = seL4_InvalidArgument;
current_syscall_error.invalidArgumentNumber = 0;
}
setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
return invokeVCPUAckVPPI(vcpu, vppi);
}
exception_t invokeVCPUAckVPPI(vcpu_t *vcpu, VPPIEventIRQ_t vppi)
{
vcpu->vppi_masked[vppi] = false;
return EXCEPTION_NONE;
}
exception_t decodeVCPUSetTCB(cap_t cap, extra_caps_t extraCaps)
{
cap_t tcbCap;