aarch64: add support for 40-bit PA

This commit adds support for using 40-bit physical addresses in
aarch64-hyp mode.

40-bit PA support is implemented by using a 3-level translation, with a
13-bit page upper directory as the vspace root. PageGlobalDirectories
are not used in this configuration.

To use 40-bit PAs, platforms should set KernelArmPASizeBits40 to ON.

Co-authored-by: Yanyan Shen <yanyan.shen@data61.csiro.au>
Co-authored-by: Chris Guikema <chris.guikema@dornerworks.com>
This commit is contained in:
Anna Lyons
2019-07-09 10:16:51 +10:00
committed by Kent McLeod
parent d1153fbed8
commit b1788e02d5
11 changed files with 174 additions and 33 deletions

View File

@@ -42,6 +42,33 @@ hw_asid_t getHWASID(asid_t asid);
static const region_t BOOT_RODATA *mode_reserved_region = NULL;
#ifdef AARCH64_VSPACE_S2_START_L1
#define cap_vtable_root_cap cap_page_upper_directory_cap
#define cap_vtable_root_get_mappedASID(_c) \
cap_page_upper_directory_cap_get_capPUDMappedASID(_c)
#define cap_vtable_root_get_basePtr(_c) \
VSPACE_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(_c))
#define cap_vtable_root_isMapped(_c) \
cap_page_upper_directory_cap_get_capPUDIsMapped(_c)
#define cap_vtable_cap_new(_a, _v, _m) cap_page_upper_directory_cap_new(_a, _v, _m, 0)
#define vtable_invalid_new(_a, _v) pude_pude_invalid_new(_a, _v)
#define vtable_invalid_get_stored_asid_valid(_v) \
pude_pude_invalid_get_stored_asid_valid(_v)
#define vtable_invalid_get_stored_hw_asid(_v) pude_pude_invalid_get_stored_hw_asid(_v)
static inline exception_t performASIDPoolInvocation(asid_t asid, asid_pool_t *poolPtr, cte_t *cte)
{
cap_page_upper_directory_cap_ptr_set_capPUDMappedASID(&cte->cap, asid);
cap_page_upper_directory_cap_ptr_set_capPUDIsMapped(&cte->cap, 1);
poolPtr->array[asid & MASK(asidLowBits)] =
PUDE_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(cte->cap));
return EXCEPTION_NONE;
}
#else
#define cap_vtable_root_cap cap_page_global_directory_cap
#define cap_vtable_root_get_mappedASID(_c) \
cap_page_global_directory_cap_get_capPGDMappedASID(_c)
@@ -64,4 +91,5 @@ static inline exception_t performASIDPoolInvocation(asid_t asid, asid_pool_t *po
return EXCEPTION_NONE;
}
#endif
#endif /* __ARCH_MODE_KERNEL_VSPACE_H_ */

View File

@@ -108,11 +108,25 @@ static void arm_load_thread_id(tcb_t *thread)
#define TCR_EL2_ORGN0_WBWC BIT(10)
#define TCR_EL2_SH0_ISH (3 << 12)
#define TCR_EL2_TG0_4K (0 << 14)
#define TCR_EL2_TCR_PS_16T (4 << 16)
/* The default value for TCR_EL2 is for 44-bit PARange. */
#define TCR_EL2_TCR_PS_4G 0
#define TCR_EL2_TCR_PS_64G 1
#define TCR_EL2_TCR_PS_1T 2
#define TCR_EL2_TCR_PS_4T 3
#define TCR_EL2_TCR_PS_16T 4
#define TCR_EL2_TCR_PS_256T 5
#define TCR_EL2_TCR_PS_4P 6
#define TCR_EL2_TCR_PS_SHIFT 16
#ifdef AARCH64_VSPACE_S2_START_L1
#define TCR_EL2_TCR_PS TCR_EL2_TCR_PS_1T
#else
#define TCR_EL2_TCR_PS TCR_EL2_TCR_PS_16T
#endif
#define TCR_EL2_DEFAULT (TCR_EL2_T0SZ | TCR_EL2_IRGN0_WBWC | TCR_EL2_ORGN0_WBWC | \
TCR_EL2_SH0_ISH | TCR_EL2_TG0_4K | TCR_EL2_TCR_PS_16T | \
TCR_EL2_SH0_ISH | TCR_EL2_TG0_4K | \
(TCR_EL2_TCR_PS << TCR_EL2_TCR_PS_SHIFT) | \
TCR_EL2_RES1)
/* Check if the elfloader set up the TCR_EL2 correctly. */

View File

@@ -202,6 +202,13 @@ tagged_union pgde pgde_type {
tag pgde_pud 3
}
block pude_invalid {
field stored_hw_asid 8
field stored_asid_valid 1
padding 53
field pude_type 2
}
block pude_1g {
padding 9
field UXN 1
@@ -229,6 +236,7 @@ block pude_pd {
}
tagged_union pude pude_type {
tag pude_invalid 0
tag pude_1g 1
tag pude_pd 3
}

View File

@@ -38,10 +38,18 @@ enum vm_rights {
};
typedef word_t vm_rights_t;
#define PGDE_SIZE_BITS seL4_PGDEntryBits
#define PGD_INDEX_BITS seL4_PGDIndexBits
#define PUDE_SIZE_BITS seL4_PUDEntryBits
#define PUD_INDEX_BITS seL4_PUDIndexBits
/* If hypervisor support for aarch64 is enabled and we run on processors with
* 40-bit PA, the stage-2 translation for EL1/EL0 uses a 3-level translation, skipping the PGD level.
* Yet the kernel will still use a stage-1 translation with 48 bit input addresses and a 4-level
* translation. Therefore, PUD and PGD size for the kernel can be different from EL1/EL0
* so we do not use the libsel4 definitions */
#define PGD_SIZE_BITS 12
#define PGD_INDEX_BITS 9
#define PUD_SIZE_BITS 12
#define PUD_INDEX_BITS 9
#define UPUD_SIZE_BITS seL4_PUDBits
#define UPUD_INDEX_BITS seL4_PUDIndexBits
#define PDE_SIZE_BITS seL4_PageDirEntryBits
#define PD_INDEX_BITS seL4_PageDirIndexBits
#define PTE_SIZE_BITS seL4_PageTableEntryBits
@@ -54,11 +62,19 @@ typedef word_t vm_rights_t;
#define VCPU_SIZE_BITS seL4_VCPUBits
#ifdef AARCH64_VSPACE_S2_START_L1
/* For hyp with 40 bit PA, EL1 and EL0 use a 3 level translation and skips the PGD */
typedef pude_t vspace_root_t;
#else
/* Otherwise we use a 4-level translation */
typedef pgde_t vspace_root_t;
#endif
#define VSPACE_PTR(r) ((vspace_root_t *)(r))
#define GET_PGD_INDEX(x) (((x) >> (PGD_INDEX_OFFSET)) & MASK(PGD_INDEX_BITS))
#define GET_PUD_INDEX(x) (((x) >> (PUD_INDEX_OFFSET)) & MASK(PUD_INDEX_BITS))
#define GET_UPUD_INDEX(x) (((x) >> (PUD_INDEX_OFFSET)) & MASK(UPUD_INDEX_BITS))
#define GET_PD_INDEX(x) (((x) >> (PD_INDEX_OFFSET)) & MASK(PD_INDEX_BITS))
#define GET_PT_INDEX(x) (((x) >> (PT_INDEX_OFFSET)) & MASK(PT_INDEX_BITS))

View File

@@ -486,13 +486,20 @@ static inline void vcpu_init_vtcr(void)
}
/* Set up the stage-2 translation control register for cores supporting 44-bit PA */
uint32_t vtcr_el2 = VTCR_EL2_T0SZ(20); // 44-bit input IPA
uint32_t vtcr_el2;
#ifdef CONFIG_ARM_PA_SIZE_BITS_40
vtcr_el2 = VTCR_EL2_T0SZ(24); // 40-bit input IPA
vtcr_el2 |= VTCR_EL2_PS(PS_1T); // 40-bit PA size
vtcr_el2 |= VTCR_EL2_SL0(SL0_4K_L1); // 4KiB, start at level 1
#else
vtcr_el2 = VTCR_EL2_T0SZ(20); // 44-bit input IPA
vtcr_el2 |= VTCR_EL2_PS(PS_16T); // 44-bit PA size
vtcr_el2 |= VTCR_EL2_SL0(SL0_4K_L0); // 4KiB, start at level 0
#endif
vtcr_el2 |= VTCR_EL2_IRGN0(NORMAL_WB_WA_CACHEABLE); // inner write-back, read/write allocate
vtcr_el2 |= VTCR_EL2_ORGN0(NORMAL_WB_WA_CACHEABLE); // outer write-back, read/write allocate
vtcr_el2 |= VTCR_EL2_SH0(SH0_INNER); // inner shareable
vtcr_el2 |= VTCR_EL2_TG0(TG0_4K); // 4KiB page size
vtcr_el2 |= VTCR_EL2_PS(PS_16T); // 44-bit PA size
vtcr_el2 |= BIT(31); // reserved as 1
MSR(REG_VTCR_EL2, vtcr_el2);

View File

@@ -89,4 +89,10 @@
#define ENABLE_SMP_SUPPORT
#endif
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
#ifdef CONFIG_ARM_PA_SIZE_BITS_40
#define AARCH64_VSPACE_S2_START_L1
#endif
#endif
#endif /* __CONFIG_H */

View File

@@ -101,7 +101,7 @@
</method>
</interface>
<interface name="seL4_ARM_PageUpperDirectory" manual_name="Page Upper Directory">
<method id="ARMPageUpperDirectoryMap" name="Map">
<method id="ARMPageUpperDirectoryMap" name="Map" condition="!(defined CONFIG_ARM_HYPERVISOR_SUPPORT &amp;&amp; defined CONFIG_ARM_PA_SIZE_BITS_40)">
<brief>
Map an upper page directory
</brief>
@@ -115,7 +115,8 @@
<param dir="in" name="attr" type="seL4_ARM_VMAttributes"
description="Memory attributes"/>
</method>
<method id="ARMPageUpperDirectoryUnmap" name="Unmap">
<method id="ARMPageUpperDirectoryUnmap" name="Unmap"
condition="!(defined CONFIG_ARM_HYPERVISOR_SUPPORT &amp;&amp; defined CONFIG_ARM_PA_SIZE_BITS_40)">
</method>
</interface>
<interface name="seL4_ARM_PageDirectory" manual_name="Page Directory">

View File

@@ -140,17 +140,32 @@ enum {
#define seL4_IOPageTableBits 12
#define seL4_WordSizeBits 3
#define seL4_PUDEntryBits 3
#if defined(CONFIG_ARM_HYPERVISOR_SUPPORT) && defined (CONFIG_ARM_PA_SIZE_BITS_40)
/* for a 3 level translation, we skip the PGD */
#define seL4_PGDBits 0
#define seL4_PGDEntryBits 0
#define seL4_PGDIndexBits 0
#define seL4_PUDBits 13
#define seL4_PUDIndexBits 10
#define seL4_VSpaceBits seL4_PUDBits
#define seL4_VSpaceIndexBits seL4_PUDIndexBits
#define seL4_ARM_VSpaceObject seL4_ARM_PageUpperDirectoryObject
#else
#define seL4_PGDBits 12
#define seL4_PGDEntryBits 3
#define seL4_PGDIndexBits 9
#define seL4_PUDBits 12
#define seL4_PUDEntryBits 3
#define seL4_PUDIndexBits 9
#define seL4_VSpaceBits seL4_PGDBits
#define seL4_VSpaceIndexBits seL4_PGDIndexBits
#define seL4_ARM_VSpaceObject seL4_ARM_PageGlobalDirectoryObject
#endif
#define seL4_ARM_VCPUBits 12
#define seL4_VCPUBits 12
@@ -195,7 +210,14 @@ SEL4_SIZE_SANITY(seL4_PUDEntryBits, seL4_PUDIndexBits, seL4_PUDBits);
* address size fault.
*/
/* First address in the virtual address space that is not accessible to user level */
#if defined(CONFIG_ARM_PA_SIZE_BITS_44)
#define seL4_UserTop 0x00000fffffffffff
#elif defined(CONFIG_ARM_PA_SIZE_BITS_40)
#define seL4_UserTop 0x000000ffffffffff
#else
#error "Unknown physical address width"
#endif
#else
/* First address in the virtual address space that is not accessible to user level */
#define seL4_UserTop 0x00007fffffffffff

View File

@@ -37,7 +37,7 @@
* 8-bit VMID. Note that this assumes that the IPA size for S2
* translation does not use full 48-bit.
*/
#define VTABLE_VMID_SLOT 511
#define VTABLE_VMID_SLOT MASK(seL4_VSpaceIndexBits)
#define RESERVED 3
/*
@@ -319,10 +319,14 @@ static BOOT_CODE void map_it_frame_cap(cap_t vspace_cap, cap_t frame_cap, bool_t
assert(cap_frame_cap_get_capFMappedASID(frame_cap) != 0);
#ifdef AARCH64_VSPACE_S2_START_L1
pud = vspaceRoot;
#else
vspaceRoot += GET_PGD_INDEX(vptr);
assert(pgde_pgde_pud_ptr_get_present(vspaceRoot));
pud = paddr_to_pptr(pgde_pgde_pud_ptr_get_pud_base_address(vspaceRoot));
pud += GET_PUD_INDEX(vptr);
#endif
pud += GET_UPUD_INDEX(vptr);
assert(pude_pude_pd_ptr_get_present(pud));
pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pud));
pd += GET_PD_INDEX(vptr);
@@ -377,10 +381,14 @@ static BOOT_CODE void map_it_pt_cap(cap_t vspace_cap, cap_t pt_cap)
assert(cap_page_table_cap_get_capPTIsMapped(pt_cap));
#ifdef AARCH64_VSPACE_S2_START_L1
pud = vspaceRoot;
#else
vspaceRoot += GET_PGD_INDEX(vptr);
assert(pgde_pgde_pud_ptr_get_present(vspaceRoot));
pud = paddr_to_pptr(pgde_pgde_pud_ptr_get_pud_base_address(vspaceRoot));
pud += GET_PUD_INDEX(vptr);
#endif
pud += GET_UPUD_INDEX(vptr);
assert(pude_pude_pd_ptr_get_present(pud));
pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pud));
*(pd + GET_PD_INDEX(vptr)) = pde_pde_small_new(
@@ -410,10 +418,14 @@ static BOOT_CODE void map_it_pd_cap(cap_t vspace_cap, cap_t pd_cap)
assert(cap_page_directory_cap_get_capPDIsMapped(pd_cap));
#ifdef AARCH64_VSPACE_S2_START_L1
pud = vspaceRoot;
#else
vspaceRoot += GET_PGD_INDEX(vptr);
assert(pgde_pgde_pud_ptr_get_present(vspaceRoot));
pud = paddr_to_pptr(pgde_pgde_pud_ptr_get_pud_base_address(vspaceRoot));
*(pud + GET_PUD_INDEX(vptr)) = pude_pude_pd_new(
#endif
*(pud + GET_UPUD_INDEX(vptr)) = pude_pude_pd_new(
pptr_to_paddr(pd)
);
}
@@ -431,6 +443,7 @@ static BOOT_CODE cap_t create_it_pd_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vp
return cap;
}
#ifndef AARCH64_VSPACE_S2_START_L1
static BOOT_CODE void map_it_pud_cap(cap_t vspace_cap, cap_t pud_cap)
{
pgde_t *pgd = PGD_PTR(pptr_of_cap(vspace_cap));
@@ -455,12 +468,15 @@ static BOOT_CODE cap_t create_it_pud_cap(cap_t vspace_cap, pptr_t pptr, vptr_t v
map_it_pud_cap(vspace_cap, cap);
return cap;
}
#endif /* AARCH64_VSPACE_S2_START_L1 */
BOOT_CODE word_t arch_get_n_paging(v_region_t it_v_reg)
{
return get_n_paging(it_v_reg, PGD_INDEX_OFFSET) +
get_n_paging(it_v_reg, PUD_INDEX_OFFSET) +
get_n_paging(it_v_reg, PD_INDEX_OFFSET);
return
#ifndef AARCH64_VSPACE_S2_START_L1
get_n_paging(it_v_reg, PGD_INDEX_OFFSET) +
#endif
get_n_paging(it_v_reg, PUD_INDEX_OFFSET) +
get_n_paging(it_v_reg, PD_INDEX_OFFSET);
}
BOOT_CODE cap_t create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
@@ -479,6 +495,7 @@ BOOT_CODE cap_t create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_re
slot_pos_before = ndks_boot.slot_pos_cur;
write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapInitThreadVSpace), vspace_cap);
#ifndef AARCH64_VSPACE_S2_START_L1
/* Create any PUDs needed for the user land image */
for (vptr = ROUND_DOWN(it_v_reg.start, PGD_INDEX_OFFSET);
vptr < it_v_reg.end;
@@ -487,7 +504,7 @@ BOOT_CODE cap_t create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_re
return cap_null_cap_new();
}
}
#endif
/* Create any PDs needed for the user land image */
for (vptr = ROUND_DOWN(it_v_reg.start, PUD_INDEX_OFFSET);
vptr < it_v_reg.end;
@@ -642,10 +659,16 @@ static lookupPGDSlot_ret_t lookupPGDSlot(vspace_root_t *vspace, vptr_t vptr)
static lookupPUDSlot_ret_t lookupPUDSlot(vspace_root_t *vspace, vptr_t vptr)
{
lookupPGDSlot_ret_t pgdSlot;
lookupPUDSlot_ret_t ret;
pgdSlot = lookupPGDSlot(vspace, vptr);
#ifdef AARCH64_VSPACE_S2_START_L1
pude_t *pud = PUDE_PTR(vspace);
word_t pudIndex = GET_UPUD_INDEX(vptr);
ret.status = EXCEPTION_NONE;
ret.pudSlot = pud + pudIndex;
return ret;
#else
lookupPGDSlot_ret_t pgdSlot = lookupPGDSlot(vspace, vptr);
if (!pgde_pgde_pud_ptr_get_present(pgdSlot.pgdSlot)) {
current_lookup_fault = lookup_fault_missing_capability_new(PGD_INDEX_OFFSET);
@@ -656,7 +679,7 @@ static lookupPUDSlot_ret_t lookupPUDSlot(vspace_root_t *vspace, vptr_t vptr)
} else {
pude_t *pud;
pude_t *pudSlot;
word_t pudIndex = GET_PUD_INDEX(vptr);
word_t pudIndex = GET_UPUD_INDEX(vptr);
pud = paddr_to_pptr(pgde_pgde_pud_ptr_get_pud_base_address(pgdSlot.pgdSlot));
pudSlot = pud + pudIndex;
@@ -664,6 +687,7 @@ static lookupPUDSlot_ret_t lookupPUDSlot(vspace_root_t *vspace, vptr_t vptr)
ret.pudSlot = pudSlot;
return ret;
}
#endif
}
static lookupPDSlot_ret_t lookupPDSlot(vspace_root_t *vspace, vptr_t vptr)
@@ -1394,7 +1418,7 @@ static void doFlush(int invLabel, vptr_t start, vptr_t end, paddr_t pstart)
/* ================= INVOCATION HANDLING STARTS HERE ================== */
static exception_t performVSpaceFlush(int invLabel, vspace_root_t *vspaceRoot, asid_t asid,
vptr_t start, vptr_t end, paddr_t pstart)
vptr_t start, vptr_t end, paddr_t pstart)
{
if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
@@ -1419,6 +1443,7 @@ static exception_t performVSpaceFlush(int invLabel, vspace_root_t *vspaceRoot, a
return EXCEPTION_NONE;
}
#ifndef AARCH64_VSPACE_S2_START_L1
static exception_t performUpperPageDirectoryInvocationMap(cap_t cap, cte_t *ctSlot, pgde_t pgde, pgde_t *pgdSlot)
{
ctSlot->cap = cap;
@@ -1440,6 +1465,7 @@ static exception_t performUpperPageDirectoryInvocationUnmap(cap_t cap, cte_t *ct
cap_page_upper_directory_cap_ptr_set_capPUDIsMapped(&(ctSlot->cap), 0);
return EXCEPTION_NONE;
}
#endif
static exception_t performPageDirectoryInvocationMap(cap_t cap, cte_t *ctSlot, pude_t pude, pude_t *pudSlot)
{
@@ -1611,8 +1637,8 @@ static exception_t performASIDControlInvocation(void *frame, cte_t *slot,
}
static exception_t decodeARMVSpaceRootInvocation(word_t invLabel, unsigned int length,
cte_t *cte, cap_t cap, extra_caps_t extraCaps,
word_t *buffer)
cte_t *cte, cap_t cap, extra_caps_t extraCaps,
word_t *buffer)
{
vptr_t start, end;
paddr_t pstart;
@@ -1708,6 +1734,7 @@ static exception_t decodeARMVSpaceRootInvocation(word_t invLabel, unsigned int l
}
}
#ifndef AARCH64_VSPACE_S2_START_L1
static exception_t decodeARMPageUpperDirectoryInvocation(word_t invLabel, unsigned int length,
cte_t *cte, cap_t cap, extra_caps_t extraCaps,
word_t *buffer)
@@ -1794,6 +1821,7 @@ static exception_t decodeARMPageUpperDirectoryInvocation(word_t invLabel, unsign
setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
return performUpperPageDirectoryInvocationMap(cap, cte, pgde, pgdSlot.pgdSlot);
}
#endif
static exception_t decodeARMPageDirectoryInvocation(word_t invLabel, unsigned int length,
cte_t *cte, cap_t cap, extra_caps_t extraCaps,
@@ -2268,10 +2296,11 @@ exception_t decodeARMMMUInvocation(word_t invLabel, word_t length, cptr_t cptr,
switch (cap_get_capType(cap)) {
case cap_vtable_root_cap:
return decodeARMVSpaceRootInvocation(invLabel, length, cte, cap, extraCaps, buffer);
#ifndef AARCH64_VSPACE_S2_START_L1
case cap_page_upper_directory_cap:
return decodeARMPageUpperDirectoryInvocation(invLabel, length, cte,
cap, extraCaps, buffer);
#endif
case cap_page_directory_cap:
return decodeARMPageDirectoryInvocation(invLabel, length, cte,
cap, extraCaps, buffer);
@@ -2496,7 +2525,7 @@ void Arch_userStackTrace(tcb_t *tptr)
return;
}
vspaceRoot = VSPACE_PTR(cap_vtable_root_get_basePtr(threadRoot));
vspaceRoot = cap_vtable_root_get_basePtr(threadRoot);
sp = getRegister(tptr, SP_EL0);
/* check for alignment so we don't have to worry about accessing

View File

@@ -84,7 +84,7 @@ asid_pool_t *armKSASIDTable[BIT(asidHighBits)];
*/
vspace_root_t armKSGlobalUserVSpace[BIT(seL4_VSpaceIndexBits)] ALIGN_BSS(BIT(seL4_VSpaceBits));
pgde_t armKSGlobalKernelPGD[BIT(PGD_INDEX_BITS)] ALIGN_BSS(BIT(seL4_PGDBits));
pgde_t armKSGlobalKernelPGD[BIT(PGD_INDEX_BITS)] ALIGN_BSS(BIT(PGD_SIZE_BITS));
pude_t armKSGlobalKernelPUD[BIT(PUD_INDEX_BITS)] ALIGN_BSS(BIT(seL4_PUDBits));
pde_t armKSGlobalKernelPDs[BIT(PUD_INDEX_BITS)][BIT(PD_INDEX_BITS)] ALIGN_BSS(BIT(seL4_PageDirBits));

View File

@@ -144,16 +144,24 @@ finaliseCap_ret_t Arch_finaliseCap(cap_t cap, bool_t final)
case cap_page_global_directory_cap:
if (final && cap_page_global_directory_cap_get_capPGDIsMapped(cap)) {
deleteASID(cap_page_global_directory_cap_get_capPGDMappedASID(cap),
(vspace_root_t *)(cap_page_global_directory_cap_get_capPGDBasePtr(cap)));
VSPACE_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(cap)));
}
break;
case cap_page_upper_directory_cap:
#ifdef AARCH64_VSPACE_S2_START_L1
if (final && cap_page_upper_directory_cap_get_capPUDIsMapped(cap)) {
deleteASID(cap_page_upper_directory_cap_get_capPUDMappedASID(cap),
PUDE_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(cap)));
}
#else
if (final && cap_page_upper_directory_cap_get_capPUDIsMapped(cap)) {
unmapPageUpperDirectory(cap_page_upper_directory_cap_get_capPUDMappedASID(cap),
cap_page_upper_directory_cap_get_capPUDMappedAddress(cap),
PUDE_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(cap)));
}
#endif
break;
case cap_page_directory_cap:
@@ -292,8 +300,10 @@ word_t Arch_getObjectSize(word_t t)
return seL4_PageDirBits;
case seL4_ARM_PageUpperDirectoryObject:
return seL4_PUDBits;
#ifndef AARCH64_VSPACE_S2_START_L1
case seL4_ARM_PageGlobalDirectoryObject:
return seL4_PGDBits;
#endif
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
case seL4_ARM_VCPUObject:
return VCPU_SIZE_BITS;
@@ -336,14 +346,14 @@ cap_t Arch_createObject(object_t t, void *regionBase, word_t userSize, bool_t de
VMReadWrite, /* capFVMRights */
!!deviceMemory /* capFIsDevice */
);
#ifndef AARCH64_VSPACE_S2_START_L1
case seL4_ARM_PageGlobalDirectoryObject:
return cap_page_global_directory_cap_new(
asidInvalid, /* capPGDMappedASID */
(word_t)regionBase, /* capPGDBasePtr */
0 /* capPGDIsMapped */
);
#endif
case seL4_ARM_PageUpperDirectoryObject:
return cap_page_upper_directory_cap_new(
asidInvalid, /* capPUDMappedASID */