[libcpu] rv64: support for ARCH_REMAP_KERNEL (#9067)

* [libcpu] support for ARCH_REMAP_KERNEL

These changes introduce support for the ARCH_REMAP_KERNEL configuration,
which isolates kernel space in the high virtual address region. This
improves memory protection and management by segregating user and kernel
address spaces more effectively.

Changes:
- Updated conditional macros to check for ARCH_REMAP_KERNEL instead of
  ARCH_KERNEL_IN_HIGH_VA in board initialization files to reflect the new
  configuration option.
- Modified qemu-virt64-riscv Kconfig and SConstruct files to include and
  utilize ARCH_REMAP_KERNEL.
- Created a new linker script `link_smart.lds` for smart linking in qemu-virt64-riscv.
- Updated rtconfig.py to use a more flexible execution path setup.
- Enhanced user address space definitions in `lwp_arch.h` to support the
  new virtual address mappings.
- Adjusted kernel memory initialization and mapping logic in `c906/mmu.c`
  and `virt64/mmu.c` to account for high virtual address regions.
- Added Kconfig option to enable ARCH_REMAP_KERNEL for RISCV64 architectures.
- Enhanced memory setup functions to support the new mapping scheme, including
  updates to early page table setup and address relocation logic (see the
  pv_offset sketch after this list).
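
For orientation, the address relocation below boils down to one physical-to-virtual
offset (pv_off) computed at boot from the position of _start. A minimal sketch of the
arithmetic, assuming the usual OpenSBI payload address 0x80200000 on qemu-virt as the
load address (an assumption for illustration; the real code derives both addresses at
run time):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* pv_off = physical (run-time) address of _start minus its link-time virtual address */
    uint64_t phys_start = 0x80200000ULL;           /* assumed load address on qemu-virt */
    uint64_t virt_start = 0xffffffc000000000ULL;   /* KERNEL_VADDR_START with ARCH_REMAP_KERNEL */
    uint64_t pv_off     = phys_start - virt_start; /* wraps modulo 2^64, same as the kernel code */

    /* any kernel virtual address converts to physical by adding pv_off, and back by subtracting */
    uint64_t virt = virt_start + 0x200000;
    printf("pv_off = %#llx\n", (unsigned long long)pv_off);
    printf("virt %#llx -> phys %#llx\n", (unsigned long long)virt,
           (unsigned long long)(virt + pv_off));
    return 0;
}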

These modifications ensure that the system can utilize high memory
addresses for the kernel, improving memory isolation and system stability.

Signed-off-by: Shell <smokewood@qq.com>

* fixup: CI run failed

* bsp: default config without using smart

* fixup: static checks

* restore rt_hw_mmu_kernel_map_init for D1

---------

Signed-off-by: Shell <smokewood@qq.com>
Author:    Shell
Date:      2024-06-18 11:15:59 +08:00
Committer: GitHub
Parent:    73727fa06e
Commit:    65c9947225
17 changed files with 700 additions and 323 deletions

View File

@@ -133,6 +133,7 @@ if RT_USING_SMART
hex "The virtural address of kernel start"
default 0xffff000000000000 if ARCH_ARMV8
default 0xc0000000 if ARCH_ARM
default 0xffffffc000000000 if ARCH_RISCV && ARCH_REMAP_KERNEL
default 0x80000000 if ARCH_RISCV
depends on ARCH_MM_MMU
@@ -255,6 +256,14 @@ config ARCH_RISCV64
    select ARCH_CPU_64BIT
    bool

if ARCH_RISCV64
config ARCH_REMAP_KERNEL
    bool
    depends on RT_USING_SMART
    help
        Remap the kernel image to the high virtual address region

endif

config ARCH_IA32
    bool
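
The new 0xffffffc000000000 default is not arbitrary: under Sv39 it is the lowest
canonical address of the upper half (bit 38 set, sign-extended), which is exactly the
region the remapped kernel occupies. A quick compile-time check of that property, as a
standalone sketch:

#include <stdint.h>

/* Sv39 uses 39 significant VA bits; upper-half addresses sign-extend bit 38,
 * so the first upper-half address equals -(2^38) when viewed as a signed value. */
_Static_assert((int64_t)0xffffffc000000000ULL == -(int64_t)(1ULL << 38),
               "0xffffffc000000000 is the base of the Sv39 upper half");

The 0xffff000000000000 ARMv8 default plays the same role for a 48-bit VA split.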

View File

@@ -18,6 +18,7 @@
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
#include <board.h>
#include <cache.h>
#include <mm_aspace.h>
#include <mm_page.h>
@@ -324,6 +325,14 @@ static inline void _init_region(void *vaddr, size_t size)
}
#endif
#if defined(RT_USING_SMART) && defined(ARCH_REMAP_KERNEL)
#define KERN_SPACE_START ((void *)KERNEL_VADDR_START)
#define KERN_SPACE_SIZE (0xfffffffffffff000UL - KERNEL_VADDR_START + 0x1000)
#else
#define KERN_SPACE_START ((void *)0x1000)
#define KERN_SPACE_SIZE ((size_t)USER_VADDR_START - 0x1000)
#endif

int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
                       rt_size_t *vtable, rt_size_t pv_off)
{
@@ -363,8 +372,7 @@ int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
        }
    }

-   rt_aspace_init(&rt_kernel_space, (void *)0x1000, USER_VADDR_START - 0x1000,
-                  vtable);
+   rt_aspace_init(&rt_kernel_space, KERN_SPACE_START, KERN_SPACE_SIZE, vtable);

    _init_region(v_address, size);

    return 0;
@@ -569,6 +577,62 @@ void rt_hw_mmu_kernel_map_init(rt_aspace_t aspace, rt_size_t vaddr_start, rt_siz
    rt_hw_tlb_invalidate_all_local();
}

#define SATP_BASE ((size_t)SATP_MODE << SATP_MODE_OFFSET)

void rt_hw_mem_setup_early(void)
{
    rt_size_t pv_off;
    rt_size_t ps = 0x0;
    rt_size_t vs = 0x0;
    rt_size_t *early_pgtbl = (size_t *)(((size_t)&__bss_end + 4095) & ~0xfff);

    /* calculate pv_offset */
    void *symb_pc;
    void *symb_linker;
    __asm__ volatile("la %0, _start\n" : "=r"(symb_pc));
    __asm__ volatile("la %0, _start_link_addr\n" : "=r"(symb_linker));
    symb_linker = *(void **)symb_linker;
    pv_off = symb_pc - symb_linker;
    rt_kmem_pvoff_set(pv_off);

    if (pv_off)
    {
        if (pv_off & (1ul << (ARCH_INDEX_WIDTH * 2 + ARCH_PAGE_SHIFT)))
        {
            LOG_E("%s: not aligned virtual address. pv_offset %p", __func__, pv_off);
            RT_ASSERT(0);
        }

        /**
         * identity mapping: the PC is still in the lower region
         * before relocating to high memory
         */
        for (size_t i = 0; i < __SIZE(PPN0_BIT); i++)
        {
            early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V);
            ps += L1_PAGE_SIZE;
        }

        /* relocate text region */
        __asm__ volatile("la %0, _start\n" : "=r"(ps));
        ps &= ~(L1_PAGE_SIZE - 1);
        vs = ps - pv_off;

        /* relocate region */
        rt_size_t vs_idx = GET_L1(vs);
        rt_size_t ve_idx = GET_L1(vs + 0x80000000);
        for (size_t i = vs_idx; i < ve_idx; i++)
        {
            early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V);
            ps += L1_PAGE_SIZE;
        }

        /* apply new mapping */
        asm volatile("sfence.vma x0, x0");
        write_csr(satp, SATP_BASE | ((size_t)early_pgtbl >> PAGE_OFFSET_BIT));
        asm volatile("sfence.vma x0, x0");
    }

    /* return to lower text section */
}

void *rt_hw_mmu_pgtbl_create(void)
{
    size_t *mmu_table;
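
The KERN_SPACE_START/KERN_SPACE_SIZE macros above size the kernel address space so that
it runs from KERNEL_VADDR_START up to the very top of the virtual address space without
overflowing a 64-bit size. A small sketch of the resulting numbers, assuming the
0xffffffc000000000 default (the names below are local to the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t kernel_vaddr_start = 0xffffffc000000000ULL;  /* Kconfig default with ARCH_REMAP_KERNEL */
    /* same formula as KERN_SPACE_SIZE: stop one page short of 2^64, then add the page back */
    uint64_t kern_space_size = 0xfffffffffffff000ULL - kernel_vaddr_start + 0x1000;

    /* 0x4000000000 bytes = 256 GiB, i.e. the entire Sv39 upper half */
    printf("kernel space: start %#llx, size %#llx (%llu GiB)\n",
           (unsigned long long)kernel_vaddr_start,
           (unsigned long long)kern_space_size,
           (unsigned long long)(kern_space_size >> 30));
    return 0;
}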

View File

@@ -64,8 +64,8 @@
#define PAGE_ATTR_CB (PTE_BUF | PTE_CACHE)
#define PAGE_ATTR_DEV (PTE_SO)
-#define PAGE_DEFAULT_ATTR_LEAF (PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D | PTE_U | PAGE_ATTR_RWX | PTE_V)
-#define PAGE_DEFAULT_ATTR_NEXT (PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D | PTE_V)
+#define PAGE_DEFAULT_ATTR_LEAF (PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D | PTE_G | PTE_U | PAGE_ATTR_RWX | PTE_V)
+#define PAGE_DEFAULT_ATTR_NEXT (PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D | PTE_G | PTE_V)
#define PAGE_IS_LEAF(pte) __MASKVALUE(pte, PAGE_ATTR_RWX)
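
The only functional change in this header is adding PTE_G to the default leaf and
next-level attributes. With the kernel now occupying the same upper-half mappings in
every address space, marking those entries global tells the MMU they are
ASID-independent, so they survive address-space switches in the TLB. For reference, the
architectural PTE flag bits from the RISC-V privileged specification (listed here for
context, not quoted from this header):

/* RISC-V page-table-entry flag bits (privileged spec) */
#define PTE_V (1UL << 0) /* valid */
#define PTE_R (1UL << 1) /* readable */
#define PTE_W (1UL << 2) /* writable */
#define PTE_X (1UL << 3) /* executable */
#define PTE_U (1UL << 4) /* accessible in user mode */
#define PTE_G (1UL << 5) /* global: present in all address spaces */
#define PTE_A (1UL << 6) /* accessed */
#define PTE_D (1UL << 7) /* dirty */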

View File

@@ -269,6 +269,14 @@ static inline void _init_region(void *vaddr, size_t size)
}
#endif
#if defined(RT_USING_SMART) && defined(ARCH_REMAP_KERNEL)
#define KERN_SPACE_START ((void *)KERNEL_VADDR_START)
#define KERN_SPACE_SIZE (0xfffffffffffff000UL - KERNEL_VADDR_START + 0x1000)
#else
#define KERN_SPACE_START ((void *)0x1000)
#define KERN_SPACE_SIZE ((size_t)USER_VADDR_START - 0x1000)
#endif

int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
                       rt_size_t *vtable, rt_size_t pv_off)
{
@@ -308,8 +316,7 @@ int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
        }
    }

-   rt_aspace_init(&rt_kernel_space, (void *)0x1000, USER_VADDR_START - 0x1000,
-                  vtable);
+   rt_aspace_init(&rt_kernel_space, KERN_SPACE_START, KERN_SPACE_SIZE, vtable);

    _init_region(v_address, size);

    return 0;
@@ -492,6 +499,62 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
    rt_page_cleanup();
}

#define SATP_BASE ((size_t)SATP_MODE << SATP_MODE_OFFSET)

void rt_hw_mem_setup_early(void)
{
    rt_size_t pv_off;
    rt_size_t ps = 0x0;
    rt_size_t vs = 0x0;
    rt_size_t *early_pgtbl = (size_t *)(((size_t)&__bss_end + 4095) & ~0xfff);

    /* calculate pv_offset */
    void *symb_pc;
    void *symb_linker;
    __asm__ volatile("la %0, _start\n" : "=r"(symb_pc));
    __asm__ volatile("la %0, _start_link_addr\n" : "=r"(symb_linker));
    symb_linker = *(void **)symb_linker;
    pv_off = symb_pc - symb_linker;
    rt_kmem_pvoff_set(pv_off);

    if (pv_off)
    {
        if (pv_off & (1ul << (ARCH_INDEX_WIDTH * 2 + ARCH_PAGE_SHIFT)))
        {
            LOG_E("%s: not aligned virtual address. pv_offset %p", __func__, pv_off);
            RT_ASSERT(0);
        }

        /**
         * identity mapping: the PC is still in the lower region
         * before relocating to high memory
         */
        for (size_t i = 0; i < __SIZE(PPN0_BIT); i++)
        {
            early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V);
            ps += L1_PAGE_SIZE;
        }

        /* relocate text region */
        __asm__ volatile("la %0, _start\n" : "=r"(ps));
        ps &= ~(L1_PAGE_SIZE - 1);
        vs = ps - pv_off;

        /* relocate region */
        rt_size_t vs_idx = GET_L1(vs);
        rt_size_t ve_idx = GET_L1(vs + 0x80000000);
        for (size_t i = vs_idx; i < ve_idx; i++)
        {
            early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V);
            ps += L1_PAGE_SIZE;
        }

        /* apply new mapping */
        asm volatile("sfence.vma x0, x0");
        write_csr(satp, SATP_BASE | ((size_t)early_pgtbl >> PAGE_OFFSET_BIT));
        asm volatile("sfence.vma x0, x0");
    }

    /* return to lower text section */
}

void *rt_hw_mmu_pgtbl_create(void)
{
    size_t *mmu_table;
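
The relocation loop above (identical in both mmu.c copies) installs 1 GiB root-level
entries that alias a 2 GiB window of the kernel's link-time addresses onto its physical
load region, while the earlier loop keeps an identity mapping so the still-low PC stays
valid. A sketch of the index math, assuming the usual Sv39 meaning of GET_L1 and
L1_PAGE_SIZE and a 0x80000000 DRAM base (both assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define L1_PAGE_SIZE (1ULL << 30)            /* 1 GiB per Sv39 root entry (assumed meaning) */
#define GET_L1(va)   (((va) >> 30) & 0x1ff)  /* root-table index = VA bits 38:30 (assumed meaning) */

int main(void)
{
    uint64_t vs = 0xffffffc000000000ULL;  /* 1 GiB-aligned link-time base of the kernel window */
    uint64_t ps = 0x80000000ULL;          /* 1 GiB-aligned physical base (qemu-virt DRAM, assumed) */

    /* mirror of the relocation loop: map a 2 GiB virtual window onto the load region */
    for (uint64_t i = GET_L1(vs); i < GET_L1(vs + 0x80000000ULL); i++, ps += L1_PAGE_SIZE)
        printf("early_pgtbl[%llu] -> PA %#llx (1 GiB, RWX, global)\n",
               (unsigned long long)i, (unsigned long long)ps);
    return 0;
}

After write_csr(satp, ...) both aliases are live; the startup assembly in the next file
then subtracts pv_off from the PC, gp, sp and stvec to continue from the high alias.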

View File

@@ -94,5 +94,39 @@ _start:
     */
    csrw sscratch, zero
    call init_bss
-   call sbi_init
-   j primary_cpu_entry
#ifdef ARCH_MM_MMU
    call rt_hw_mem_setup_early
    call rt_kmem_pvoff
    /* a0 := pvoff */
    beq a0, zero, 1f

    /* relocate pc */
    la x1, _after_pc_relocation
    sub x1, x1, a0   /* x1 (ra) now holds the high-VA alias of _after_pc_relocation */
    ret              /* "return" jumps into the relocated mapping */
_after_pc_relocation:
    /* relocate gp */
    sub gp, gp, a0
    /* relocate context: sp */
    la sp, __stack_start__
    li t0, __STACKSIZE__
    add sp, sp, t0
    /* reset s0-fp */
    mv s0, zero
    /* relocate stvec */
    la t0, trap_entry
    csrw stvec, t0
1:
#endif
    call sbi_init
    call primary_cpu_entry

_never_return_here:
    j .

.global _start_link_addr
_start_link_addr:
    .dword __text_start
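
Once the board boots with these changes, a quick way to confirm the kernel really runs
from the upper half is to print or assert the address of any kernel symbol; a hedged
sketch using standard RT-Thread facilities (the check itself is illustrative, not part
of this patch):

#include <rtthread.h>

/* with ARCH_REMAP_KERNEL, every kernel text/data address should sit above KERNEL_VADDR_START */
static int remap_sanity_check(void)
{
#ifdef ARCH_REMAP_KERNEL
    RT_ASSERT((rt_ubase_t)&remap_sanity_check >= 0xffffffc000000000UL);
#endif
    rt_kprintf("kernel text sample address: %p\n", remap_sanity_check);
    return 0;
}
INIT_APP_EXPORT(remap_sanity_check);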