bsps/x86_64: Add support for RTEMS interrupts

Updates #2898.
This commit is contained in:
Amaan Cheval
2018-08-13 16:20:38 +05:30
committed by Joel Sherrill
parent 4544749e3c
commit 686932125d
12 changed files with 653 additions and 38 deletions

View File

@@ -0,0 +1,151 @@
/*
* Copyright (c) 2018.
* Amaan Cheval <amaan.cheval@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stdint.h>
#include <rtems.h>
#include <rtems/score/idt.h>
#include <rtems/score/basedefs.h>
#include <rtems/score/x86_64.h>
#include <rtems/score/cpuimpl.h>
#include <bsp/irq-generic.h>
/*
* The IDT maps every interrupt vector to an interrupt_descriptor based on the
* vector number.
*/
/*
 * Zero-filled IDT: a zeroed entry has the present bit clear, so vectors
 * without an installed handler fault instead of jumping to garbage.
 */
interrupt_descriptor amd64_idt[IDT_SIZE] RTEMS_ALIGNED(8) = { { 0 } };
/* Pseudo-descriptor handed to lidt; each long-mode gate is 16 bytes. */
struct idt_record idtr = {
.limit = (IDT_SIZE * 16) - 1,
.base = (uintptr_t) amd64_idt
};
/**
* IRQs that the RTEMS Interrupt Manager will manage
* @see DISTINCT_INTERRUPT_ENTRY
*/
/*
 * Table of per-vector entry stubs (rtems_irq_prologue_N, generated by
 * DISTINCT_INTERRUPT_ENTRY in isr_handler.S); the index is the vector number.
 *
 * NOTE(review): this initializer list has exactly 33 entries (prologues
 * 0 through 32) — verify it stays in sync with BSP_IRQ_VECTOR_NUMBER; any
 * uninitialized trailing element is 0 and would be installed into the IDT
 * as a NULL handler.
 */
static uintptr_t rtemsIRQs[BSP_IRQ_VECTOR_NUMBER] = {
(uintptr_t) rtems_irq_prologue_0,
(uintptr_t) rtems_irq_prologue_1,
(uintptr_t) rtems_irq_prologue_2,
(uintptr_t) rtems_irq_prologue_3,
(uintptr_t) rtems_irq_prologue_4,
(uintptr_t) rtems_irq_prologue_5,
(uintptr_t) rtems_irq_prologue_6,
(uintptr_t) rtems_irq_prologue_7,
(uintptr_t) rtems_irq_prologue_8,
(uintptr_t) rtems_irq_prologue_9,
(uintptr_t) rtems_irq_prologue_10,
(uintptr_t) rtems_irq_prologue_11,
(uintptr_t) rtems_irq_prologue_12,
(uintptr_t) rtems_irq_prologue_13,
(uintptr_t) rtems_irq_prologue_14,
(uintptr_t) rtems_irq_prologue_15,
(uintptr_t) rtems_irq_prologue_16,
(uintptr_t) rtems_irq_prologue_17,
(uintptr_t) rtems_irq_prologue_18,
(uintptr_t) rtems_irq_prologue_19,
(uintptr_t) rtems_irq_prologue_20,
(uintptr_t) rtems_irq_prologue_21,
(uintptr_t) rtems_irq_prologue_22,
(uintptr_t) rtems_irq_prologue_23,
(uintptr_t) rtems_irq_prologue_24,
(uintptr_t) rtems_irq_prologue_25,
(uintptr_t) rtems_irq_prologue_26,
(uintptr_t) rtems_irq_prologue_27,
(uintptr_t) rtems_irq_prologue_28,
(uintptr_t) rtems_irq_prologue_29,
(uintptr_t) rtems_irq_prologue_30,
(uintptr_t) rtems_irq_prologue_31,
(uintptr_t) rtems_irq_prologue_32
};
void lidt(struct idt_record *ptr)
{
__asm__ volatile ("lidt %0" :: "m"(*ptr));
}
interrupt_descriptor amd64_create_interrupt_descriptor(
uintptr_t handler, uint8_t types_and_attributes
)
{
interrupt_descriptor entry = {
.offset_0 = handler & 0xffff,
.segment_selector = amd64_get_cs(),
.interrupt_stack_table = 0,
.type_and_attributes = types_and_attributes,
.offset_1 = (handler >> 16) & 0xffff,
.offset_2 = handler >> 32,
.reserved_zero = 0,
};
return entry;
}
/*
 * Reassemble the 64-bit handler address stored in the IDT entry for vector.
 *
 * @param vector Vector number (must be < IDT_SIZE; not checked here).
 * @return The handler address encoded in the descriptor's offset fields.
 */
uintptr_t amd64_get_handler_from_idt(uint32_t vector)
{
  interrupt_descriptor entry = amd64_idt[vector];
  /*
   * Cast each chunk up to uintptr_t before shifting: entry.offset_1 is
   * uint16_t and would otherwise be promoted to (signed) int, so
   * "entry.offset_1 << 16" could set the int sign bit (undefined behavior
   * for a signed left shift) and then sign-extend to 64 bits when OR'd
   * with the uint64_t term, corrupting bits 32-63 of the result whenever
   * offset_1 >= 0x8000.
   */
  uintptr_t handler = (uintptr_t) entry.offset_0
    | ((uintptr_t) entry.offset_1 << 16)
    | ((uintptr_t) entry.offset_2 << 32);
  return handler;
}
/*
 * Replace the IDT entry for vector with new_handler (installed as a present
 * interrupt gate) and return the previous handler address via old_handler.
 *
 * No bounds check is performed: the caller must pass vector < IDT_SIZE and
 * a non-NULL old_handler.  Takes effect immediately once lidt has loaded
 * amd64_idt.
 */
void amd64_install_raw_interrupt(
uint32_t vector, uintptr_t new_handler, uintptr_t *old_handler
)
{
*old_handler = amd64_get_handler_from_idt(vector);
interrupt_descriptor new_desc = amd64_create_interrupt_descriptor(
new_handler,
IDT_INTERRUPT_GATE | IDT_PRESENT
);
amd64_idt[vector] = new_desc;
}
/*
 * C entry point called from _ISR_Handler (isr_handler.S) after the interrupt
 * frame has been saved; forwards the vector to the generic RTEMS dispatcher.
 */
void amd64_dispatch_isr(rtems_vector_number vector)
{
bsp_interrupt_handler_dispatch(vector);
}
/*
 * Install the RTEMS-managed interrupt entry stubs into the IDT and load the
 * IDT register.
 *
 * @return RTEMS_SUCCESSFUL always.
 */
rtems_status_code bsp_interrupt_facility_initialize(void)
{
  uintptr_t old;
  for (uint32_t i = 0; i < BSP_IRQ_VECTOR_NUMBER; i++) {
    /*
     * Skip table slots without a prologue stub: rtemsIRQs is sized by
     * BSP_IRQ_VECTOR_NUMBER but has only 33 explicit initializers, so an
     * unset trailing element is 0 and would otherwise be installed as a
     * *present* gate pointing at address 0.
     */
    if (rtemsIRQs[i] != 0) {
      amd64_install_raw_interrupt(i, rtemsIRQs[i], &old);
    }
  }
  lidt(&idtr);
  return RTEMS_SUCCESSFUL;
}
/* Not implemented yet: no PIC/APIC masking support in this BSP so far. */
void bsp_interrupt_vector_disable(rtems_vector_number vector)
{
/* XXX */
}
/* Not implemented yet: no PIC/APIC unmasking support in this BSP so far. */
void bsp_interrupt_vector_enable(rtems_vector_number vector)
{
/* XXX */
}

View File

@@ -0,0 +1,191 @@
/*
* This file contains the _ISR_Handler that acts as the common handler for all
* vectors to be managed by the RTEMS interrupt manager.
*/
/*
* Copyright (c) 2018.
* Amaan Cheval <amaan.cheval@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <rtems/asm.h>
#include <rtems/score/cpu.h>
#include <rtems/score/percpu.h>
#ifndef CPU_STACK_ALIGNMENT
#error "Missing header? CPU_STACK_ALIGNMENT not defined"
#endif
BEGIN_CODE
/*
 * Spurious APIC interrupts require no EOI and no state handling: return to
 * the interrupted context immediately.
 */
PUBLIC(apic_spurious_handler)
SYM(apic_spurious_handler):
iretq
/*
 * These are callee-saved registers, which means we can use them in our
 * interrupts as persistent scratch registers (i.e. calls will not destroy
 * them), as long as we also save and restore it for the interrupted task.
 */
.set SCRATCH_REG0, rbp
.set SCRATCH_REG1, rbx
/*
 * We need to set a distinct handler for every interrupt vector so that
 * we can pass the vector number to _ISR_Handler correctly.
 */
/*
 * Each stub saves the interrupted context's REG_ARG0 (rdi) on the task
 * stack, loads the vector number into REG_ARG0, saves the two scratch
 * registers, then jumps to the common _ISR_Handler, which pops all three
 * in reverse order before iretq.
 */
#define DISTINCT_INTERRUPT_ENTRY(vector) \
.p2align 4 ; \
PUBLIC(rtems_irq_prologue_ ## vector) ; \
SYM(rtems_irq_prologue_ ## vector): ; \
pushq REG_ARG0 ; \
movq $vector, REG_ARG0 ; \
pushq SCRATCH_REG0 ; \
pushq SCRATCH_REG1 ; \
jmp SYM(_ISR_Handler)
/* One entry stub per managed vector; keep in sync with rtemsIRQs (idt.c). */
DISTINCT_INTERRUPT_ENTRY(0)
DISTINCT_INTERRUPT_ENTRY(1)
DISTINCT_INTERRUPT_ENTRY(2)
DISTINCT_INTERRUPT_ENTRY(3)
DISTINCT_INTERRUPT_ENTRY(4)
DISTINCT_INTERRUPT_ENTRY(5)
DISTINCT_INTERRUPT_ENTRY(6)
DISTINCT_INTERRUPT_ENTRY(7)
DISTINCT_INTERRUPT_ENTRY(8)
DISTINCT_INTERRUPT_ENTRY(9)
DISTINCT_INTERRUPT_ENTRY(10)
DISTINCT_INTERRUPT_ENTRY(11)
DISTINCT_INTERRUPT_ENTRY(12)
DISTINCT_INTERRUPT_ENTRY(13)
DISTINCT_INTERRUPT_ENTRY(14)
DISTINCT_INTERRUPT_ENTRY(15)
DISTINCT_INTERRUPT_ENTRY(16)
DISTINCT_INTERRUPT_ENTRY(17)
DISTINCT_INTERRUPT_ENTRY(18)
DISTINCT_INTERRUPT_ENTRY(19)
DISTINCT_INTERRUPT_ENTRY(20)
DISTINCT_INTERRUPT_ENTRY(21)
DISTINCT_INTERRUPT_ENTRY(22)
DISTINCT_INTERRUPT_ENTRY(23)
DISTINCT_INTERRUPT_ENTRY(24)
DISTINCT_INTERRUPT_ENTRY(25)
DISTINCT_INTERRUPT_ENTRY(26)
DISTINCT_INTERRUPT_ENTRY(27)
DISTINCT_INTERRUPT_ENTRY(28)
DISTINCT_INTERRUPT_ENTRY(29)
DISTINCT_INTERRUPT_ENTRY(30)
DISTINCT_INTERRUPT_ENTRY(31)
DISTINCT_INTERRUPT_ENTRY(32)
/*
 * Common interrupt handler body.  On entry (from a DISTINCT_INTERRUPT_ENTRY
 * stub) the task stack holds the interrupted context's REG_ARG0,
 * SCRATCH_REG0 and SCRATCH_REG1, and REG_ARG0 holds the vector number.
 */
SYM(_ISR_Handler):
.save_cpu_interrupt_frame:
/* SAVED_RSP aliases SCRATCH_REG0 (rbp): callee-saved, so it survives calls. */
.set SAVED_RSP, SCRATCH_REG0
movq rsp, SAVED_RSP
/* Make space for CPU_Interrupt_frame */
subq $CPU_INTERRUPT_FRAME_SIZE, rsp
.set ALIGNMENT_MASK, ~(CPU_STACK_ALIGNMENT - 1)
andq $ALIGNMENT_MASK, rsp
// XXX: Save interrupt mask?
/* Save caller-saved registers to CPU_Interrupt_frame */
/*
 * NOTE(review): the slot order used here (saved rsp at offset 0, rax at the
 * highest offset) is the reverse of the CPU_Interrupt_frame field order in
 * cpu.h — confirm nothing reads this frame through the struct type.
 */
movq rax, (8 * CPU_SIZEOF_POINTER)(rsp)
movq rcx, (7 * CPU_SIZEOF_POINTER)(rsp)
movq rdx, (6 * CPU_SIZEOF_POINTER)(rsp)
movq rsi, (5 * CPU_SIZEOF_POINTER)(rsp)
movq r8, (4 * CPU_SIZEOF_POINTER)(rsp)
movq r9, (3 * CPU_SIZEOF_POINTER)(rsp)
movq r10, (2 * CPU_SIZEOF_POINTER)(rsp)
movq r11, (1 * CPU_SIZEOF_POINTER)(rsp)
/* Save the initial rsp */
movq SAVED_RSP, (0 * CPU_SIZEOF_POINTER)(rsp)
.switch_stack_if_needed:
/* Save current aligned rsp so we can find CPU_Interrupt_frame again later */
movq rsp, SAVED_RSP
/*
 * Switch to interrupt stack if necessary; it's necessary if this is the
 * outermost interrupt, which means we've been using the task's stack so far
 */
#ifdef RTEMS_SMP
/* XXX: We should call _CPU_SMP_Get_current_processor here */
#endif
/* Per_CPU_Info aliases SCRATCH_REG1 (rbx): callee-saved across the calls below. */
.set Per_CPU_Info, SCRATCH_REG1
movq $SYM(_Per_CPU_Information), Per_CPU_Info
cmpq $0, PER_CPU_ISR_NEST_LEVEL(Per_CPU_Info)
jne .skip_switch
.switch_stack:
movq PER_CPU_INTERRUPT_STACK_HIGH(Per_CPU_Info), rsp
.skip_switch:
incq PER_CPU_ISR_NEST_LEVEL(Per_CPU_Info)
incq PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(Per_CPU_Info)
.call_isr_dispatch:
/* REG_ARG0 already includes the vector number, so we can simply call */
call amd64_dispatch_isr
.restore_stack:
/* If this is the outermost stack, this restores the task stack */
movq SAVED_RSP, rsp
decq PER_CPU_ISR_NEST_LEVEL(Per_CPU_Info)
decq PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(Per_CPU_Info)
/* XXX: Bug in QEMU causing ZF to not be set by decq necessitating the cmpb */
/* NOTE(review): cmpb tests only the low byte of the level — confirm the
 * field width, or nesting depths that are multiples of 256 would misread
 * as zero. */
cmpb $0, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(Per_CPU_Info)
/* If dispatch is non-zero, it is disabled, so skip scheduling it */
jne .restore_cpu_interrupt_frame
.schedule_dispatch:
cmpb $0, PER_CPU_DISPATCH_NEEDED(Per_CPU_Info)
je .restore_cpu_interrupt_frame
call _Thread_Dispatch
.restore_cpu_interrupt_frame:
/* rsp points back at the aligned CPU_Interrupt_frame here. */
/* Restore registers from CPU_Interrupt_frame */
movq (8 * CPU_SIZEOF_POINTER)(rsp), rax
movq (7 * CPU_SIZEOF_POINTER)(rsp), rcx
movq (6 * CPU_SIZEOF_POINTER)(rsp), rdx
movq (5 * CPU_SIZEOF_POINTER)(rsp), rsi
movq (4 * CPU_SIZEOF_POINTER)(rsp), r8
movq (3 * CPU_SIZEOF_POINTER)(rsp), r9
movq (2 * CPU_SIZEOF_POINTER)(rsp), r10
movq (1 * CPU_SIZEOF_POINTER)(rsp), r11
/* Restore the rsp value from just before _ISR_Handler was called */
movq (0 * CPU_SIZEOF_POINTER)(rsp), SAVED_RSP
movq SAVED_RSP, rsp
/* Restore args DISTINCT_INTERRUPT_ENTRY pushed to task stack */
popq SCRATCH_REG1
popq SCRATCH_REG0
popq REG_ARG0
iretq
END_CODE
END

View File

@@ -27,8 +27,10 @@
#include <bsp.h>
#include <bsp/bootcard.h>
#include <libcpu/page.h>
#include <bsp/irq-generic.h>
/*
 * Early BSP initialization: set up paging, then bring up the generic RTEMS
 * interrupt support (which builds and loads the IDT through
 * bsp_interrupt_facility_initialize).
 */
void bsp_start(void)
{
paging_init();
bsp_interrupt_initialize();
}

View File

@@ -1,5 +1,9 @@
## This file was generated by "./bootstrap -H".
include_bspdir = $(includedir)/bsp
include_bsp_HEADERS =
include_bsp_HEADERS += ../../../../../bsps/x86_64/include/bsp/irq.h
include_libcpudir = $(includedir)/libcpu
include_libcpu_HEADERS =
include_libcpu_HEADERS += ../../../../../bsps/x86_64/include/libcpu/page.h

View File

@@ -0,0 +1,46 @@
/*
* This file contains the mandatory defines to support the irq.h and
* irq-generic.c interfaces (initialized finally with bsp_interrupt_initialize).
*/
/*
* Copyright (c) 2018.
* Amaan Cheval <amaan.cheval@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef LIBBSP_GENERIC_AMD64_IRQ_H
#define LIBBSP_GENERIC_AMD64_IRQ_H
#ifndef ASM
#include <rtems.h>
#include <rtems/irq.h>
#include <rtems/irq-extension.h>
/* Lowest vector number managed by the RTEMS interrupt support. */
#define BSP_INTERRUPT_VECTOR_MIN 0x0
/*
 * Number of distinct interrupt entry stubs: rtems_irq_prologue_0 through
 * rtems_irq_prologue_32 (see isr_handler.S) — 33 in total.  Must match the
 * rtemsIRQs table in idt.c; the previous value of 34 left the table's last
 * element zero-initialized, installing a NULL handler into the IDT.
 */
#define BSP_IRQ_VECTOR_NUMBER 33
/* Highest valid vector number (inclusive). */
#define BSP_INTERRUPT_VECTOR_MAX (BSP_IRQ_VECTOR_NUMBER - 1)
#endif /* !ASM */
#endif /* LIBBSP_GENERIC_AMD64_IRQ_H */

View File

@@ -27,6 +27,9 @@ librtemsbsp_a_SOURCES += ../../../../../../bsps/x86_64/amd64/start/page.c
librtemsbsp_a_SOURCES += ../../../../../../bsps/shared/start/sbrk.c
librtemsbsp_a_SOURCES += ../../../../../../bsps/shared/dev/getentropy/getentropy-cpucounter.c
librtemsbsp_a_SOURCES += ../../../../../../bsps/shared/start/bspreset-empty.c
librtemsbsp_a_SOURCES += ../../../../../../bsps/x86_64/amd64/interrupts/idt.c
librtemsbsp_a_SOURCES += ../../../../../../bsps/x86_64/amd64/interrupts/isr_handler.S
librtemsbsp_a_SOURCES += ../../../../../../bsps/shared/irq/irq-default-handler.c
# clock
librtemsbsp_a_SOURCES += ../../../../../../bsps/shared/dev/clock/clock-simidle.c
# console
@@ -39,5 +42,6 @@ librtemsbsp_a_SOURCES += ../../../../../../bsps/shared/dev/btimer/btimer-stub.c
librtemsbsp_a_SOURCES += ../../../../../../bsps/shared/cache/nocache.c
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/shared/irq-sources.am
include $(srcdir)/../../../../../../bsps/shared/shared-sources.am
include $(srcdir)/../../../../../../bsps/x86_64/amd64/headers.am

View File

@@ -38,6 +38,7 @@
#endif
#include <rtems/system.h>
#include <rtems/score/idt.h>
#include <rtems/score/isr.h>
#include <rtems/score/wkspace.h>
#include <rtems/score/tls.h>
@@ -52,17 +53,17 @@ void _CPU_Initialize(void)
{
}
uint32_t _CPU_ISR_Get_level(void)
{
return 0;
}
/*
 * Install new_handler into the IDT entry for vector and return the previous
 * handler through old_handler; thin wrapper over the amd64 BSP routine.
 */
void _CPU_ISR_install_raw_handler(
uint32_t vector,
proc_ptr new_handler,
proc_ptr *old_handler
)
{
amd64_install_raw_interrupt(
vector,
(uintptr_t) new_handler,
(uintptr_t*) old_handler
);
}
void _CPU_ISR_install_vector(
@@ -73,11 +74,7 @@ void _CPU_ISR_install_vector(
{
}
void _CPU_Install_interrupt_stack(void)
{
}
void *_CPU_Thread_Idle_body(uintptr_t ignored)
{
for( ; ; ) { }
for ( ; ; ) { }
}

View File

@@ -14,4 +14,5 @@ include_rtems_score_HEADERS += include/rtems/score/cpu.h
include_rtems_score_HEADERS += include/rtems/score/cpu_asm.h
include_rtems_score_HEADERS += include/rtems/score/cpuatomic.h
include_rtems_score_HEADERS += include/rtems/score/cpuimpl.h
include_rtems_score_HEADERS += include/rtems/score/idt.h
include_rtems_score_HEADERS += include/rtems/score/x86_64.h

View File

@@ -101,12 +101,57 @@ typedef struct {
double some_float_register;
} Context_Control_fp;
/*
* Caller-saved registers for interrupt frames
*/
/*
 * Caller-saved register context captured by _ISR_Handler on interrupt entry:
 * exactly nine pointer-sized slots, so the size must stay equal to
 * CPU_INTERRUPT_FRAME_SIZE (72) and to the save/restore sequence in
 * isr_handler.S.  (A stray leftover "uint32_t special_interrupt_register"
 * member from the old stub frame padded this struct to 80 bytes, breaking
 * the static assert below; it is removed.)
 */
typedef struct {
  /**
   * @note: rdi is a caller-saved register too, but it's used in function calls
   * and is hence saved separately on the stack;
   *
   * @see DISTINCT_INTERRUPT_ENTRY
   * @see _ISR_Handler
   */
  uint64_t rax;
  uint64_t rcx;
  uint64_t rdx;
  uint64_t rsi;
  uint64_t r8;
  uint64_t r9;
  uint64_t r10;
  uint64_t r11;
  /*
   * This holds the rsp just before _ISR_Handler is called; it's needed because
   * in the handler, we align the stack to make further calls, and we're not
   * sure how alignment may move the stack-pointer around, leaving no way to get
   * back to the stack, and therefore the interrupt frame.
   */
  uint64_t saved_rsp;
  /* XXX:
   * - FS segment selector for TLS
   * - x87 status word?
   * - MMX?
   * - XMM?
   */
} CPU_Interrupt_frame;
#endif /* !ASM */
#define CPU_INTERRUPT_FRAME_SIZE 72
/*
* When SMP is enabled, percpuasm.c has a similar assert, but since we use the
* interrupt frame regardless of SMP, we'll confirm it here.
*/
#ifndef ASM
RTEMS_STATIC_ASSERT(
sizeof(CPU_Interrupt_frame) == CPU_INTERRUPT_FRAME_SIZE,
CPU_INTERRUPT_FRAME_SIZE
);
#endif
#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
@@ -126,31 +171,55 @@ typedef struct {
#define _CPU_Initialize_vectors()
// XXX: For RTEMS critical sections
#define _CPU_ISR_Disable( _isr_cookie ) \
{ \
(_isr_cookie) = 0; /* do something to prevent warnings */ \
}
#define _CPU_ISR_Enable( _isr_cookie ) \
{ \
(void) (_isr_cookie); /* prevent warnings from -Wunused-but-set-variable */ \
}
#define _CPU_ISR_Flash( _isr_cookie ) \
{ \
}
RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
return false;
#define _CPU_ISR_Enable(_level) \
{ \
amd64_enable_interrupts(); \
_level = 0; \
(void) _level; /* Prevent -Wunused-but-set-variable */ \
}
#define _CPU_ISR_Set_level( new_level ) \
{ \
}
/*
 * Enter an RTEMS interrupt critical section.
 *
 * Fix: this macro previously called amd64_enable_interrupts() (sti),
 * leaving interrupts ENABLED inside what every caller treats as a critical
 * section; _CPU_ISR_Disable must mask them with cli.
 *
 * XXX: _level should capture the previous RFLAGS.IF state so that nested
 * disable/enable pairs restore correctly; currently it is a constant.
 */
#define _CPU_ISR_Disable(_level) \
{ \
amd64_disable_interrupts(); \
_level = 1; \
(void) _level; /* Prevent -Wunused-but-set-variable */ \
}
uint32_t _CPU_ISR_Get_level( void );
#define _CPU_ISR_Flash(_level) \
{ \
amd64_enable_interrupts(); \
amd64_disable_interrupts(); \
_level = 1; \
(void) _level; /* Prevent -Wunused-but-set-variable */ \
}
/*
 * An ISR level value with the RFLAGS interrupt-enable bit set means that
 * interrupts are enabled at that level.
 */
RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled(uint32_t level)
{
  if ( ( level & EFLAGS_INTR_ENABLE ) != 0 ) {
    return true;
  }
  return false;
}
/*
 * Apply an RTEMS ISR level: level 0 means interrupts enabled, any non-zero
 * level means interrupts disabled.
 */
RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level(uint32_t new_level)
{
  if ( new_level == 0 ) {
    amd64_enable_interrupts();
  } else {
    amd64_disable_interrupts();
  }
}
/*
 * Read RFLAGS via pushf/popq and map the IF bit to an RTEMS ISR level:
 * 0 = interrupts enabled, 1 = interrupts disabled.
 */
RTEMS_INLINE_ROUTINE uint32_t _CPU_ISR_Get_level(void)
{
uint64_t rflags;
__asm__ volatile ( "pushf; \
popq %0"
: "=rm" (rflags)
);
uint32_t level = (rflags & EFLAGS_INTR_ENABLE) ? 0 : 1;
return level;
}
/* end of ISR handler macros */
@@ -228,8 +297,6 @@ void _CPU_ISR_install_vector(
proc_ptr *old_handler
);
void _CPU_Install_interrupt_stack( void );
void *_CPU_Thread_Idle_body( uintptr_t ignored );
void _CPU_Context_switch(

View File

@@ -45,6 +45,14 @@ RTEMS_INLINE_ROUTINE void outport_byte(uint16_t port, uint8_t val)
__asm__ volatile ( "outb %0, %1" : : "a" (val), "Nd" (port) );
}
/*
 * Return the current code-segment selector; used as the target segment for
 * IDT gate descriptors.
 *
 * NOTE(review): the "0" input constraint ties the zero-initialized variable
 * to the output of a pure read of %cs — it looks unnecessary; confirm
 * before simplifying.
 */
RTEMS_INLINE_ROUTINE uint16_t amd64_get_cs(void)
{
uint16_t segment = 0;
__asm__ volatile ( "movw %%cs, %0" : "=r" (segment) : "0" (segment) );
return segment;
}
RTEMS_INLINE_ROUTINE void amd64_set_cr3(uint64_t segment)
{
@@ -58,6 +66,16 @@ RTEMS_INLINE_ROUTINE void cpuid(
: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
: "a" (code) );
}
/* Set RFLAGS.IF (sti): allow maskable hardware interrupts. */
RTEMS_INLINE_ROUTINE void amd64_enable_interrupts(void)
{
__asm__ volatile ( "sti" );
}
/* Clear RFLAGS.IF (cli): mask maskable hardware interrupts. */
RTEMS_INLINE_ROUTINE void amd64_disable_interrupts(void)
{
__asm__ volatile ( "cli" );
}
#endif /* !ASM */
#endif

View File

@@ -0,0 +1,131 @@
/*
* Copyright (c) 2018.
* Amaan Cheval <amaan.cheval@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _RTEMS_SCORE_IDT_H
#define _RTEMS_SCORE_IDT_H
#include <rtems/score/basedefs.h>
#include <rtems/rtems/intr.h>
#ifdef __cplusplus
extern "C" {
#endif
#define IDT_INTERRUPT_GATE (0b1110)
#define IDT_PRESENT (0b10000000)
/*
* XXX: The IDT size should be smaller given that we likely won't map all 256
* vectors, but for simplicity, this works better.
*/
#define IDT_SIZE 256
/* Target vector number for spurious IRQs */
#define BSP_VECTOR_SPURIOUS 0xFF
/* Target vector number for the APIC timer */
#define BSP_VECTOR_APIC_TIMER 32
/* In-memory layout of a 16-byte long-mode (x86-64) IDT gate descriptor. */
typedef struct _interrupt_descriptor {
uint16_t offset_0; // bits 0-15
uint16_t segment_selector; // a segment selector in the GDT or LDT
/* bits 0-2 are the offset into the IST, stored in the TSS */
uint8_t interrupt_stack_table;
uint8_t type_and_attributes;
uint16_t offset_1; // bits 16-31
uint32_t offset_2; // bits 32-63
uint32_t reserved_zero;
} interrupt_descriptor;
extern interrupt_descriptor amd64_idt[IDT_SIZE];
/*
 * Pseudo-descriptor operand for the lidt instruction; RTEMS_PACKED so the
 * 64-bit base immediately follows the 16-bit limit with no padding.
 */
struct idt_record {
uint16_t limit; /* Size of IDT array - 1 */
uintptr_t base; /* Pointer to IDT array */
} RTEMS_PACKED;
RTEMS_STATIC_ASSERT(
sizeof(struct idt_record) == 10,
"IDT pointer must be exactly 10 bytes"
);
/* Load the CPU's IDT register from the given pseudo-descriptor. */
void lidt(struct idt_record *idtr);
/* Build a gate descriptor for handler with the given type/attribute byte. */
interrupt_descriptor amd64_create_interrupt_descriptor(
uintptr_t handler, uint8_t types_and_attributes
);
/* Reassemble the 64-bit handler address stored in the IDT for vector. */
uintptr_t amd64_get_handler_from_idt(uint32_t vector);
/* Install new_handler for vector; previous handler returned via old_handler. */
void amd64_install_raw_interrupt(
uint32_t vector, uintptr_t new_handler, uintptr_t *old_handler
);
/*
 * Called by _ISR_Handler to dispatch "RTEMS interrupts", i.e. call the
 * registered RTEMS ISR.
 */
void amd64_dispatch_isr(rtems_vector_number vector);
/* Defined in isr_handler.S */
/*
 * Per-vector entry stubs generated by DISTINCT_INTERRUPT_ENTRY in
 * isr_handler.S — keep this list in sync with the rtemsIRQs table (idt.c).
 */
extern void rtems_irq_prologue_0(void);
extern void rtems_irq_prologue_1(void);
extern void rtems_irq_prologue_2(void);
extern void rtems_irq_prologue_3(void);
extern void rtems_irq_prologue_4(void);
extern void rtems_irq_prologue_5(void);
extern void rtems_irq_prologue_6(void);
extern void rtems_irq_prologue_7(void);
extern void rtems_irq_prologue_8(void);
extern void rtems_irq_prologue_9(void);
extern void rtems_irq_prologue_10(void);
extern void rtems_irq_prologue_11(void);
extern void rtems_irq_prologue_12(void);
extern void rtems_irq_prologue_13(void);
extern void rtems_irq_prologue_14(void);
extern void rtems_irq_prologue_15(void);
extern void rtems_irq_prologue_16(void);
extern void rtems_irq_prologue_17(void);
extern void rtems_irq_prologue_18(void);
extern void rtems_irq_prologue_19(void);
extern void rtems_irq_prologue_20(void);
extern void rtems_irq_prologue_21(void);
extern void rtems_irq_prologue_22(void);
extern void rtems_irq_prologue_23(void);
extern void rtems_irq_prologue_24(void);
extern void rtems_irq_prologue_25(void);
extern void rtems_irq_prologue_26(void);
extern void rtems_irq_prologue_27(void);
extern void rtems_irq_prologue_28(void);
extern void rtems_irq_prologue_29(void);
extern void rtems_irq_prologue_30(void);
extern void rtems_irq_prologue_31(void);
extern void rtems_irq_prologue_32(void);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -76,11 +76,14 @@ void _CPU_Context_Initialize(
(void) is_fp;
// XXX: Should be used in the future
(void) new_level;
(void) tls_area;
// XXX: Leaving interrupts off regardless of `new_level` for now
the_context->rflags = CPU_EFLAGS_INTERRUPTS_OFF;
if ( new_level ) {
the_context->rflags = CPU_EFLAGS_INTERRUPTS_OFF;
}
else {
the_context->rflags = CPU_EFLAGS_INTERRUPTS_ON;
}
_stack = ((uintptr_t) stack_area_begin) + stack_area_size;
_stack &= ~(CPU_STACK_ALIGNMENT - 1);