* cpu.c: Implemented _CPU_Context_Initialize as a C function instead of a macro. It works with limited functionality. Implemented _CPU_Thread_Idle_body to use the sleep instruction.
* Makefile.am: Changed cpu_asm.c -> cpu_asm.S.
* cpu_asm.S: Renamed from cpu_asm.c and implemented the functions in assembly.
* rtems/asm.h: Appended "macros.inc" to the end of "asm.h".
* rtems/score/cpu.h:
  + Included "avr/io.h".
  + Added use of a 16-bit object definition.
  + Modified the Context_Control struct to reflect the registers that need to be saved.
  + Implemented _CPU_ISR_Disable, _CPU_ISR_Enable, and _CPU_ISR_Flash.
  + Added function definitions for _CPU_Context_Initialize and _CPU_Push.
/* cpu_asm.c ===> cpu_asm.S or cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  NOTE:  This is supposed to be a .S or .s file NOT a C file.
 *
 *  COPYRIGHT (c) 1989-2008.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id$
 */

/*
 *  This is supposed to be an assembly file.  This means that system.h
 *  and cpu.h should not be included in a "real" cpu_asm file.  An
 *  implementation in assembly should include <cpu_asm.h>.
 */

#include <avr/io.h>
#include <avr/sfr_defs.h>
#include <rtems/asm.h>

#define jmpb_hi r25
#define jmpb_lo r24
#define val_hi  r23
#define val_lo  r22

#define ret_lo  r24
#define ret_hi  r25

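/*
 *  Note on the register names above: with the avr-gcc calling convention,
 *  the first 16-bit argument arrives in r25:r24, the second in r23:r22, and
 *  16-bit return values are passed back in r25:r24.  The defines simply give
 *  those registers readable names for the setjmp/longjmp code below
 *  (jmpb_* = pointer to the jmp_buf, val_* = value handed to longjmp).
 */
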
        PUBLIC( setjmp )

SYM( setjmp ):
        X_movw  XL, jmpb_lo
        /* save call-saved registers and frame pointer */
        .irp    .L_regno, 2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,28,29
        st      X+, r\.L_regno
        .endr
        /* get return address */
        pop     ZH
        pop     ZL
        /* save stack pointer (after popping) */
        in      ret_lo, AVR_STACK_POINTER_LO_ADDR
        st      X+, ret_lo
#ifdef _HAVE_AVR_STACK_POINTER_HI
        in      ret_lo, AVR_STACK_POINTER_HI_ADDR
        st      X+, ret_lo
#else
        /* no SPH on this device: store a zero byte in its place */
        st      X+, __zero_reg__
#endif
        /* save status register (I flag) */
        in      ret_lo, AVR_STATUS_ADDR
        st      X+, ret_lo
        /* save return address */
        st      X+, ZL
        st      X+, ZH
        /* return zero */
        clr     ret_hi
        clr     ret_lo
        ijmp

        .size   _U(setjmp), . - _U(setjmp)

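/*
 *  Layout sketch (not from the original sources, derived from the stores
 *  above): the jmp_buf filled in by setjmp holds, in order,
 *
 *      r2..r17, r28, r29   (18 call-saved registers, incl. the frame pointer)
 *      SPL, SPH            (stack pointer after the return address was popped)
 *      SREG                (so the I flag survives the longjmp)
 *      ZL, ZH              (return address of the setjmp caller)
 *
 *  i.e. 23 bytes on a device with a 2-byte program counter.
 */
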
        .global _U(longjmp)
        .type   _U(longjmp), @function

_U(longjmp):
        X_movw  XL, jmpb_lo
        /* return value */
        X_movw  ret_lo, val_lo
        /* if zero, change to 1 */
        cpi     ret_lo, 1
        cpc     ret_hi, __zero_reg__
        adc     ret_lo, __zero_reg__
        /* restore call-saved registers and frame pointer */
        .irp    .L_regno, 2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,28,29
        ld      r\.L_regno, X+
        .endr
        /* restore stack pointer (SP value before the setjmp() call) and SREG */
        ld      ZL, X+
        ld      ZH, X+
        ld      __tmp_reg__, X+
#if defined(__AVR_XMEGA__) && __AVR_XMEGA__
        /* A write to SPL will automatically disable interrupts for up to 4
           instructions or until the next I/O memory write. */
        out     AVR_STATUS_ADDR, __tmp_reg__
        out     AVR_STACK_POINTER_LO_ADDR, ZL
        out     AVR_STACK_POINTER_HI_ADDR, ZH
#else
# ifdef _HAVE_AVR_STACK_POINTER_HI
        /* interrupts disabled for the shortest possible time (3 cycles) */
        cli
        out     AVR_STACK_POINTER_HI_ADDR, ZH
# endif
        /* Restore the status register (including the interrupt enable flag).
           Interrupts are re-enabled only after the next instruction. */
        out     AVR_STATUS_ADDR, __tmp_reg__
        out     AVR_STACK_POINTER_LO_ADDR, ZL
#endif
        /* get return address and jump */
        ld      ZL, X+
        ld      ZH, X+
#if defined(__AVR_3_BYTE_PC__) && __AVR_3_BYTE_PC__
        ld      __tmp_reg__, X+
.L_jmp3:
        push    ZL
        push    ZH
        push    __tmp_reg__
        ret
#else
        ijmp
#endif

        .size   _U(longjmp), . - _U(longjmp)

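/*
 *  The cpi/cpc/adc sequence above implements the usual longjmp() rule that a
 *  zero value is turned into 1 before setjmp() appears to return, roughly:
 *
 *      int ret = val;
 *      if (ret == 0)
 *          ret = 1;
 *      return ret;      // value seen at the original setjmp() call site
 *
 *  Comparing r25:r24 against 1 sets the carry only when the 16-bit value was
 *  zero, and adding that carry back bumps 0 to 1 while leaving every other
 *  value unchanged.
 */
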
/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the point to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 *
 *  NO_CPU Specific Information:
 *
 *  XXX document implementation including references if appropriate
 *
 *  void _CPU_Context_save_fp(
 *    Context_Control_fp **fp_context_ptr
 *  )
 *  {
 *  }
 */

        PUBLIC(_CPU_Context_save_fp)

SYM(_CPU_Context_save_fp):
        ret

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the point to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 *
 *  NO_CPU Specific Information:
 *
 *  XXX document implementation including references if appropriate
 *
 *  void _CPU_Context_restore_fp(
 *    Context_Control_fp **fp_context_ptr
 *  )
 *  {
 *  }
 */

        PUBLIC(_CPU_Context_restore_fp)

SYM(_CPU_Context_restore_fp):
        ret

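/*
 *  Both FP routines above are intentionally empty: the AVR has no hardware
 *  floating-point unit, so there is no floating-point context to save or
 *  restore and a plain "ret" is sufficient.
 */
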
/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 *
 *  NO_CPU Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */

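/*
 *  A sketch (not the authoritative definition, which lives in
 *  rtems/score/cpu.h) of the Context_Control layout implied by the save and
 *  restore order used below; the field names are illustrative only:
 *
 *      typedef struct {
 *          uint8_t r2_r17[16];   // call-saved general registers r2..r17
 *          uint8_t r28;          // frame pointer low  (Y)
 *          uint8_t r29;          // frame pointer high (Y)
 *          uint8_t sreg;         // status register
 *          uint8_t spl;          // stack pointer low
 *          uint8_t sph;          // stack pointer high
 *      } Context_Control;
 *
 *  What matters is that the byte order matches the st/ld sequences in
 *  _CPU_Context_switch.
 */
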
        PUBLIC(_CPU_Context_switch)

SYM(_CPU_Context_switch):
        /* save the executing thread's context (pointer taken from r19:r18) */
        mov     r26, r18
        mov     r27, r19
        st      X+, r2
        st      X+, r3
        st      X+, r4
        st      X+, r5
        st      X+, r6
        st      X+, r7
        st      X+, r8
        st      X+, r9
        st      X+, r10
        st      X+, r11
        st      X+, r12
        st      X+, r13
        st      X+, r14
        st      X+, r15
        st      X+, r16
        st      X+, r17
        st      X+, r28
        st      X+, r29
        lds     r25, 0x5f       /* sreg */
        st      X+, r25
        lds     r25, 0x5d       /* spl */
        st      X+, r25
        lds     r25, 0x5e       /* sph */
        st      X+, r25

        /* fall through and restore the heir thread's context */
restore:
        mov     r26, r22
        mov     r27, r23
        ld      r2, X+
        ld      r3, X+
        ld      r4, X+
        ld      r5, X+
        ld      r6, X+
        ld      r7, X+
        ld      r8, X+
        ld      r9, X+
        ld      r10, X+
        ld      r11, X+
        ld      r12, X+
        ld      r13, X+
        ld      r14, X+
        ld      r15, X+
        ld      r16, X+
        ld      r17, X+
        ld      r28, X+
        ld      r29, X+
        ld      r25, X+
        sts     0x5f, r25       /* sreg */
        ld      r25, X+
        sts     0x5d, r25       /* spl */
        ld      r25, X+
        sts     0x5e, r25       /* sph */
        ret

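/*
 *  For reference, RTEMS ports declare the context switch routine as
 *
 *      void _CPU_Context_switch(
 *        Context_Control *run,     <- context of the executing thread
 *        Context_Control *heir     <- context of the thread to resume
 *      );
 *
 *  The entry code above picks the "run" pointer out of r19:r18 and the
 *  "heir" pointer out of r23:r22; note that r19:r18 is not where avr-gcc
 *  normally passes the first pointer argument (r25:r24), which may be one of
 *  the limitations mentioned in the change log.
 */
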
        PUBLIC(_CPU_Push)

SYM(_CPU_Push):
        lds     r20, 0x5d       /* spl */
        lds     r21, 0x5e       /* sph */
        sts     0x5d, r24       /* spl */
        sts     0x5e, r25       /* sph */
        push    r22
        push    r23
        sts     0x5d, r20       /* spl */
        sts     0x5e, r21       /* sph */
        ret

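/*
 *  Sketch of the intended C-level interface for _CPU_Push.  The change log
 *  says a declaration was added to rtems/score/cpu.h but does not show it,
 *  so the signature and parameter names below are only a guess:
 *
 *      void _CPU_Push( uint16_t stack_pointer, uint16_t value );
 *
 *  With the avr-gcc calling convention the target stack pointer arrives in
 *  r25:r24 and the value in r23:r22.  The routine saves the current SP,
 *  switches to the supplied stack, pushes r22 then r23, and restores the
 *  original SP before returning.
 */
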
/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  NO_CPU Specific Information:
 *
 *  XXX document implementation including references if appropriate
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  )
 *  {
 *    printk( "AVR _CPU_Context_restore\n" );
 *  }
 */

        PUBLIC(_CPU_Context_restore)

SYM(_CPU_Context_restore):
        /* call printk("AVR _CPU_Context_restore\n") */
        ret

/*  void __ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  NO_CPU Specific Information:
 *
 *  XXX document implementation including references if appropriate
 *
 *  void _ISR_Handler(void)
 *  {
 */
/*
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler while requirements for setting the RTEMS state
 *  variables will dictate the order.
 */

/*
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branches to _ISR_Handler.
 *
 *  save some or all context on stack
 *  may need to save some special interrupt information for exit
 *
 *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
 *    if ( _ISR_Nest_level == 0 )
 *      switch to software interrupt stack
 *  #endif
 *
 *  _ISR_Nest_level++;
 *
 *  _Thread_Dispatch_disable_level++;
 *
 *  (*_ISR_Vector_table[ vector ])( vector );
 *
 *  _Thread_Dispatch_disable_level--;
 *
 *  --_ISR_Nest_level;
 *
 *  if ( _ISR_Nest_level )
 *    goto the label "exit interrupt (simple case)"
 *
 *  if ( _Thread_Dispatch_disable_level ) {
 *    _ISR_Signals_to_thread_executing = FALSE;
 *    goto the label "exit interrupt (simple case)"
 *  }
 *
 *  if ( _Context_Switch_necessary || _ISR_Signals_to_thread_executing ) {
 *    _ISR_Signals_to_thread_executing = FALSE;
 *    call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
 *    prepare to get out of interrupt
 *    return from interrupt (maybe to _ISR_Dispatch)
 *  }
 *
 *  LABEL "exit interrupt (simple case)":
 *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
 *    if outermost interrupt
 *      restore stack
 *  #endif
 *  prepare to get out of interrupt
 *  return from interrupt
 */
/* } */

        PUBLIC(_ISR_Handler)

SYM(_ISR_Handler):
        ret