2009-07-03 Josh Switnicki <josh.switnicki@utoronto.ca>

* cpu.c: Implemented _CPU_Context_Initialize as a C function instead
	of a macro.  It works with limited functionality.  Implemented
	_CPU_Thread_Idle_body to use sleep instruction.
	* Makefile.am: Changed cpu_asm.c -> cpu_asm.S
	* cpu_asm.S: renamed from cpu_asm.c and implemented functions in asm
	* rtems/asm.h: Appended "macros.inc" to the end of "asm.h"
	* rtems/score/cpu.h:
	  + Included "avr/io.h".
	  + Added RTEMS_USE_16_BIT_OBJECT definition.
	  + Modified Context_Control struct to reflect the registers
	    that need to be saved.
	  + Implemented _CPU_ISR_Disable, _CPU_ISR_Enable, and _CPU_ISR_Flash.
	    Added function definitions for _CPU_Context_Initialize and
	    _CPU_Push.
Joel Sherrill committed 2009-07-06 15:36:23 +00:00
parent 7ffdc70c0a
commit 7c46cf58b3
6 changed files with 760 additions and 37 deletions

ChangeLog

@@ -1,3 +1,20 @@
2009-07-03 Josh Switnicki <josh.switnicki@utoronto.ca>
	* cpu.c: Implemented _CPU_Context_Initialize as a C function instead
	of a macro.  It works with limited functionality.  Implemented
	_CPU_Thread_Idle_body to use sleep instruction.
	* Makefile.am: Changed cpu_asm.c -> cpu_asm.S
	* cpu_asm.S: renamed from cpu_asm.c and implemented functions in asm
	* rtems/asm.h: Appended "macros.inc" to the end of "asm.h"
	* rtems/score/cpu.h:
	  + Included "avr/io.h".
	  + Added RTEMS_USE_16_BIT_OBJECT definition.
	  + Modified Context_Control struct to reflect the registers
	    that need to be saved.
	  + Implemented _CPU_ISR_Disable, _CPU_ISR_Enable, and _CPU_ISR_Flash.
	    Added function definitions for _CPU_Context_Initialize and
	    _CPU_Push.
2009-05-05 Joel Sherrill <joel.sherrill@oarcorp.com>
* rtems/score/cpu.h: Lower number of priorities and do not inline as

Makefile.am

@@ -12,7 +12,7 @@ include_rtems_score_HEADERS = rtems/score/cpu.h rtems/score/avr.h \
rtems/score/cpu_asm.h rtems/score/types.h
noinst_LIBRARIES = libscorecpu.a
libscorecpu_a_SOURCES = cpu.c cpu_asm.c
libscorecpu_a_SOURCES = cpu.c cpu_asm.S
libscorecpu_a_CPPFLAGS = $(AM_CPPFLAGS)
include $(srcdir)/preinstall.am

cpu.c

@@ -42,6 +42,42 @@ void _CPU_Initialize(void)
/* FP context initialization support goes here */
}
/*PAGE
*
* _CPU_Context_Initialize
*
* This kernel routine initializes the basic non-FP context area associated
* with each thread.
*
* Input parameters:
* the_context - pointer to the context area
* stack_base - address of memory for the stack
* size - size in bytes of the stack area
* new_level - interrupt level for this context area
* entry_point - the starting execution point for this context
* is_fp - TRUE if this context is associated with an FP thread
*
* Output parameters: NONE
*/
void _CPU_Context_Initialize(
Context_Control *the_context,
uint32_t *stack_base,
uint32_t size,
uint32_t new_level,
void *entry_point,
bool is_fp
)
{
uint16_t _stack; //declare helper variable
_stack = (uint16_t) (stack_base) + (uint16_t) (size); //calc stack pointer
the_context->stack_pointer = _stack - 2; //save stack pointer (- 2 bytes)
_CPU_Push(_stack, (uint16_t)(entry_point)); //push entry point onto context stack
the_context->status = 0; //init status to zero
if (new_level == TRUE) _CPU_ISR_Enable( 0 );
}
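/*
 * For reference (not part of the build): with _stack = stack_base + size,
 * the code above is expected to leave the new context looking like this,
 * so that the "ret" executed by the first context restore pops
 * entry_point into the program counter:
 *
 *   byte at _stack     : low  byte of entry_point  (written by _CPU_Push)
 *   byte at _stack - 1 : high byte of entry_point  (written by _CPU_Push)
 *   the_context->stack_pointer = _stack - 2
 *   the_context->status        = 0
 */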
/*PAGE
*
* _CPU_ISR_Get_level
@@ -120,7 +156,7 @@ void _CPU_ISR_install_vector(
/*
* We put the actual user ISR address in '_ISR_vector_table'. This will
* be used by the _ISR_Handler so the user gets control.
*/
*/
_ISR_Vector_table[ vector ] = new_handler;
}
@@ -162,7 +198,7 @@ void _CPU_Install_interrupt_stack( void )
void *_CPU_Thread_Idle_body( uintptr_t ignored )
{
for( ; ; )
for( ; ; ) asm volatile ("sleep"::);
/* insert your "halt" instruction here */ ;
return (void *) 0;
}
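/*
 * Note: the SLEEP instruction only puts the core to sleep when the
 * sleep-enable bit is set.  A variant of the idle body written with the
 * avr-libc <avr/sleep.h> helpers would handle that explicitly
 * (illustrative sketch only, not what this commit does):
 */
#if 0
#include <avr/sleep.h>

void *_CPU_Thread_Idle_body( uintptr_t ignored )
{
  set_sleep_mode( SLEEP_MODE_IDLE );   /* select the idle sleep mode   */
  sleep_enable();                      /* set the sleep-enable bit     */
  for ( ; ; )
    sleep_cpu();                       /* emits the sleep instruction  */
  return (void *) 0;
}
#endif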

View File

@@ -21,22 +21,115 @@
* and cpu.h should not be included in a "real" cpu_asm file. An
* implementation in assembly should include "cpu_asm.h>
*/
#include <avr/io.h>
#include <avr/sfr_defs.h>
#include <rtems/asm.h>
#define jmpb_hi r25
#define jmpb_lo r24
#define val_hi r23
#define val_lo r22
#define ret_lo r24
#define ret_hi r25
PUBLIC( setjmp )
SYM( setjmp ):
X_movw XL, jmpb_lo
/*;save call-saved registers and frame pointer*/
.irp .L_regno, 2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,28,29
st X+, r\.L_regno
.endr
/*;get return address*/
pop ZH
pop ZL
/*save stack pointer (after popping)*/
in ret_lo, AVR_STACK_POINTER_LO_ADDR
st X+, ret_lo
#ifdef _HAVE_AVR_STACK_POINTER_HI
in ret_lo, AVR_STACK_POINTER_HI_ADDR
st X+, ret_lo
#else
st X+, __zero_reg__ /* no SPH on this device, store a zero byte */
#endif
/*save status reg (I flag)*/
in ret_lo, AVR_STATUS_ADDR
st X+, ret_lo
/*save return addr*/
st X+, ZL
st X+, ZH
/*return zero*/
clr ret_hi
clr ret_lo
ijmp
.size _U(setjmp),.-_U(setjmp)
.global _U(longjmp)
.type _U(longjmp), @function
_U(longjmp):
X_movw XL, jmpb_lo
/*return value*/
X_movw ret_lo, val_lo
/*if zero, change to 1*/
cpi ret_lo, 1
cpc ret_hi, __zero_reg__
adc ret_lo, __zero_reg__
/*restore call-saved registers and frame pointer*/
.irp .L_regno, 2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,28,29
ld r\.L_regno, X+
.endr
/*; restore stack pointer (SP value before the setjmp() call) and SREG*/
ld ZL, X+
ld ZH, X+
ld __tmp_reg__, X+
#if defined (__AVR_XMEGA__) && __AVR_XMEGA__
/* A write to SPL will automatically disable interrupts for up to 4
instructions or until the next I/O memory write. */
out AVR_STATUS_ADDR, __tmp_reg__
out AVR_STACK_POINTER_LO_ADDR, ZL
out AVR_STACK_POINTER_HI_ADDR, ZH
#else
# ifdef _HAVE_AVR_STACK_POINTER_HI
/* interrupts disabled for shortest possible time (3 cycles) */
cli
out AVR_STACK_POINTER_HI_ADDR, ZH
# endif
/* Restore status register (including the interrupt enable flag).
Interrupts are re-enabled only after the next instruction. */
out AVR_STATUS_ADDR, __tmp_reg__
out AVR_STACK_POINTER_LO_ADDR, ZL
#endif
; get return address and jump
ld ZL, X+
ld ZH, X+
#if defined(__AVR_3_BYTE_PC__) && __AVR_3_BYTE_PC__
ld __tmp_reg__, X+
.L_jmp3:
push ZL
push ZH
push __tmp_reg__
ret
#else
ijmp
#endif
.size _U(longjmp), . - _U(longjmp)
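/*
 * For reference, the buffer layout the two routines above agree on
 * (a sketch only; avr-libc declares jmp_buf as a flat byte array, and
 * this struct is not defined anywhere in the tree):
 */
#if 0
struct jmp_buf_layout {
  uint8_t call_saved[18]; /* r2-r17 and the frame pointer pair r28/r29   */
  uint8_t sp_lo, sp_hi;   /* SP as it was after popping the return addr  */
  uint8_t sreg;           /* status register, including the I flag       */
  uint8_t ret_lo, ret_hi; /* return address of the setjmp() call site    */
};
#endif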
#include <rtems/system.h>
#include <rtems/score/cpu.h>
/* #include "cpu_asm.h> */
#include <rtems/bspIo.h> /* XXX remove me later */
/* XXX remove me when really implemented */
int setjmp(void)
{
return 0;
}
int longjmp(void)
{
return 0;
}
/*
* _CPU_Context_save_fp_context
@@ -53,13 +146,25 @@ int longjmp(void)
* NO_CPU Specific Information:
*
* XXX document implementation including references if appropriate
*/
void _CPU_Context_save_fp(
Context_Control_fp **fp_context_ptr
)
{
}
*/
PUBLIC(_CPU_Context_save_fp)
SYM(_CPU_Context_save_fp):
ret
/*
* _CPU_Context_restore_fp_context
@@ -76,13 +181,22 @@ void _CPU_Context_save_fp(
* NO_CPU Specific Information:
*
* XXX document implementation including references if appropriate
*/
void _CPU_Context_restore_fp(
Context_Control_fp **fp_context_ptr
)
{
}
*/
PUBLIC(_CPU_Context_restore_fp)
SYM(_CPU_Context_restore_fp):
ret
/* _CPU_Context_switch
*
@@ -91,15 +205,85 @@ void _CPU_Context_restore_fp(
* NO_CPU Specific Information:
*
* XXX document implementation including references if appropriate
*/
*/
void _CPU_Context_switch(
Context_Control *run,
Context_Control *heir
)
{
printk( "AVR _CPU_Context_switch\n" );
}
PUBLIC(_CPU_Context_switch)
SYM(_CPU_Context_switch):
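/*
 * Store the executing thread's call-saved registers, SREG and stack
 * pointer into the running context (X is loaded with its address first),
 * then fall through to "restore" to load the heir's context and return
 * on the heir's stack.
 */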
mov r26, r18
mov r27, r19
st X+, r2
st X+, r3
st X+, r4
st X+, r5
st X+, r6
st X+, r7
st X+, r8
st X+, r9
st X+, r10
st X+, r11
st X+, r12
st X+, r13
st X+, r14
st X+, r15
st X+, r16
st X+, r17
st X+, r28
st X+, r29
lds r25,0x5f /*load sreg*/
st X+, r25
lds r25,0x5d /*spl*/
st X+, r25
lds r25,0x5e /*sph*/
st X+, r25
restore:
mov r26,r22
mov r27,r23
ld r2, X+
ld r3, X+
ld r4, X+
ld r5, X+
ld r6, X+
ld r7, X+
ld r8, X+
ld r9, X+
ld r10, X+
ld r11, X+
ld r12, X+
ld r13, X+
ld r14, X+
ld r15, X+
ld r16, X+
ld r17, X+
ld r28, X+
ld r29, X+
ld r25, X+
sts 0x5f,r25 /*sreg*/
ld r25, X+
sts 0x5d,r25 /*spl*/
ld r25, X+
sts 0x5e ,r25 /*sph*/
ret
PUBLIC(_CPU_Push)
SYM(_CPU_Push):
lds r20, 0x5d /*spl*/
lds r21, 0x5e /*sph*/
sts 0x5d, r24 /*spl*/
sts 0x5e, r25 /*sph*/
push r22
push r23
sts 0x5d, r20 /*spl*/
sts 0x5e, r21 /*sph*/
ret
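/*
 * C-level sketch of what the routine above does (illustrative only; the
 * real version must stay in assembly because it swaps the hardware
 * stack pointer around the two pushes):
 */
#if 0
void _CPU_Push( uint16_t _SP_, uint16_t entry_point )
{
  uint8_t *p = (uint8_t *) _SP_;

  p[ 0] = (uint8_t) entry_point;          /* low byte, pushed first  */
  p[-1] = (uint8_t) ( entry_point >> 8 ); /* high byte, pushed last  */
  /* the caller's SP is saved and restored around the pushes */
}
#endif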
/*
* _CPU_Context_restore
@@ -112,7 +296,7 @@ void _CPU_Context_switch(
* NO_CPU Specific Information:
*
* XXX document implementation including references if appropriate
*/
void _CPU_Context_restore(
Context_Control *new_context
@@ -120,6 +304,15 @@ void _CPU_Context_restore(
{
printk( "AVR _CPU_Context_restore\n" );
}
*/
PUBLIC(_CPU_Context_restore)
SYM(_CPU_Context_restore):
//call printk("AVR _CPU_Context_restore\n")
ret
/* void __ISR_Handler()
*
@@ -128,10 +321,12 @@ void _CPU_Context_restore(
* NO_CPU Specific Information:
*
* XXX document implementation including references if appropriate
*/
void _ISR_Handler(void)
{
*/
/*
* This discussion ignores a lot of the ugly details in a real
* implementation such as saving enough registers/state to be
@@ -193,4 +388,9 @@ void _ISR_Handler(void)
* prepare to get out of interrupt
* return from interrupt
*/
}
/*} */
PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
ret

rtems/asm.h

@@ -69,6 +69,7 @@
* EXAMPLE: #define d0 REG (d0)
*/
/*
* Define macros to handle section beginning and ends.
*/
@@ -94,4 +95,378 @@
#define PUBLIC(sym) .globl SYM (sym)
#define EXTERN(sym) .globl SYM (sym)
#endif
/* Copyright (c) 2002, 2005, 2006, 2007 Marek Michalkiewicz
Copyright (c) 2006 Dmitry Xmelkov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of the copyright holders nor the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE. */
/*
macros.inc - macros for use in assembler sources
Contributors:
Created by Marek Michalkiewicz <marekm@linux.org.pl>
*/
#include <avr/io.h>
/* if not defined, assume old version with underscores */
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif
#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif
/* the assembler line separator (just in case it ever changes) */
#define _L $
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
#define _U(x) CONCAT1(__USER_LABEL_PREFIX__, x)
#define _R(x) CONCAT1(__REGISTER_PREFIX__, x)
/* these should help to fix the "can't have function named r1()" bug
which may require adding '%' in front of register names. */
#define r0 _R(r0)
#define r1 _R(r1)
#define r2 _R(r2)
#define r3 _R(r3)
#define r4 _R(r4)
#define r5 _R(r5)
#define r6 _R(r6)
#define r7 _R(r7)
#define r8 _R(r8)
#define r9 _R(r9)
#define r10 _R(r10)
#define r11 _R(r11)
#define r12 _R(r12)
#define r13 _R(r13)
#define r14 _R(r14)
#define r15 _R(r15)
#define r16 _R(r16)
#define r17 _R(r17)
#define r18 _R(r18)
#define r19 _R(r19)
#define r20 _R(r20)
#define r21 _R(r21)
#define r22 _R(r22)
#define r23 _R(r23)
#define r24 _R(r24)
#define r25 _R(r25)
#define r26 _R(r26)
#define r27 _R(r27)
#define r28 _R(r28)
#define r29 _R(r29)
#define r30 _R(r30)
#define r31 _R(r31)
#ifndef __tmp_reg__
#define __tmp_reg__ r0
#endif
#ifndef __zero_reg__
#define __zero_reg__ r1
#endif
#if __AVR_MEGA__
#define XJMP jmp
#define XCALL call
#else
#define XJMP rjmp
#define XCALL rcall
#endif
/* used only by fplib/strtod.S - libgcc internal function calls */
#define PROLOGUE_SAVES(offset) XJMP (__prologue_saves__ + 2 * (offset))
#define EPILOGUE_RESTORES(offset) XJMP (__epilogue_restores__ + 2 * (offset))
#if FLASHEND > 0x10000 /* ATmega103 */
#define BIG_CODE 1
#else
#define BIG_CODE 0
#endif
#ifndef __AVR_HAVE_MOVW__
# if defined(__AVR_ENHANCED__) && __AVR_ENHANCED__
# define __AVR_HAVE_MOVW__ 1
# endif
#endif
#ifndef __AVR_HAVE_LPMX__
# if defined(__AVR_ENHANCED__) && __AVR_ENHANCED__
# define __AVR_HAVE_LPMX__ 1
# endif
#endif
#ifndef __AVR_HAVE_MUL__
# if defined(__AVR_ENHANCED__) && __AVR_ENHANCED__
# define __AVR_HAVE_MUL__ 1
# endif
#endif
/*
Smart version of movw:
- uses "movw" if possible (supported by MCU, and both registers even)
- handles overlapping register pairs correctly
- no instruction generated if source and destination are the same
(may expand to 0, 1 or 2 instructions).
*/
.macro X_movw dst src
.L_movw_dst = -1
.L_movw_src = -1
.L_movw_n = 0
.irp reg, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, \
r10,r11,r12,r13,r14,r15,r16,r17,r18,r19, \
r20,r21,r22,r23,r24,r25,r26,r27,r28,r29, \
r30,r31
.ifc \reg,\dst
.L_movw_dst = .L_movw_n
.endif
.ifc \reg,\src
.L_movw_src = .L_movw_n
.endif
.L_movw_n = .L_movw_n + 1
.endr
.L_movw_n = 0
.irp reg, R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, \
R10,R11,R12,R13,R14,R15,R16,R17,R18,R19, \
R20,R21,R22,R23,R24,R25,R26,R27,R28,R29, \
R30,R31
.ifc \reg,\dst
.L_movw_dst = .L_movw_n
.endif
.ifc \reg,\src
.L_movw_src = .L_movw_n
.endif
.L_movw_n = .L_movw_n + 1
.endr
.if .L_movw_dst < 0
.L_movw_n = 0
.rept 32
.if \dst == .L_movw_n
.L_movw_dst = .L_movw_n
.endif
.L_movw_n = .L_movw_n + 1
.endr
.endif
.if .L_movw_src < 0
.L_movw_n = 0
.rept 32
.if \src == .L_movw_n
.L_movw_src = .L_movw_n
.endif
.L_movw_n = .L_movw_n + 1
.endr
.endif
.if (.L_movw_dst < 0) || (.L_movw_src < 0)
.err ; Invalid 'X_movw' arg.
.endif
.if ((.L_movw_src) - (.L_movw_dst)) /* different registers */
.if (((.L_movw_src) | (.L_movw_dst)) & 0x01)
.if (((.L_movw_src)-(.L_movw_dst)) & 0x80) /* src < dest */
mov (.L_movw_dst)+1, (.L_movw_src)+1
mov (.L_movw_dst), (.L_movw_src)
.else /* src > dest */
mov (.L_movw_dst), (.L_movw_src)
mov (.L_movw_dst)+1, (.L_movw_src)+1
.endif
.else /* both even -> overlap not possible */
#if defined(__AVR_HAVE_MOVW__) && __AVR_HAVE_MOVW__
movw \dst, \src
#else
mov (.L_movw_dst), (.L_movw_src)
mov (.L_movw_dst)+1, (.L_movw_src)+1
#endif
.endif
.endif
.endm
/* Macro 'X_lpm' extends enhanced lpm instruction for classic chips.
Usage:
X_lpm reg, dst
where
reg is 0..31, r0..r31 or R0..R31
dst is z, Z, z+ or Z+
It is possible to omit both arguments.
Possible results for classic chips:
lpm
lpm / mov Rd,r0
lpm / adiw ZL,1
lpm / mov Rd,r0 / adiw ZL,1
For enhanced chips it is one instruction always.
ATTENTION: unlike enhanced chips SREG (S,V,N,Z,C) flags are
changed in case of 'Z+' dst. R0 is scratch.
*/
.macro X_lpm dst=r0, src=Z
/* dst evaluation */
.L_lpm_dst = -1
.L_lpm_n = 0
.irp reg, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, \
r10,r11,r12,r13,r14,r15,r16,r17,r18,r19, \
r20,r21,r22,r23,r24,r25,r26,r27,r28,r29, \
r30,r31
.ifc \reg,\dst
.L_lpm_dst = .L_lpm_n
.endif
.L_lpm_n = .L_lpm_n + 1
.endr
.L_lpm_n = 0
.irp reg, R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, \
R10,R11,R12,R13,R14,R15,R16,R17,R18,R19, \
R20,R21,R22,R23,R24,R25,R26,R27,R28,R29, \
R30,R31
.ifc \reg,\dst
.L_lpm_dst = .L_lpm_n
.endif
.L_lpm_n = .L_lpm_n + 1
.endr
.if .L_lpm_dst < 0
.L_lpm_n = 0
.rept 32
.if \dst == .L_lpm_n
.L_lpm_dst = .L_lpm_n
.endif
.L_lpm_n = .L_lpm_n + 1
.endr
.endif
.if (.L_lpm_dst < 0)
.err ; Invalid dst arg of 'X_lpm' macro.
.endif
/* src evaluation */
.L_lpm_src = -1
.L_lpm_n = 0
.irp reg, z,Z,z+,Z+
.ifc \reg,\src
.L_lpm_src = .L_lpm_n
.endif
.L_lpm_n = .L_lpm_n + 1
.endr
.if (.L_lpm_src < 0)
.err ; Invalid src arg of 'X_lpm' macro.
.endif
/* instruction(s) */
.if .L_lpm_src < 2
.if .L_lpm_dst == 0
lpm
.else
#if defined(__AVR_HAVE_LPMX__) && __AVR_HAVE_LPMX__
lpm .L_lpm_dst, Z
#else
lpm
mov .L_lpm_dst, r0
#endif
.endif
.else
.if (.L_lpm_dst >= 30)
.err ; Registers 30 and 31 are inhibited as 'X_lpm *,Z+' dst.
.endif
#if defined(__AVR_HAVE_LPMX__) && __AVR_HAVE_LPMX__
lpm .L_lpm_dst, Z+
#else
lpm
.if .L_lpm_dst
mov .L_lpm_dst, r0
.endif
adiw r30, 1
#endif
.endif
.endm
/*
LPM_R0_ZPLUS_INIT is used before the loop to initialize RAMPZ
for future devices with RAMPZ:Z auto-increment - [e]lpm r0, Z+.
LPM_R0_ZPLUS_NEXT is used inside the loop to load a byte from
the program memory at [RAMPZ:]Z to R0, and increment [RAMPZ:]Z.
The argument in both macros is a register that contains the
high byte (bits 23-16) of the address, bits 15-0 should be in
the Z (r31:r30) register. It can be any register except for:
r0, r1 (__zero_reg__ - assumed to always contain 0), r30, r31.
*/
.macro LPM_R0_ZPLUS_INIT hhi
#if __AVR_ENHANCED__
#if BIG_CODE
out AVR_RAMPZ_ADDR, \hhi
#endif
#endif
.endm
.macro LPM_R0_ZPLUS_NEXT hhi
#if __AVR_ENHANCED__
#if BIG_CODE
/* ELPM with RAMPZ:Z post-increment, load RAMPZ only once */
elpm r0, Z+
#else
/* LPM with Z post-increment, max 64K, no RAMPZ (ATmega83/161/163/32) */
lpm r0, Z+
#endif
#else
#if BIG_CODE
/* ELPM without post-increment, load RAMPZ each time (ATmega103) */
out AVR_RAMPZ_ADDR, \hhi
elpm
adiw r30,1
adc \hhi, __zero_reg__
#else
/* LPM without post-increment, max 64K, no RAMPZ (AT90S*) */
lpm
adiw r30,1
#endif
#endif
.endm

rtems/score/cpu.h

@@ -24,12 +24,17 @@ extern "C" {
#endif
#include <rtems/score/avr.h> /* pick up machine definitions */
#include <avr/io.h>
#ifndef ASM
#include <rtems/score/types.h>
#endif
/* conditional compilation parameters */
#ifndef RTEMS_USE_16_BIT_OBJECT
#define RTEMS_USE_16_BIT_OBJECT
#endif
/*
* Should the calls to _Thread_Enable_dispatch be inlined?
*
@@ -419,14 +424,34 @@ extern "C" {
*/
typedef struct {
uint32_t some_integer_register;
uint32_t some_system_register;
uint32_t stack_pointer;
uint8_t reg2;
uint8_t reg3;
uint8_t reg4;
uint8_t reg5;
uint8_t reg6;
uint8_t reg7;
uint8_t reg8;
uint8_t reg9;
uint8_t reg10;
uint8_t reg11;
uint8_t reg12;
uint8_t reg13;
uint8_t reg14;
uint8_t reg15;
uint8_t reg16;
uint8_t reg17;
uint8_t reg28;
uint8_t reg29;
uint8_t status; //SREG
uint16_t stack_pointer;
} Context_Control;
#define _CPU_Context_Get_SP( _context ) \
(_context)->stack_pointer
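/*
 * The Context_Control fields above mirror what _CPU_Context_switch in
 * cpu_asm.S is meant to save and restore: the call-saved registers
 * r2-r17, the frame pointer pair r28/r29, SREG and the 16-bit stack
 * pointer.
 */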
typedef struct {
double some_float_register;
} Context_Control_fp;
@@ -633,8 +658,9 @@ SCORE_EXTERN void *_CPU_Interrupt_stack_high;
#define _CPU_ISR_Disable( _isr_cookie ) \
{ \
(_isr_cookie) = 0; /* do something to prevent warnings */ \
}
(_isr_cookie) = SREG; /* do something to prevent warnings */ \
asm volatile ("cli"::); \
}
/*
* Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
@@ -648,6 +674,8 @@ SCORE_EXTERN void *_CPU_Interrupt_stack_high;
#define _CPU_ISR_Enable( _isr_cookie ) \
{ \
SREG = _isr_cookie; \
asm volatile ("sei"::); \
}
/*
@@ -663,6 +691,10 @@ SCORE_EXTERN void *_CPU_Interrupt_stack_high;
#define _CPU_ISR_Flash( _isr_cookie ) \
{ \
SREG=(_isr_cookie); \
asm volatile("sei"::); \
(_isr_cookie) = SREG; \
asm volatile("cli"::); \
}
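/*
 * Illustrative usage of the three macros above, roughly as the
 * architecture-independent score code uses them (the local variable name
 * is made up; ISR_Level is the generic score type):
 */
#if 0
  ISR_Level level;

  _CPU_ISR_Disable( level );   /* level <- SREG, then cli                 */
  /* ... first half of the critical section ... */
  _CPU_ISR_Flash( level );     /* restore SREG, sei briefly, cli again    */
  /* ... second half of the critical section ... */
  _CPU_ISR_Enable( level );    /* SREG <- level, then sei                 */
#endif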
/*
@@ -716,12 +748,23 @@ uint32_t _CPU_ISR_Get_level( void );
*
* XXX document implementation including references if appropriate
*/
/*
#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
_isr, _entry_point, _is_fp ) \
{ \
}
\
do { \
uint16_t *_stack;\
_stack = (uint16_t) (_stack_base) + (uint16_t)(_size);\
(_the_context)->stack_pointer = _stack-1; \
*(_stack) = *(_entry_point); \
printk("the ret address is %x\n", *(uint16_t *)(_stack));\
printk("sp = 0x%x\nep = 0x%x\n",_stack, *(_entry_point)); \
printk("stack base = 0x%x\n size = 0x%x\n",_stack_base, _size);\
printk("struct starting address = 0x%x\n", _the_context);\
printk("struct stack pointer address = 0x%x\n",(_the_context)->stack_pointer);\
} while ( 0 )
*/
/*
* This routine is responsible for somehow restarting the currently
* executing task. If you are lucky, then all that is necessary
@@ -915,6 +958,58 @@ uint32_t _CPU_ISR_Get_level( void );
/* functions */
/*context_initialize asm function*/
void context_initialize(unsigned short* context,
unsigned short stack_add,
unsigned short entry_point);
/*PAGE
*
* _CPU_Context_Initialize
*
* This kernel routine initializes the basic non-FP context area associated
* with each thread.
*
* Input parameters:
* the_context - pointer to the context area
* stack_base - address of memory for the stack
* size - size in bytes of the stack area
* new_level - interrupt level for this context area
* entry_point - the starting execution point for this context
* is_fp - TRUE if this context is associated with an FP thread
*
* Output parameters: NONE
*/
void _CPU_Context_Initialize(
Context_Control *the_context,
uint32_t *stack_base,
uint32_t size,
uint32_t new_level,
void *entry_point,
bool is_fp
);
/*
 *  _CPU_Push
 *
 *  This routine pushes 2 bytes onto the stack.
 */
void _CPU_Push(uint16_t _SP_, uint16_t entry_point);
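/*
 * Example call (this is how _CPU_Context_Initialize in cpu.c uses it):
 * place the thread entry point on a freshly computed stack top.
 *
 *   _CPU_Push( _stack, (uint16_t) entry_point );
 */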
/*
* _CPU_Initialize
*