* rtems/include/rtems/rtems/ratemon.h,
	rtems/include/rtems/rtems/status.h,
	score/include/rtems/score/coremutex.h,
	score/include/rtems/score/object.h: Removed extraneous commas.
This commit is contained in:
Joel Sherrill
2008-08-14 15:25:14 +00:00
parent b281e425ab
commit 4daebbd022
10 changed files with 626 additions and 522 deletions

View File

@@ -1,3 +1,10 @@
2008-08-14 Sebastian Huber <sebastian.huber@embedded-brains.de>
* rtems/include/rtems/rtems/ratemon.h,
rtems/include/rtems/rtems/status.h,
score/include/rtems/score/coremutex.h,
score/include/rtems/score/object.h: Removed extraneous commas.
2008-08-08 Joel Sherrill <joel.sherrill@OARcorp.com>
* rtems/include/rtems/rtems/msgmp.h: Fix MP code missed by Sebastian.

View File

@@ -119,7 +119,7 @@ typedef enum {
* has expired. The owner is still executing and has taken too much
* time to complete this iteration of the period.
*/
RATE_MONOTONIC_EXPIRED,
RATE_MONOTONIC_EXPIRED
} rtems_rate_monotonic_period_states;
/**

View File

@@ -169,7 +169,7 @@ typedef enum {
*
* @note This status will @b NOT be returned to the user.
*/
RTEMS_PROXY_BLOCKING = 28,
RTEMS_PROXY_BLOCKING = 28
} rtems_status_code;
/**

View File

@@ -30,6 +30,13 @@
*/
extern void _ISR15_Handler(void);
extern void _CPU_Emulation_handler(void);
extern void _CPU_Reset_handler(void);
extern void _CPU_NMI_handler(void);
extern void _CPU_Exception_handler(void);
extern void _CPU_Unhandled_Interrupt_handler(void);
void _CPU_Initialize(
void (*thread_dispatch) /* ignored on this CPU */
)
@@ -45,7 +52,7 @@ void _CPU_Initialize(
* dependent variable.
*/
_CPU_Thread_dispatch_pointer = thread_dispatch;
/*_CPU_Thread_dispatch_pointer = thread_dispatch;*/
/*
* If there is not an easy way to initialize the FP context
@@ -55,8 +62,31 @@ void _CPU_Initialize(
*/
/* FP context initialization support goes here */
proc_ptr ignored;
#if 0
/* occasionally useful debug stuff */
int i;
_CPU_ISR_install_raw_handler(0, _CPU_Emulation_handler, &ignored);
_CPU_ISR_install_raw_handler(1, _CPU_Reset_handler, &ignored);
_CPU_ISR_install_raw_handler(2, _CPU_NMI_handler, &ignored);
_CPU_ISR_install_raw_handler(3, _CPU_Exception_handler, &ignored);
for (i = 5; i < 15; i++)
_CPU_ISR_install_raw_handler(i, _CPU_Unhandled_Interrupt_handler, &ignored);
#endif
/* install handler that will be used to call _Thread_Dispatch */
_CPU_ISR_install_raw_handler( 15, _ISR15_Handler, &ignored );
/* enable self nesting */
__asm__ __volatile__ ("syscfg = %0" : : "d" (0x00000004));
}
/*PAGE
*
* _CPU_ISR_Get_level
@@ -78,7 +108,7 @@ uint32_t _CPU_ISR_Get_level( void )
_tmpimask = *((uint32_t*)IMASK);
return _tmpimask;
return (_tmpimask & 0xffe0) ? 0 : 1;
}
/*PAGE
@@ -136,24 +166,29 @@ void _CPU_ISR_install_vector(
proc_ptr *old_handler
)
{
proc_ptr ignored;
*old_handler = _ISR_Vector_table[ vector ];
/*
* If the interrupt vector table is a table of pointers to ISR entry
* points, then we need to install the appropriate RTEMS interrupt
* handler for this vector number.
*/
_CPU_ISR_install_raw_handler( vector, _ISR_Handler, old_handler );
/*
* We put the actual user ISR address in '_ISR_vector_table'. This will
* be used by the _ISR_Handler so the user gets control.
*/
_ISR_Vector_table[ vector ] = new_handler;
_CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
}
#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
/*
 * CPU-specific idle thread body for the Blackfin port.  Loops forever
 * executing the "idle" instruction (wait-for-interrupt), bracketed by
 * "ssync" system-synchronization barriers.  Never returns; the
 * "ignored" parameter is required by the RTEMS idle-body prototype but
 * is unused here.
 */
void *_CPU_Thread_Idle_body(uint32_t ignored) {
while (1) {
/* ssync drains the system pipeline; idle halts the core until an interrupt */
__asm__ __volatile__("ssync; idle; ssync");
}
}
#endif
/*
* Copied from the arm port.
*/
@@ -170,12 +205,8 @@ void _CPU_Context_Initialize(
stack_high = ((uint32_t )(stack_base) + size);
the_context->register_sp = stack_high;
// gcc/config/bfin/bfin.h defines CPU_MINIMUM_STACK_FRAME_SIZE = 0 thus we do sp=fp
// is this correct ?????
the_context->register_fp = stack_high;
the_context->register_rets = (uint32_t) entry_point;
//mask the interrupt level
the_context->imask = new_level ? 0 : 0xffff;
}

View File

@@ -4,9 +4,14 @@
* in the Blackfin port of RTEMS. These algorithms must be implemented
* in assembly language
*
* Copyright (c) 2008 Kallisti Labs, Los Gatos, CA, USA
* written by Allan Hessenflow <allanh@kallisti.com>
*
* Based on earlier version:
*
* Copyright (c) 2006 by Atos Automacao Industrial Ltda.
* written by Alain Schaefer <alain.schaefer@easc.ch>
* and Antonio Giovanini <antonio@atos.com.br>
* written by Alain Schaefer <alain.schaefer@easc.ch>
* and Antonio Giovanini <antonio@atos.com.br>
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -14,8 +19,8 @@
*
* $Id$
*/
#include <rtems/asm.h>
#include <rtems/score/cpu_asm.h>
#include <rtems/score/bfin.h>
@@ -24,6 +29,201 @@
#define LO(con32) ((con32) & 0xFFFF)
#define HI(con32) (((con32) >> 16) & 0xFFFF)
#if 0
/* some debug routines */
.globl __CPU_write_char;
__CPU_write_char:
p0.h = 0xffc0;
p0.l = 0x0400;
txWaitLoop:
r1 = w[p0 + 0x14];
cc = bittst(r1, 5);
if !cc jump txWaitLoop;
w[p0 + 0x00] = r0;
rts;
.globl __CPU_write_crlf;
__CPU_write_crlf:
r0 = '\r';
[--sp] = rets;
call __CPU_write_char;
rets = [sp++];
r0 = '\n';
jump __CPU_write_char;
__CPU_write_space:
r0 = ' ';
jump __CPU_write_char;
.globl __CPU_write_nybble;
__CPU_write_nybble:
r1 = 0x0f;
r0 = r0 & r1;
r0 += '0';
r1 = '9';
cc = r0 <= r1;
if cc jump __CPU_write_char;
r0 += 'a' - '0' - 10;
jump __CPU_write_char;
.globl __CPU_write_byte;
__CPU_write_byte:
[--sp] = r0;
[--sp] = rets;
r0 >>= 4;
call __CPU_write_nybble;
rets = [sp++];
r0 = [sp++];
jump __CPU_write_nybble;
__CPU_write_chawmp:
[--sp] = r0;
[--sp] = rets;
r0 >>= 8;
call __CPU_write_byte;
rets = [sp++];
r0 = [sp++];
jump __CPU_write_byte;
__CPU_write_gawble:
[--sp] = r0;
[--sp] = rets;
r0 >>= 16;
call __CPU_write_chawmp;
rets = [sp++];
r0 = [sp++];
jump __CPU_write_chawmp;
__CPU_dump_registers:
[--sp] = rets;
[--sp] = r0;
[--sp] = r1;
[--sp] = p0;
r0 = [sp + 8];
call __CPU_write_gawble;
call __CPU_write_space;
r0 = [sp + 4];
call __CPU_write_gawble;
call __CPU_write_space;
r0 = r2;
call __CPU_write_gawble;
call __CPU_write_space;
r0 = r3;
call __CPU_write_gawble;
call __CPU_write_space;
r0 = r4;
call __CPU_write_gawble;
call __CPU_write_space;
r0 = r5;
call __CPU_write_gawble;
call __CPU_write_space;
r0 = r6;
call __CPU_write_gawble;
call __CPU_write_space;
r0 = r7;
call __CPU_write_gawble;
call __CPU_write_crlf;
r0 = [sp];
call __CPU_write_gawble;
call __CPU_write_space;
r0 = p1;
call __CPU_write_gawble;
call __CPU_write_space;
r0 = p2;
call __CPU_write_gawble;
call __CPU_write_space;
r0 = p3;
call __CPU_write_gawble;
call __CPU_write_space;
r0 = p4;
call __CPU_write_gawble;
call __CPU_write_space;
r0 = p5;
call __CPU_write_gawble;
call __CPU_write_space;
r0 = fp;
call __CPU_write_gawble;
call __CPU_write_space;
r0 = sp;
r0 += 16;
call __CPU_write_gawble;
call __CPU_write_crlf;
p0 = [sp++];
r1 = [sp++];
r0 = [sp++];
rets = [sp++];
rts;
.globl __CPU_Exception_handler;
__CPU_Exception_handler:
usp = sp;
sp.h = 0xffb0;
sp.l = 0x1000;
[--sp] = (r7:0,p5:0);
r0 = 'x';
call __CPU_write_char;
jump hcf;
.globl __CPU_Emulation_handler;
__CPU_Emulation_handler:
usp = sp;
sp.h = 0xffb0;
sp.l = 0x1000;
[--sp] = (r7:0,p5:0);
r0 = 'e';
call __CPU_write_char;
jump hcf;
.globl __CPU_Reset_handler;
__CPU_Reset_handler:
usp = sp;
sp.h = 0xffb0;
sp.l = 0x1000;
[--sp] = (r7:0,p5:0);
r0 = 'r';
call __CPU_write_char;
jump hcf;
.globl __CPU_NMI_handler;
__CPU_NMI_handler:
usp = sp;
sp.h = 0xffb0;
sp.l = 0x1000;
[--sp] = (r7:0,p5:0);
r0 = 'n';
call __CPU_write_char;
jump hcf;
.globl __CPU_Unhandled_Interrupt_handler;
__CPU_Unhandled_Interrupt_handler:
usp = sp;
sp.h = 0xffb0;
sp.l = 0x1000;
[--sp] = (r7:0,p5:0);
call __CPU_write_crlf;
r0 = 'i';
call __CPU_write_char;
p0.h = HI(IPEND);
p0.l = LO(IPEND);
r0 = [p0];
call __CPU_write_chawmp;
jump hcf;
hcf:
idle;
jump hcf;
#endif
/* _CPU_Context_switch
*
* This routine performs a normal non-FP context switch.
@@ -31,187 +231,84 @@
* bfin Specific Information:
*
* For now we simply save all registers.
*
*
*/
.globl __CPU_Context_switch
/* make sure this sequence stays in sync with the definition for
Context_Control in rtems/score/cpu.h */
.globl __CPU_Context_switch
__CPU_Context_switch:
/* Start saving context R0 = current, R1=heir */
/*save P0 first*/
[FP+0x8] = P0;
P0 = R0;
[ P0 + R0_OFFSET ] = R0;
[ P0 + R1_OFFSET] = R1;
[ P0 + R2_OFFSET] = R2;
[ P0 + R4_OFFSET] = R4;
[ P0 + R3_OFFSET] = R3;
[ P0 + R5_OFFSET] = R5;
[ P0 + R6_OFFSET] = R6;
[ P0 + R7_OFFSET] = R7;
[ P0 + P1_OFFSET] = P1;
/* save the original value of P0 */
P1 = [FP+0x8];
[ P0 + P0_OFFSET] = P1;
[ P0 + P2_OFFSET] = P2;
[ P0 + P3_OFFSET] = P3;
[ P0 + P4_OFFSET] = P4;
[ P0 + P5_OFFSET] = P5;
[ P0 + FP_OFFSET] = FP;
[ P0 + SP_OFFSET] = SP;
/* save ASTAT */
R0 = ASTAT;
[P0 + ASTAT_OFFSET] = R0;
/* Start saving context R0 = current, R1=heir */
p0 = r0;
[p0++] = r4;
[p0++] = r5;
[p0++] = r6;
[p0++] = r7;
/* save Loop Counters */
R0 = LC0;
[P0 + LC0_OFFSET] = R0;
R0 = LC1;
[P0 + LC1_OFFSET] = R0;
/* save pointer registers */
[p0++] = p3;
[p0++] = p4;
[p0++] = p5;
[p0++] = fp;
[p0++] = sp;
/* save Accumulators */
R0 = A0.W;
[P0 + A0W_OFFSET] = R0;
R0 = A0.X;
[P0 + A0X_OFFSET] = R0;
R0 = A1.W;
[P0 + A1W_OFFSET] = R0;
R0 = A1.X;
[P0 + A1X_OFFSET] = R0;
/* save Index Registers */
R0 = I0;
[P0 + I0_OFFSET] = R0;
R0 = I1;
[P0 + I1_OFFSET] = R0;
R0 = I2;
[P0 + I2_OFFSET] = R0;
R0 = I3;
[P0 + I3_OFFSET] = R0;
/* save length registers */
r0 = l0;
[p0++] = r0;
r0 = l1;
[p0++] = r0;
r0 = l2;
[p0++] = r0;
r0 = l3;
[p0++] = r0;
/* save Modifier Registers */
R0 = M0;
[P0 + M0_OFFSET] = R0;
R0 = M1;
[P0 + M1_OFFSET] = R0;
R0 = M2;
[P0 + M2_OFFSET] = R0;
R0 = M3;
[P0 + M3_OFFSET] = R0;
/* save rets */
r0 = rets;
[p0++] = r0;
/* save Length Registers */
R0 = L0;
[P0 + L0_OFFSET] = R0;
R0 = L1;
[P0 + L1_OFFSET] = R0;
R0 = L2;
[P0 + L2_OFFSET] = R0;
R0 = L3;
[P0 + L3_OFFSET] = R0;
/* Base Registers */
R0 = B0;
[P0 + B0_OFFSET] = R0;
R0 = B1;
[P0 + B1_OFFSET] = R0;
R0 = B2;
[P0 + B2_OFFSET] = R0;
R0 = B3;
[P0 + B3_OFFSET] = R0;
/* save RETS */
R0 = RETS;
[ P0 + RETS_OFFSET] = R0;
/* save IMASK */
p1.h = HI(IMASK);
p1.l = LO(IMASK);
r0 = [p1];
[p0++] = r0;
p0 = r1;
restore:
P0 = R1;
R1 = [P0 + R1_OFFSET];
R2 = [P0 + R2_OFFSET];
R3 = [P0 + R3_OFFSET];
R4 = [P0 + R4_OFFSET];
R5 = [P0 + R5_OFFSET];
R6 = [P0 + R6_OFFSET];
R7 = [P0 + R7_OFFSET];
P2 = [P0 + P2_OFFSET];
P3 = [P0 + P3_OFFSET];
P4 = [P0 + P4_OFFSET];
P5 = [P0 + P5_OFFSET];
/* restore data registers */
r4 = [p0++];
r5 = [p0++];
r6 = [p0++];
r7 = [p0++];
/* might have to be placed more to the end */
FP = [P0 + FP_OFFSET];
SP = [P0 + SP_OFFSET];
/* restore pointer registers */
p3 = [p0++];
p4 = [p0++];
p5 = [p0++];
fp = [p0++];
sp = [p0++];
/* save ASTAT */
R0 = [P0 + ASTAT_OFFSET];
ASTAT = R0;
/* restore length registers */
r0 = [p0++];
l0 = r0;
r0 = [p0++];
l1 = r0;
r0 = [p0++];
l2 = r0;
r0 = [p0++];
l3 = r0;
/* save Loop Counters */
R0 = [P0 + LC0_OFFSET];
LC0 = R0;
R0 = [P0 + LC1_OFFSET];
LC1 = R0;
/* restore rets */
r0 = [p0++];
rets = r0;
/* save Accumulators */
R0 = [P0 + A0W_OFFSET];
A0.W = R0;
R0 = [P0 + A0X_OFFSET];
A0.X = R0;
R0 = [P0 + A1W_OFFSET];
A1.W = R0;
R0 = [P0 + A1X_OFFSET];
A1.X = R0;
/* restore IMASK */
r0 = [p0++];
p1.h = HI(IMASK);
p1.l = LO(IMASK);
[p1] = r0;
/* save Index Registers */
R0 = [P0 + I0_OFFSET];
I0 = R0;
R0 = [P0 + I1_OFFSET];
I1 = R0;
R0 = [P0 + I2_OFFSET];
I2 = R0;
R0 = [P0 + I3_OFFSET];
I3 = R0;
rts;
/* save Modifier Registers */
R0 = [P0 + M0_OFFSET];
M0 = R0;
R0 = [P0 + M1_OFFSET];
M1 = R0;
R0 = [P0 + M2_OFFSET];
M2 = R0;
R0 = [P0 + M3_OFFSET];
M3 = R0;
/* save Length Registers */
R0 = [P0 + L0_OFFSET];
L0 = R0;
R0 = [P0 + L1_OFFSET];
L1 = R0;
R0 = [P0 + L2_OFFSET];
L2 = R0;
R0 = [P0 + L3_OFFSET];
L3 = R0;
/* Base Registers */
R0 = [P0 + B0_OFFSET];
B0 = R0;
R0 = [P0 + B1_OFFSET];
B1 = R0;
R0 = [P0 + B2_OFFSET];
B2 = R0;
R0 = [P0 + B3_OFFSET];
B3 = R0;
/* restore RETS */
P1 = [P0 + RETS_OFFSET];
RETS = P1;
/* now restore the P1 + P0 */
P1 = [P0 + R1_OFFSET];
P0 = [P0 + P0_OFFSET];
rts;
/*
* _CPU_Context_restore
@@ -226,194 +323,276 @@ restore:
* none
*
*/
.globl __CPU_Context_restore
.globl __CPU_Context_restore
__CPU_Context_restore:
jump restore;
p0 = r0;
jump restore;
.globl __ISR_Thread_Dispatch
__ISR_Thread_Dispatch:
.extern __Thread_Dispatch
R0.l = __Thread_Dispatch;
R0.h = __Thread_Dispatch;
/* Puts the address of the Thread_Dispatch function on the stack
* Where it will be restored to the RTI register
*/
P0 = [FP];
/* save the old reti */
R1 = [P0+0xc];
[P0+0xc] = R0;
/*
* Overwriting the RETS register is safe because Thread_Dispatch is
* disabled when we are between call/link or unlink/rts
*/
[P0+0x8] = R1;
/* save old rets */
rts;
.globl __ISR_Handler
.globl __ISR_Handler
.extern __CPU_Interrupt_stack_high;
.extern __ISR_Nest_level
.extern __Thread_Dispatch_disable_level
.extern __Context_Switch_necessary
.extern __ISR_Signals_to_thread_executing
__ISR_Handler:
/* First of all check the Stackpointer and */
/* switch to Scratchpad if necessary */
/* save P0 and R0 in the scratchpad */
USP = P0;
/* load base address of scratchpad */
P0.H = HI(SCRATCH);
P0.L = LO(SCRATCH);
[--SP] = ASTAT; /* save cc flag*/
/* if SP is already inside the SCRATCHPAD */
CC=SP<P0 (iu)
if !CC jump continue;
/* set PO to top of scratchpad */
P0.h=HI(SCRATCH_TOP);
P0.l=LO(SCRATCH_TOP);
/*save the old SP*/
[P0] = SP;
/*P0 += -4;*/
/*set the new Stackpointer*/
SP = P0;
/*restore the old P0*/
/* The Stackpointer is now setup as we want */
continue:
/* restore P0 and save some context */
P0 = USP;
/* save some state on the isr stack (scratchpad), this enables interrupt nesting */
[--SP] = RETI;
[--SP] = RETS;
[--SP] = ASTAT;
[--SP] = FP;
FP = SP;
[--SP] = (R7:0, P5:0) ;
/* Context is saved, now check which Instruction we were executing
* If we were between a call and link or between a unlink and rts
* we have to disable Thread_Dispatch because correct restore of context after
* Thread_Dispatch would not be possible. */
P0 = RETI;
R0 = P0;
R0.L = 0x0000;
R1.H = 0xffa0;
R1.L = 0x0000;
CC = R0 == R1;
if CC jump disablethreaddispatch;
R0 = W[P0](Z);
/* shift 16 bits to the right (select the high nibble ) */
/*R0 >>= 16;*/
R3 = 0;
/* Check if RETI is a LINK instruction */
R1.h = HI(0x0000);
R1.l = LO(0xE800);
CC=R0==R1;
if cc jump disablethreaddispatch;
/* Check if RETI is a RTS instruction */
R1.h = HI(0x0000);
R1.l = LO(0x0010);
CC=R0==R1;
if cc jump disablethreaddispatch;
jump afterthreaddispatch;
disablethreaddispatch:
/* _Thread_Dispatch_disable_level++ */
.extern _Thread_Dispatch_disable_level
P0.H = __Thread_Dispatch_disable_level;
P0.L = __Thread_Dispatch_disable_level;
R0 = [P0];
R0 += 1;
[P0] = R0;
R3 = 1;
afterthreaddispatch:
/* Put R3 on the stack */
[--SP] = R3;
/* all interrupts are disabled at this point */
/* the following few items are pushed onto the task stack for at
most one interrupt; nested interrupts will be using the interrupt
stack for everything. */
[--sp] = astat;
[--sp] = p1;
[--sp] = p0;
[--sp] = r1;
[--sp] = r0;
p0.h = __ISR_Nest_level;
p0.l = __ISR_Nest_level;
r0 = [p0];
r0 += 1;
[p0] = r0;
cc = r0 <= 1 (iu);
if !cc jump noStackSwitch;
/* setup interrupt stack */
r0 = sp;
p0.h = __CPU_Interrupt_stack_high;
p0.l = __CPU_Interrupt_stack_high;
sp = [p0];
[--sp] = r0;
noStackSwitch:
/* disable thread dispatch */
p0.h = __Thread_Dispatch_disable_level;
p0.l = __Thread_Dispatch_disable_level;
r0 = [p0];
r0 += 1;
[p0] = r0;
/* Obtain a bitlist of the pending interrupts. */
P0.H = HI(IPEND);
P0.L = LO(IPEND);
R1 = [P0];
/*
* Search through the bit list stored in R0 to find the first enabled
* bit. The offset of this bit is the index of the interrupt that is
* to be handled.
*/
R0 = -1;
intloop:
R0 += 1;
R1 = ROT R1 by -1;
if !cc jump intloop;
[--sp] = reti; /* interrupts are now enabled */
/* pass SP as parameter to the C function */
R1 = SP
/* figure out what vector we are */
p0.h = HI(IPEND);
p0.l = LO(IPEND);
r1 = [p0];
/* we should only get here for events that require RTI to return */
r1 = r1 >> 5;
r0 = 4;
/* at least one bit must be set, so this loop will exit */
vectorIDLoop:
r0 += 1;
r1 = rot r1 by -1;
if !cc jump vectorIDLoop;
/* pass values by register as well as by stack */
/* to comply with the c calling conventions */
[--SP] = R0;
[--SP] = R1;
.extern _ISR_Handler2
call _ISR_Handler2
[--sp] = r2;
p0.h = __ISR_Vector_table;
p0.l = __ISR_Vector_table;
r2 = [p0];
r1 = r0 << 2;
r1 = r1 + r2;
p0 = r1;
p0 = [p0];
cc = p0 == 0;
if cc jump noHandler;
/* inc 2 to compensate the passing of arguments */
R3 = [SP++];
R3 = [SP++];
/* check if _Thread_Dispatch_disable_level has been incremented */
R3 = [SP++]
CC=R3==0
if cc jump dont_decrement;
.extern _Thread_Dispatch_disable_level
P0.H = __Thread_Dispatch_disable_level;
P0.L = __Thread_Dispatch_disable_level;
R0 = [P0];
R0 += -1;
[P0] = R0;
dont_decrement:
(R7:0, P5:0) = [SP++];
FP = [SP++];
ASTAT = [SP++];
RETS = [SP++];
RETI = [SP++];
/* Interrupts are now disabled again */
/*should restore the old stack !!!*/
/*if sp now points to SCRATCH_TOP */
/* load base address of scratchpad */
USP = P0;
P0.H = HI(SCRATCH_TOP);
P0.L = LO(SCRATCH_TOP);
CC=SP==P0
if !cc jump restoreP0
/* restore the stack */
SP=[P0];
restoreP0:
P0 = USP;
ASTAT = [SP++]; /* restore cc flag */
/*now we should be on the old "user-stack" again */
/* return from interrupt, will jump to address stored in RETI */
RTI;
/* r2, r0, r1, p0, p1, astat are already saved */
[--sp] = a1.x;
[--sp] = a1.w;
[--sp] = a0.x;
[--sp] = a0.w;
[--sp] = r3;
[--sp] = p3;
[--sp] = p2;
[--sp] = lt1;
[--sp] = lt0;
[--sp] = lc1;
[--sp] = lc0;
[--sp] = lb1;
[--sp] = lb0;
[--sp] = i3;
[--sp] = i2;
[--sp] = i1;
[--sp] = i0;
[--sp] = m3;
[--sp] = m2;
[--sp] = m1;
[--sp] = m0;
[--sp] = l3;
[--sp] = l2;
[--sp] = l1;
[--sp] = l0;
[--sp] = b3;
[--sp] = b2;
[--sp] = b1;
[--sp] = b0;
[--sp] = rets;
r1 = fp; /* is this really what should be passed here? */
/* call user isr; r0 = vector number, r1 = frame pointer */
sp += -12; /* bizarre abi... */
call (p0);
sp += 12;
rets = [sp++];
b0 = [sp++];
b1 = [sp++];
b2 = [sp++];
b3 = [sp++];
l0 = [sp++];
l1 = [sp++];
l2 = [sp++];
l3 = [sp++];
m0 = [sp++];
m1 = [sp++];
m2 = [sp++];
m3 = [sp++];
i0 = [sp++];
i1 = [sp++];
i2 = [sp++];
i3 = [sp++];
lb0 = [sp++];
lb1 = [sp++];
lc0 = [sp++];
lc1 = [sp++];
lt0 = [sp++];
lt1 = [sp++];
p2 = [sp++];
p3 = [sp++];
r3 = [sp++];
a0.w = [sp++];
a0.x = [sp++];
a1.w = [sp++];
a1.x = [sp++];
noHandler:
r2 = [sp++];
/* this disables interrupts again */
reti = [sp++];
p0.h = __ISR_Nest_level;
p0.l = __ISR_Nest_level;
r0 = [p0];
r0 += -1;
[p0] = r0;
cc = r0 == 0;
if !cc jump noStackRestore;
sp = [sp];
noStackRestore:
/* check this stuff to insure context_switch_necessary and
isr_signals_to_thread_executing are being handled appropriately. */
p0.h = __Thread_Dispatch_disable_level;
p0.l = __Thread_Dispatch_disable_level;
r0 = [p0];
r0 += -1;
[p0] = r0;
cc = r0 == 0;
if !cc jump noDispatch
/* do thread dispatch if necessary */
p0.h = __Context_Switch_necessary;
p0.l = __Context_Switch_necessary;
r0 = [p0];
cc = r0 == 0;
p0.h = __ISR_Signals_to_thread_executing;
p0.l = __ISR_Signals_to_thread_executing;
if !cc jump doDispatch
r0 = [p0];
cc = r0 == 0;
if cc jump noDispatch
doDispatch:
r0 = 0;
[p0] = r0;
raise 15;
noDispatch:
r0 = [sp++];
r1 = [sp++];
p0 = [sp++];
p1 = [sp++];
astat = [sp++];
rti
/* the approach here is for the main interrupt handler, when a dispatch is
wanted, to do a "raise 15". when the main interrupt handler does its
"rti", the "raise 15" takes effect and we end up here. we can now
safely call _Thread_Dispatch, and do an "rti" to get back to the
original interrupted function. this does require self-nesting to be
enabled; the maximum nest depth is the number of tasks. */
.global __ISR15_Handler
.extern __Thread_Dispatch
__ISR15_Handler:
[--sp] = reti;
[--sp] = rets;
[--sp] = astat;
[--sp] = a1.x;
[--sp] = a1.w;
[--sp] = a0.x;
[--sp] = a0.w;
[--sp] = r3;
[--sp] = r2;
[--sp] = r1;
[--sp] = r0;
[--sp] = p3;
[--sp] = p2;
[--sp] = p1;
[--sp] = p0;
[--sp] = lt1;
[--sp] = lt0;
[--sp] = lc1;
[--sp] = lc0;
[--sp] = lb1;
[--sp] = lb0;
[--sp] = i3;
[--sp] = i2;
[--sp] = i1;
[--sp] = i0;
[--sp] = m3;
[--sp] = m2;
[--sp] = m1;
[--sp] = m0;
[--sp] = l3;
[--sp] = l2;
[--sp] = l1;
[--sp] = l0;
[--sp] = b3;
[--sp] = b2;
[--sp] = b1;
[--sp] = b0;
sp += -12; /* bizarre abi... */
call __Thread_Dispatch;
sp += 12;
b0 = [sp++];
b1 = [sp++];
b2 = [sp++];
b3 = [sp++];
l0 = [sp++];
l1 = [sp++];
l2 = [sp++];
l3 = [sp++];
m0 = [sp++];
m1 = [sp++];
m2 = [sp++];
m3 = [sp++];
i0 = [sp++];
i1 = [sp++];
i2 = [sp++];
i3 = [sp++];
lb0 = [sp++];
lb1 = [sp++];
lc0 = [sp++];
lc1 = [sp++];
lt0 = [sp++];
lt1 = [sp++];
p0 = [sp++];
p1 = [sp++];
p2 = [sp++];
p3 = [sp++];
r0 = [sp++];
r1 = [sp++];
r2 = [sp++];
r3 = [sp++];
a0.w = [sp++];
a0.x = [sp++];
a1.w = [sp++];
a1.x = [sp++];
astat = [sp++];
rets = [sp++];
reti = [sp++];
rti;

View File

@@ -11,6 +11,8 @@
* $Id$
*/
#if 0 /* this file no longer used */
#include <rtems/system.h>
#include <rtems/score/cpu.h>
@@ -104,3 +106,6 @@ uint32_t SIC_IAR_Value ( uint8_t Vector )
return 0x88888888;
}
}
#endif /* 0 */

View File

@@ -105,7 +105,20 @@ extern "C" {
*
* XXX document implementation including references if appropriate
*/
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
/*
* Does the CPU follow the simple vectored interrupt model?
*
* If TRUE, then RTEMS allocates the vector table it internally manages.
* If FALSE, then the BSP is assumed to allocate and manage the vector
* table
*
* BFIN Specific Information:
*
* XXX document implementation including references if appropriate
*/
#define CPU_SIMPLE_VECTORED_INTERRUPTS TRUE
/*
* Does the CPU follow the simple vectored interrupt model?
@@ -152,7 +165,7 @@ extern "C" {
*
* XXX document implementation including references if appropriate
*/
#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
/**
* Does the RTEMS invoke the user's ISR with the vector number and
@@ -305,7 +318,7 @@ extern "C" {
*
* XXX document implementation including references if appropriate
*/
#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
/**
* Does the stack grow up (toward higher addresses) or down
@@ -453,89 +466,29 @@ extern "C" {
* that must be saved during a voluntary context switch from one thread
* to another.
*/
/* make sure this stays in sync with the assembly function
__CPU_Context_switch in cpu_asm.S */
typedef struct {
/* we are saving all registers, maybe we should not */
uint32_t register_r0;
uint32_t register_r1;
uint32_t register_r2;
uint32_t register_r3;
uint32_t register_r4;
uint32_t register_r5;
uint32_t register_r6;
uint32_t register_r7;
uint32_t register_p0;
uint32_t register_p1;
uint32_t register_p2;
uint32_t register_p3;
uint32_t register_p4;
uint32_t register_p5;
uint32_t register_fp;
uint32_t register_sp;
uint32_t register_i0;
uint32_t register_i1;
uint32_t register_i2;
uint32_t register_i3;
uint32_t register_m0;
uint32_t register_m1;
uint32_t register_m2;
uint32_t register_m3;
uint32_t register_b0;
uint32_t register_b1;
uint32_t register_b2;
uint32_t register_b3;
uint32_t register_l0;
uint32_t register_l1;
uint32_t register_l2;
uint32_t register_l3;
uint32_t register_a0dotx;
uint32_t register_a0dotw;
uint32_t register_a1dotx;
uint32_t register_a1dotw;
uint32_t register_astat;
uint32_t register_rets;
uint32_t register_lc0;
uint32_t register_lt0;
uint32_t register_lb0;
uint32_t register_lc1;
uint32_t register_lt1;
uint32_t register_lb1;
/*BFIN_CYCLES_REGNUM,
BFIN_CYCLES2_REGNUM, */
uint32_t register_usp;
uint32_t register_seqstat;
uint32_t register_syscfg;
uint32_t register_reti;
uint32_t register_retx;
uint32_t register_retn;
uint32_t register_rete;
uint32_t register_pc;
/*
Pseudo Registers
BFIN_PC_REGNUM,
BFIN_CC_REGNUM,
BFIN_EXTRA1, Address of .text section.
BFIN_EXTRA2, Address of .data section.
BFIN_EXTRA3, Address of .bss section.
BFIN_FDPIC_EXEC_REGNUM,
BFIN_FDPIC_INTERP_REGNUM,
MMRs
BFIN_IPEND_REGNUM,
LAST ENTRY SHOULD NOT BE CHANGED.
BFIN_NUM_REGS The number of all registers.
*/
uint32_t imask;
} Context_Control;
#define _CPU_Context_Get_SP( _context ) \
@@ -624,7 +577,7 @@ SCORE_EXTERN void *_CPU_Interrupt_stack_high;
*
* XXX document implementation including references if appropriate
*/
SCORE_EXTERN void (*_CPU_Thread_dispatch_pointer)();
/* SCORE_EXTERN void (*_CPU_Thread_dispatch_pointer)();*/
/*
* Nothing prevents the porter from declaring more CPU specific variables.
@@ -790,9 +743,7 @@ SCORE_EXTERN void (*_CPU_Thread_dispatch_pointer)();
*/
#define _CPU_ISR_Disable( _level ) \
{ \
asm volatile ("cli %0 \n" \
: "=d" (_level) ); \
\
asm volatile ("cli %0 \n" : "=d" (_level) ); \
}
@@ -808,10 +759,8 @@ SCORE_EXTERN void (*_CPU_Thread_dispatch_pointer)();
*
* XXX document implementation including references if appropriate
*/
#define _CPU_ISR_Enable( _level ) \
{ \
asm volatile ("STI %0 \n" \
: : "d" (_level) ); \
#define _CPU_ISR_Enable( _level ) { \
__asm__ __volatile__ ("sti %0 \n" : : "d" (_level) ); \
}
/**
@@ -827,13 +776,10 @@ SCORE_EXTERN void (*_CPU_Thread_dispatch_pointer)();
*
* XXX document implementation including references if appropriate
*/
#define _CPU_ISR_Flash( _level )
/* { \
asm volatile ("cli %0;\n" \
"ssync; \n" \
"sti %1; \n" \
: "=r" (_level) : "0"(_level) ); \
}*/
#define _CPU_ISR_Flash( _level ) { \
__asm__ __volatile__ ("sti %0; ssync; sti %1" \
: : "d"(0xffff), "d"(_level)); \
}
/**
* @ingroup CPUInterrupt
@@ -854,9 +800,7 @@ SCORE_EXTERN void (*_CPU_Thread_dispatch_pointer)();
*/
#define _CPU_ISR_Set_level( _new_level ) \
{ \
if ( _new_level ) asm volatile ( "cli R0;" : : : "R0" ); \
else asm volatile ( "R0.l = 0xFFFF;\n"\
"sti R0;" : : : "R0" ); \
__asm__ __volatile__ ( "sti %0" : : "d"(_new_level ? 0 : 0xffff) ); \
}
@@ -1001,8 +945,9 @@ void _CPU_Context_Initialize(
asm volatile ( "cli R1; \
R1 = %0; \
_halt: \
idle; \
jump _halt;"\
: "=r" (_error) ); \
: : "r" (_error) ); \
}
/* end of Fatal Error manager macros */

View File

@@ -20,69 +20,6 @@
#ifndef _RTEMS_SCORE_CPU_ASM_H
#define _RTEMS_SCORE_CPU_ASM_H
/* offsets for the registers in the thread context */
#define R0_OFFSET 0
#define R1_OFFSET 4
#define R2_OFFSET 8
#define R3_OFFSET 12
#define R4_OFFSET 16
#define R5_OFFSET 20
#define R6_OFFSET 24
#define R7_OFFSET 28
#define P0_OFFSET 32
#define P1_OFFSET 36
#define P2_OFFSET 40
#define P3_OFFSET 44
#define P4_OFFSET 48
#define P5_OFFSET 52
#define FP_OFFSET 56
#define SP_OFFSET 60
#define I0_OFFSET 64
#define I1_OFFSET 68
#define I2_OFFSET 72
#define I3_OFFSET 76
#define M0_OFFSET 80
#define M1_OFFSET 84
#define M2_OFFSET 88
#define M3_OFFSET 92
#define B0_OFFSET 96
#define B1_OFFSET 100
#define B2_OFFSET 104
#define B3_OFFSET 108
#define L0_OFFSET 112
#define L1_OFFSET 116
#define L2_OFFSET 120
#define L3_OFFSET 124
#define A0X_OFFSET 128
#define A0W_OFFSET 132
#define A1X_OFFSET 136
#define A1W_OFFSET 140
#define ASTAT_OFFSET 144
#define RETS_OFFSET 148
#define LC0_OFFSET 152
#define LT0_OFFSET 156
#define LB0_OFFSET 160
#define LC1_OFFSET 164
#define LT1_OFFSET 168
#define LB1_OFFSET 172
#define USP_OFFSET 174
#define SEQSTAT_OFFSET 178
#define SYSCFG_OFFSET 182
#define RETI_OFFSET 184
#define RETX_OFFSET 188
#define RETN_OFFSET 192
#define RETE_OFFSET 296
#define PC_OFFSET 200
#endif

View File

@@ -113,7 +113,7 @@ typedef enum {
/** This status indicates that a thread of logically greater importance
* than the ceiling priority attempted to lock this mutex.
*/
CORE_MUTEX_STATUS_CEILING_VIOLATED,
CORE_MUTEX_STATUS_CEILING_VIOLATED
} CORE_mutex_Status;

View File

@@ -291,11 +291,11 @@ typedef enum {
* an object with the specified ID.
*/
typedef enum {
OBJECTS_LOCAL = 0, /* object is local */
OBJECTS_ERROR = 1, /* id was invalid */
#if defined(RTEMS_MULTIPROCESSING)
OBJECTS_REMOTE = 2, /* object is remote */
#endif
OBJECTS_LOCAL = 0, /* object is local */
OBJECTS_ERROR = 1 /* id was invalid */
} Objects_Locations;
/**