2009-07-03	Josh Switnicki <josh.switnicki@utoronto.ca>

	* start/start.S: copied gcrt1.S and macros.inc from avr-libc.
	Change from call main to call boot_card for RTEMS.
	* start/macros.inc: New file.
Committed by Joel Sherrill on 2009-07-06 15:28:09 +00:00
commit 7ffdc70c0a (parent 81551a2541)
3 changed files with 631 additions and 54 deletions

ChangeLog

@@ -1,3 +1,9 @@
+2009-07-03	Josh Switnicki <josh.switnicki@utoronto.ca>
+
+	* start/start.S: copied gcrt1.S and macros.inc from avr-libc.
+	Change from call main to call boot_card for RTEMS.
+	* start/macros.inc: New file.
+
 2009-05-08	Joel Sherrill <joel.sherrill@oarcorp.com>
 
 	* startup/linkcmds: Correct to have 65K total RAM (mostly external) and

start/macros.inc (new file)

@@ -0,0 +1,364 @@
/* Copyright (c) 2002, 2005, 2006, 2007 Marek Michalkiewicz
Copyright (c) 2006 Dmitry Xmelkov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of the copyright holders nor the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE. */
/*
macros.inc - macros for use in assembler sources
Contributors:
Created by Marek Michalkiewicz <marekm@linux.org.pl>
*/
#include <avr/io.h>
/* if not defined, assume old version with underscores */
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif
#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif
/* the assembler line separator (just in case it ever changes) */
#define _L $
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
#define _U(x) CONCAT1(__USER_LABEL_PREFIX__, x)
#define _R(x) CONCAT1(__REGISTER_PREFIX__, x)
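For instance, with the empty defaults of a modern ELF toolchain, _U(boot_card) expands to plain boot_card; an old a.out-style toolchain that defines __USER_LABEL_PREFIX__ as _ would yield _boot_card instead.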
/* these should help to fix the "can't have function named r1()" bug
which may require adding '%' in front of register names. */
#define r0 _R(r0)
#define r1 _R(r1)
#define r2 _R(r2)
#define r3 _R(r3)
#define r4 _R(r4)
#define r5 _R(r5)
#define r6 _R(r6)
#define r7 _R(r7)
#define r8 _R(r8)
#define r9 _R(r9)
#define r10 _R(r10)
#define r11 _R(r11)
#define r12 _R(r12)
#define r13 _R(r13)
#define r14 _R(r14)
#define r15 _R(r15)
#define r16 _R(r16)
#define r17 _R(r17)
#define r18 _R(r18)
#define r19 _R(r19)
#define r20 _R(r20)
#define r21 _R(r21)
#define r22 _R(r22)
#define r23 _R(r23)
#define r24 _R(r24)
#define r25 _R(r25)
#define r26 _R(r26)
#define r27 _R(r27)
#define r28 _R(r28)
#define r29 _R(r29)
#define r30 _R(r30)
#define r31 _R(r31)
#ifndef __tmp_reg__
#define __tmp_reg__ r0
#endif
#ifndef __zero_reg__
#define __zero_reg__ r1
#endif
#if __AVR_MEGA__
#define XJMP jmp
#define XCALL call
#else
#define XJMP rjmp
#define XCALL rcall
#endif
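So, for example, XCALL exit assembles to "call exit" on __AVR_MEGA__ parts (which provide the long jmp/call instructions) and to "rcall exit" on smaller devices.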
/* used only by fplib/strtod.S - libgcc internal function calls */
#define PROLOGUE_SAVES(offset) XJMP (__prologue_saves__ + 2 * (offset))
#define EPILOGUE_RESTORES(offset) XJMP (__epilogue_restores__ + 2 * (offset))
#if FLASHEND > 0x10000 /* ATmega103 */
#define BIG_CODE 1
#else
#define BIG_CODE 0
#endif
#ifndef __AVR_HAVE_MOVW__
# if defined(__AVR_ENHANCED__) && __AVR_ENHANCED__
# define __AVR_HAVE_MOVW__ 1
# endif
#endif
#ifndef __AVR_HAVE_LPMX__
# if defined(__AVR_ENHANCED__) && __AVR_ENHANCED__
# define __AVR_HAVE_LPMX__ 1
# endif
#endif
#ifndef __AVR_HAVE_MUL__
# if defined(__AVR_ENHANCED__) && __AVR_ENHANCED__
# define __AVR_HAVE_MUL__ 1
# endif
#endif
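In other words, when a newer compiler has not already defined the individual __AVR_HAVE_* feature macros, an "enhanced"-core part (the newer ATmega-style cores) is assumed to have MOVW, the lpm Rd,Z forms, and the hardware multiplier.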
/*
Smart version of movw:
- uses "movw" if possible (supported by MCU, and both registers even)
- handles overlapping register pairs correctly
- no instruction generated if source and destination are the same
(may expand to 0, 1 or 2 instructions).
*/
.macro X_movw dst src
.L_movw_dst = -1
.L_movw_src = -1
.L_movw_n = 0
.irp reg, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, \
r10,r11,r12,r13,r14,r15,r16,r17,r18,r19, \
r20,r21,r22,r23,r24,r25,r26,r27,r28,r29, \
r30,r31
.ifc \reg,\dst
.L_movw_dst = .L_movw_n
.endif
.ifc \reg,\src
.L_movw_src = .L_movw_n
.endif
.L_movw_n = .L_movw_n + 1
.endr
.L_movw_n = 0
.irp reg, R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, \
R10,R11,R12,R13,R14,R15,R16,R17,R18,R19, \
R20,R21,R22,R23,R24,R25,R26,R27,R28,R29, \
R30,R31
.ifc \reg,\dst
.L_movw_dst = .L_movw_n
.endif
.ifc \reg,\src
.L_movw_src = .L_movw_n
.endif
.L_movw_n = .L_movw_n + 1
.endr
.if .L_movw_dst < 0
.L_movw_n = 0
.rept 32
.if \dst == .L_movw_n
.L_movw_dst = .L_movw_n
.endif
.L_movw_n = .L_movw_n + 1
.endr
.endif
.if .L_movw_src < 0
.L_movw_n = 0
.rept 32
.if \src == .L_movw_n
.L_movw_src = .L_movw_n
.endif
.L_movw_n = .L_movw_n + 1
.endr
.endif
.if (.L_movw_dst < 0) || (.L_movw_src < 0)
.err ; Invalid 'X_movw' arg.
.endif
.if ((.L_movw_src) - (.L_movw_dst)) /* different registers */
.if (((.L_movw_src) | (.L_movw_dst)) & 0x01)
.if (((.L_movw_src)-(.L_movw_dst)) & 0x80) /* src < dest */
mov (.L_movw_dst)+1, (.L_movw_src)+1
mov (.L_movw_dst), (.L_movw_src)
.else /* src > dest */
mov (.L_movw_dst), (.L_movw_src)
mov (.L_movw_dst)+1, (.L_movw_src)+1
.endif
.else /* both even -> overlap not possible */
#if defined(__AVR_HAVE_MOVW__) && __AVR_HAVE_MOVW__
movw \dst, \src
#else
mov (.L_movw_dst), (.L_movw_src)
mov (.L_movw_dst)+1, (.L_movw_src)+1
#endif
.endif
.endif
.endm
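For reference, a hedged sketch of a few hypothetical call sites and the expansions the comment above promises (the register choices are illustrative only):

    X_movw r24, r22    ; even/even: one "movw r24, r22" where MOVW exists,
                       ; otherwise "mov r24,r22" / "mov r25,r23"
    X_movw r25, r24    ; overlapping odd pair: "mov r26,r25" / "mov r25,r24",
                       ; high byte first so the shared register survives
    X_movw r30, r30    ; source == destination: emits nothing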
/* Macro 'X_lpm' extends the enhanced lpm instruction for classic chips.
Usage:
X_lpm dst, src
where
dst is 0..31, r0..r31 or R0..R31
src is z, Z, z+ or Z+
It is possible to omit both arguments.
Possible results for classic chips:
lpm
lpm / mov Rd,r0
lpm / adiw ZL,1
lpm / mov Rd,r0 / adiw ZL,1
For enhanced chips it is always one instruction.
ATTENTION: unlike on enhanced chips, the SREG (S,V,N,Z,C) flags are
changed when src is 'Z+' (adiw is used there). R0 is scratch.
*/
.macro X_lpm dst=r0, src=Z
/* dst evaluation */
.L_lpm_dst = -1
.L_lpm_n = 0
.irp reg, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, \
r10,r11,r12,r13,r14,r15,r16,r17,r18,r19, \
r20,r21,r22,r23,r24,r25,r26,r27,r28,r29, \
r30,r31
.ifc \reg,\dst
.L_lpm_dst = .L_lpm_n
.endif
.L_lpm_n = .L_lpm_n + 1
.endr
.L_lpm_n = 0
.irp reg, R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, \
R10,R11,R12,R13,R14,R15,R16,R17,R18,R19, \
R20,R21,R22,R23,R24,R25,R26,R27,R28,R29, \
R30,R31
.ifc \reg,\dst
.L_lpm_dst = .L_lpm_n
.endif
.L_lpm_n = .L_lpm_n + 1
.endr
.if .L_lpm_dst < 0
.L_lpm_n = 0
.rept 32
.if \dst == .L_lpm_n
.L_lpm_dst = .L_lpm_n
.endif
.L_lpm_n = .L_lpm_n + 1
.endr
.endif
.if (.L_lpm_dst < 0)
.err ; Invalid dst arg of 'X_lpm' macro.
.endif
/* src evaluation */
.L_lpm_src = -1
.L_lpm_n = 0
.irp reg, z,Z,z+,Z+
.ifc \reg,\src
.L_lpm_src = .L_lpm_n
.endif
.L_lpm_n = .L_lpm_n + 1
.endr
.if (.L_lpm_src < 0)
.err ; Invalid src arg of 'X_lpm' macro.
.endif
/* instruction(s) */
.if .L_lpm_src < 2
.if .L_lpm_dst == 0
lpm
.else
#if defined(__AVR_HAVE_LPMX__) && __AVR_HAVE_LPMX__
lpm .L_lpm_dst, Z
#else
lpm
mov .L_lpm_dst, r0
#endif
.endif
.else
.if (.L_lpm_dst >= 30)
.err ; Registers 30 and 31 are inhibited as 'X_lpm *,Z+' dst.
.endif
#if defined(__AVR_HAVE_LPMX__) && __AVR_HAVE_LPMX__
lpm .L_lpm_dst, Z+
#else
lpm
.if .L_lpm_dst
mov .L_lpm_dst, r0
.endif
adiw r30, 1
#endif
.endif
.endm
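A few hypothetical invocations, with the classic-core expansion noted alongside the single enhanced-core instruction:

    X_lpm              ; defaults: plain "lpm", result lands in r0
    X_lpm r18, Z       ; enhanced: "lpm r18, Z"; classic: "lpm" / "mov r18, r0"
    X_lpm r18, Z+      ; enhanced: "lpm r18, Z+";
                       ; classic: "lpm" / "mov r18, r0" / "adiw r30, 1"
                       ; (r30/r31 are rejected as dst with Z+)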
/*
LPM_R0_ZPLUS_INIT is used before the loop to initialize RAMPZ
for future devices with RAMPZ:Z auto-increment - [e]lpm r0, Z+.
LPM_R0_ZPLUS_NEXT is used inside the loop to load a byte from
the program memory at [RAMPZ:]Z to R0, and increment [RAMPZ:]Z.
The argument in both macros is a register that contains the
high byte (bits 23-16) of the address, bits 15-0 should be in
the Z (r31:r30) register. It can be any register except for:
r0, r1 (__zero_reg__ - assumed to always contain 0), r30, r31.
*/
.macro LPM_R0_ZPLUS_INIT hhi
#if __AVR_ENHANCED__
#if BIG_CODE
out AVR_RAMPZ_ADDR, \hhi
#endif
#endif
.endm
.macro LPM_R0_ZPLUS_NEXT hhi
#if __AVR_ENHANCED__
#if BIG_CODE
/* ELPM with RAMPZ:Z post-increment, load RAMPZ only once */
elpm r0, Z+
#else
/* LPM with Z post-increment, max 64K, no RAMPZ (ATmega83/161/163/32) */
lpm r0, Z+
#endif
#else
#if BIG_CODE
/* ELPM without post-increment, load RAMPZ each time (ATmega103) */
out AVR_RAMPZ_ADDR, \hhi
elpm
adiw r30,1
adc \hhi, __zero_reg__
#else
/* LPM without post-increment, max 64K, no RAMPZ (AT90S*) */
lpm
adiw r30,1
#endif
#endif
.endm
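A sketch of the flash-to-RAM copy loop these two macros are designed for; the symbol "table", the count in r16, and the use of r20 for the high address byte are all hypothetical:

    ldi  r31, hi8(table)      ; Z = low 16 bits of the flash address
    ldi  r30, lo8(table)
    ldi  r20, hh8(table)      ; bits 23..16, meaningful only on >64K parts
    ldi  r16, 10              ; hypothetical byte count
    LPM_R0_ZPLUS_INIT r20     ; preload RAMPZ where the loop won't reload it
1:  LPM_R0_ZPLUS_NEXT r20     ; r0 = flash[RAMPZ:Z], then [RAMPZ:]Z advances
    st   X+, r0               ; X assumed to point at the RAM destination
    dec  r16
    brne 1b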

start/start.S

@@ -1,85 +1,292 @@
#warning Call to boot_card has changed and needs checking.
#warning The call is "void boot_card(const char* cmdline);"
#warning Please check and remove these warnings.
/* Copyright (c) 2002, Marek Michalkiewicz <marekm@amelek.gda.pl>
   Copyright (c) 2007, Eric B. Weddington
   All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.

   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in
     the documentation and/or other materials provided with the
     distribution.

   * Neither the name of the copyright holders nor the names of
     contributors may be used to endorse or promote products derived
     from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE. */

-#define SPL 0x3d
-#define SPH 0x3e
-#define SREG 0x3f
-#define RAMPZ 0x3b
-#define RAMEND (64 * 1024)
-/*
-.weak __stack
-.set __stack, RAMEND - 1
-*/
-.weak __heap_end
-.set __heap_end, 0
/* $Id$ */
#if (__GNUC__ < 3) || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
#error "GCC version >= 3.3 required"
#endif
#include "macros.inc"
.macro vector name
.if (. - __vectors < _VECTORS_SIZE)
.weak \name
.set \name, __bad_interrupt
XJMP \name
.endif
.endm
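For slots that still fit inside _VECTORS_SIZE, each `vector` line below therefore expands to roughly:

    .weak __vector_1
    .set  __vector_1, __bad_interrupt
    XJMP  __vector_1      ; "jmp" on __AVR_MEGA__ parts, "rjmp" otherwise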
.section .vectors,"ax",@progbits
.global __vectors
.func __vectors
__vectors:
-__reset:
-jmp __start
-.endfunc
XJMP __init
vector __vector_1
vector __vector_2
vector __vector_3
vector __vector_4
vector __vector_5
vector __vector_6
vector __vector_7
vector __vector_8
vector __vector_9
vector __vector_10
vector __vector_11
vector __vector_12
vector __vector_13
vector __vector_14
vector __vector_15
vector __vector_16
vector __vector_17
vector __vector_18
vector __vector_19
vector __vector_20
vector __vector_21
vector __vector_22
vector __vector_23
vector __vector_24
vector __vector_25
vector __vector_26
vector __vector_27
vector __vector_28
vector __vector_29
vector __vector_30
vector __vector_31
vector __vector_32
vector __vector_33
vector __vector_34
vector __vector_35
vector __vector_36
vector __vector_37
vector __vector_38
vector __vector_39
vector __vector_40
vector __vector_41
vector __vector_42
vector __vector_43
vector __vector_44
vector __vector_45
vector __vector_46
vector __vector_47
vector __vector_48
vector __vector_49
vector __vector_50
vector __vector_51
vector __vector_52
vector __vector_53
vector __vector_54
vector __vector_55
vector __vector_56
vector __vector_57
vector __vector_58
vector __vector_59
vector __vector_60
vector __vector_61
vector __vector_62
vector __vector_63
vector __vector_64
vector __vector_65
vector __vector_66
vector __vector_67
vector __vector_68
vector __vector_69
vector __vector_70
vector __vector_71
vector __vector_72
vector __vector_73
vector __vector_74
vector __vector_75
vector __vector_76
vector __vector_77
vector __vector_78
vector __vector_79
vector __vector_80
vector __vector_81
vector __vector_82
vector __vector_83
vector __vector_84
vector __vector_85
vector __vector_86
vector __vector_87
vector __vector_88
vector __vector_89
vector __vector_90
vector __vector_91
vector __vector_92
vector __vector_93
vector __vector_94
vector __vector_95
vector __vector_96
vector __vector_97
vector __vector_98
vector __vector_99
vector __vector_100
vector __vector_101
vector __vector_102
vector __vector_103
vector __vector_104
vector __vector_105
vector __vector_106
vector __vector_107
vector __vector_108
vector __vector_109
vector __vector_110
vector __vector_111
vector __vector_112
vector __vector_113
vector __vector_114
vector __vector_115
vector __vector_116
vector __vector_117
vector __vector_118
vector __vector_119
vector __vector_120
vector __vector_121
vector __vector_122
vector __vector_123
vector __vector_124
vector __vector_125
.endfunc
/* Handle unexpected interrupts (enabled and no handler), which
usually indicate a bug. Jump to the __vector_default function
if defined by the user, otherwise jump to the reset address.
This must be in a different section, otherwise the assembler
will resolve "rjmp" offsets and there will be no relocs. */
.text
.global __bad_interrupt
.func __bad_interrupt
__bad_interrupt:
.weak __vector_default
.set __vector_default, __vectors
XJMP __vector_default
.endfunc
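Matching the comment above, an application can catch unexpected interrupts instead of resetting by supplying a strong definition; a minimal hypothetical override:

    .text
    .global __vector_default
__vector_default:
    reti               ; hypothetical: silently ignore spurious interrupts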
.section .init0,"ax",@progbits
-.weak __start
-; .func __start
-__start:
-.section .init2,"ax",@progbits
-clr r1
-out 0x3f, r1
-ldi r28,lo8(__stack)
-ldi r29,hi8(__stack)
-out SPH, r29
-out SPL, r28
.weak __init
; .func __init
__init:
#ifndef __AVR_ASM_ONLY__
.weak __stack
/* By default, malloc() uses the current value of the stack pointer
minus __malloc_margin as the highest available address.
In some applications with external SRAM, the stack can be below
the data section (in the internal SRAM - faster), and __heap_end
should be set to the highest address available for malloc(). */
.weak __heap_end
.set __heap_end, 0
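Under the external-SRAM scenario described above, a port could override both weak symbols; a sketch with purely hypothetical addresses:

    .global __stack
    .set    __stack, 0x045f     ; hypothetical: top of fast internal SRAM
    .global __heap_end
    .set    __heap_end, 0xffff  ; hypothetical: top of external SRAM for malloc()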
.section .init2,"ax",@progbits
clr __zero_reg__
out AVR_STATUS_ADDR, __zero_reg__
ldi r28,lo8(__stack)
#ifdef _HAVE_AVR_STACK_POINTER_HI
ldi r29,hi8(__stack)
out AVR_STACK_POINTER_HI_ADDR, r29
#endif /* _HAVE_AVR_STACK_POINTER_HI */
out AVR_STACK_POINTER_LO_ADDR, r28
#ifdef __AVR_3_BYTE_PC__
ldi r16, hh8(pm(__vectors))
out _SFR_IO_ADDR(EIND), r16
#endif /* __AVR_3_BYTE_PC__ */
#ifdef __AVR_HAVE_RAMPD__
out AVR_RAMPD_ADDR, __zero_reg__
out AVR_RAMPX_ADDR, __zero_reg__
out AVR_RAMPY_ADDR, __zero_reg__
out AVR_RAMPZ_ADDR, __zero_reg__
#endif
#if BIG_CODE
/* Only for >64K devices with RAMPZ, replaces the default code
provided by libgcc.S which is only linked in if necessary. */
.section .init4,"ax",@progbits
.global __do_copy_data
__do_copy_data:
-.global __do_clear_bss
-__do_clear_bss:
-#if 0
ldi r17, hi8(__data_end)
ldi r26, lo8(__data_start)
ldi r27, hi8(__data_start)
ldi r30, lo8(__data_load_start)
ldi r31, hi8(__data_load_start)
-ldi r16, hh8(__data_load_start)
-out RAMPZ, r16
-0:
/* On the enhanced core, "elpm" with post-increment updates RAMPZ
automatically. Otherwise we have to handle it ourselves. */
#ifdef __AVR_ENHANCED__
ldi r16, hh8(__data_load_start)
#else
ldi r16, hh8(__data_load_start - 0x10000)
.L__do_copy_data_carry:
inc r16
#endif
out AVR_RAMPZ_ADDR, r16
rjmp .L__do_copy_data_start
.L__do_copy_data_loop:
#ifdef __AVR_ENHANCED__
elpm r0, Z+
#else
elpm
#endif
st X+, r0
#ifndef __AVR_ENHANCED__
adiw r30, 1
brcs .L__do_copy_data_carry
#endif
.L__do_copy_data_start:
cpi r26, lo8(__data_end)
cpc r27, r17
-breq 1f
-elpm r0, Z+
-st X+, r0
-rjmp 0b
-1:
-#endif
brne .L__do_copy_data_loop
#ifdef __AVR_HAVE_RAMPD__
out AVR_RAMPZ_ADDR, __zero_reg__
#endif /* __AVR_HAVE_RAMPD__*/
#endif /* BIG_CODE */
.set __stack, RAMEND
#endif /* !__AVR_ASM_ONLY__ */
.section .init9,"ax",@progbits
-call boot_card
-jmp exit
#ifdef __AVR_ASM_ONLY__
XJMP boot_card
#else /* !__AVR_ASM_ONLY__ */
XCALL boot_card
XJMP exit
#endif /* __AVR_ASM_ONLY__ */
; .endfunc
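For comparison, in stock avr-libc gcrt1.S the same .init9 block enters the C run time via main, which is exactly what this commit swaps for boot_card:

    #ifdef __AVR_ASM_ONLY__
        XJMP main
    #else
        XCALL main
        XJMP exit
    #endif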
.global exit
.func exit
exit: out 0x2f, r0
.endfunc
.global abort
.func abort
abort: out 0x29, r0
.endfunc
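(The out instructions above write to magic I/O addresses; presumably these are the locations the avrtest simulator targeted by this BSP intercepts as its exit and abort hooks.)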