Compare commits

...

2 Commits

Author                  SHA1        Message                                 Date
copilot-swe-agent[bot]  35e4335ee7  Remove vbus component and related code  2025-10-28 23:36:25 +00:00
                                    Co-authored-by: BernardXiong <1241087+BernardXiong@users.noreply.github.com>
copilot-swe-agent[bot]  6d2f401e17  Initial plan                            2025-10-28 23:25:07 +00:00
43 changed files with 5 additions and 4537 deletions

View File

@@ -28,6 +28,5 @@ extern int __bss_end;
#define HEAP_END (void*)(0x20000000 + 0xA0000)
void rt_hw_board_init(void);
int rt_vbus_do_init(void);
#endif

View File

@@ -18,10 +18,6 @@
#include <shell.h>
#endif
#ifdef RT_USING_VBUS
#include <vbus.h>
#endif
/* thread phase init */
void rt_init_thread_entry(void *parameter)
{
@@ -32,10 +28,6 @@ void rt_init_thread_entry(void *parameter)
finsh_set_device(RT_CONSOLE_DEVICE_NAME);
#endif
#endif
#ifdef RT_USING_VBUS
rt_vbus_do_init();
#endif
}
/*the led thread*/
@@ -45,8 +37,6 @@ static struct rt_thread led_thread;
static void led_thread_entry(void *parameter)
{
rt_device_t led_dev;
rt_device_t vbus_dev;
rt_err_t err;
rt_led_hw_init();
@@ -57,32 +47,14 @@ static void led_thread_entry(void *parameter)
return;
}
vbus_dev = rt_device_find("vecho");
if (vbus_dev == RT_NULL)
{
rt_kprintf("can not find the vbus device\n");
return;
}
err = rt_device_open(vbus_dev, RT_DEVICE_OFLAG_RDWR);
if (err != RT_EOK)
{
rt_kprintf("open vbus failed: %d\n", err);
return;
}
while (1)
{
rt_uint8_t led_value;
int len;
len = rt_device_read(vbus_dev, 0, &led_value, sizeof(led_value));
if (len <= 0)
{
rt_kprintf("vbus read err: %d, %d\n", len, rt_get_errno());
}
rt_uint8_t led_value = 1;
led_dev->write(led_dev, 1, &led_value, sizeof(led_value));
rt_thread_delay(500);
led_value = 0;
led_dev->write(led_dev, 1, &led_value, sizeof(led_value));
rt_thread_delay(500);
}
}

View File

@@ -50,6 +50,5 @@ extern int __bss_end;
void rt_hw_board_init(void);
int rt_hw_board_heap_init(void);
int rt_vbus_do_init(void);
#endif

View File

@@ -1,13 +0,0 @@
#ifndef __VBUS_CONF_H__
#define __VBUS_CONF_H__
/* Number of blocks in VBus. The total size of VBus is
* RT_VMM_RB_BLK_NR * 64byte * 2. */
#define RT_VMM_RB_BLK_NR 20
/* We don't use the IRQ number to trigger IRQ in this BSP. */
#define RT_VBUS_GUEST_VIRQ 0
#define RT_VBUS_HOST_VIRQ 0
#endif /* end of include guard: __VBUS_CONF_H__ */
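(For reference, with the default above the shared area works out to RT_VMM_RB_BLK_NR * 64 bytes * 2 = 20 * 64 * 2 = 2,560 bytes.)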

View File

@@ -1,67 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2015-01-07 Grissiom add comment
*/
#include <rtthread.h>
#ifdef RT_USING_VBUS
#include <rtdevice.h>
#include <vbus.h>
#include <board.h>
struct rt_vbus_ring rt_vbus_rings[2] rt_section("vbus_ring");
int rt_vbus_do_init(void)
{
return rt_vbus_init(&rt_vbus_rings[1], &rt_vbus_rings[0]);
}
INIT_COMPONENT_EXPORT(rt_vbus_do_init);
int rt_vbus_hw_init(void)
{
NVIC_ClearPendingIRQ(M0_M4CORE_IRQn);
NVIC_EnableIRQ(M0_M4CORE_IRQn);
return 0;
}
void M4CORE_IRQHandler(void)
{
LPC_CREG->M4TXEVENT = 0;
rt_vbus_isr(M0_M4CORE_IRQn, RT_NULL);
}
int rt_vbus_hw_eoi(int irqnr, void *param)
{
/* Nothing to do here as we cleared the interrupt in IRQHandler. */
return 0;
}
struct rt_vbus_dev rt_vbus_chn_devx[] = {
{
.req =
{
.prio = 30,
.name = "vecho",
.is_server = 0,
.recv_wm.low = RT_VMM_RB_BLK_NR / 3,
.recv_wm.high = RT_VMM_RB_BLK_NR * 2 / 3,
.post_wm.low = RT_VMM_RB_BLK_NR / 3,
.post_wm.high = RT_VMM_RB_BLK_NR * 2 / 3,
}
},
{
.req =
{
.name = RT_NULL,
}
},
};
#endif /* RT_USING_VBUS */

View File

@@ -1,40 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2015-01-07 Grissiom init commit
*/
#ifndef __VBUS_HW_H__
#define __VBUS_HW_H__
#include <rtthread.h>
#include <board.h>
rt_inline void rt_vbus_tick(unsigned int target_cpu, unsigned int irqnr)
{
__SEV();
}
/* Read memory barrier. */
rt_inline void rt_vbus_smp_rmb(void)
{
__DMB();
}
/* Write memory barrier. */
rt_inline void rt_vbus_smp_wmb(void)
{
__DSB();
}
/* General memory barrier. */
rt_inline void rt_vbus_smp_mb(void)
{
__DSB();
}
#endif /* __VBUS_HW_H__ */

View File

@@ -220,9 +220,6 @@
#define RT_LWIP_MSKADDR3 0
// </section>
#define RT_USING_VBUS
#define _RT_VBUS_RING_SZ 64
#define RT_VBUS_GUEST_VIRQ 0
// </RDTConfigurator>

View File

@@ -1,8 +0,0 @@
#ifndef __VBUS_LOCAL_CONF_H__
#define __VBUS_LOCAL_CONF_H__
#define RT_VBUS_USING_FLOW_CONTROL
#define RT_VBUS_USING_TESTS
#endif /* end of include guard: __VBUS_LOCAL_CONF_H__ */

View File

@@ -18,10 +18,6 @@
#include <shell.h>
#endif
#ifdef RT_USING_VBUS
#include <vbus.h>
#endif
static const unsigned char _M0_CODE[] rt_section("M0_CODE") = {
// #include "M0_CODE.h"
};
@@ -64,10 +60,6 @@ void rt_init_thread_entry(void *parameter)
#endif
#endif
#ifdef RT_USING_VBUS
rt_vbus_do_init();
#endif
_boot_M0();
}
@@ -79,8 +71,6 @@ static void led_thread_entry(void *parameter)
{
rt_uint8_t led_value;
rt_device_t led_dev;
rt_device_t vbus_dev;
rt_err_t err;
rt_led_hw_init();
@@ -91,34 +81,11 @@ static void led_thread_entry(void *parameter)
return;
}
vbus_dev = rt_device_find("vecho");
if (vbus_dev == RT_NULL)
{
rt_kprintf("can not find the vbus device\n");
return;
}
err = rt_device_open(vbus_dev, RT_DEVICE_OFLAG_RDWR);
if (err != RT_EOK)
{
rt_kprintf("open vbus failed: %d\n", err);
return;
}
led_value = 0;
while (1)
{
int len;
led_dev->write(led_dev, 0, &led_value, sizeof(led_value));
led_value = !led_value;
len = rt_device_write(vbus_dev, 0, &led_value, sizeof(led_value));
if (len <= 0)
{
rt_kprintf("vbus write err: %d, %d\n", len, rt_get_errno());
}
rt_thread_delay(1000);
}
}

View File

@@ -50,6 +50,5 @@ extern int __bss_end;
void rt_hw_board_init(void);
int rt_hw_board_heap_init(void);
int rt_vbus_do_init(void);
#endif

View File

@@ -1,13 +0,0 @@
#ifndef __VBUS_CONF_H__
#define __VBUS_CONF_H__
/* Number of blocks in VBus. The total size of VBus is
* RT_VMM_RB_BLK_NR * 64byte * 2. */
#define RT_VMM_RB_BLK_NR 20
/* We don't use the IRQ number to trigger IRQ in this BSP. */
#define RT_VBUS_GUEST_VIRQ 0
#define RT_VBUS_HOST_VIRQ 0
#endif /* end of include guard: __VBUS_CONF_H__ */

View File

@@ -1,67 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2015-01-07 Grissiom add comment
*/
#include <rtthread.h>
#ifdef RT_USING_VBUS
#include <rtdevice.h>
#include <vbus.h>
#include <board.h>
struct rt_vbus_ring rt_vbus_rings[2] rt_section("vbus_ring");
int rt_vbus_do_init(void)
{
return rt_vbus_init(&rt_vbus_rings[0], &rt_vbus_rings[1]);
}
INIT_COMPONENT_EXPORT(rt_vbus_do_init);
int rt_vbus_hw_init(void)
{
NVIC_ClearPendingIRQ(M0CORE_IRQn);
NVIC_EnableIRQ(M0CORE_IRQn);
return 0;
}
void M0CORE_IRQHandler(void)
{
LPC_CREG->M0TXEVENT = 0;
rt_vbus_isr(M0CORE_IRQn, RT_NULL);
}
int rt_vbus_hw_eoi(int irqnr, void *param)
{
/* Nothing to do here as we cleared the interrupt in IRQHandler. */
return 0;
}
struct rt_vbus_dev rt_vbus_chn_devx[] = {
{
.req =
{
.prio = 30,
.name = "vecho",
.is_server = 1,
.recv_wm.low = RT_VMM_RB_BLK_NR / 3,
.recv_wm.high = RT_VMM_RB_BLK_NR * 2 / 3,
.post_wm.low = RT_VMM_RB_BLK_NR / 3,
.post_wm.high = RT_VMM_RB_BLK_NR * 2 / 3,
}
},
{
.req =
{
.name = RT_NULL,
}
},
};
#endif /* RT_USING_VBUS */

View File

@@ -1,40 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2015-01-07 Grissiom init commit
*/
#ifndef __VBUS_HW_H__
#define __VBUS_HW_H__
#include <rtthread.h>
#include <board.h>
rt_inline void rt_vbus_tick(unsigned int target_cpu, unsigned int irqnr)
{
__SEV();
}
/* Read memory barrier. */
rt_inline void rt_vbus_smp_rmb(void)
{
__DMB();
}
/* Write memory barrier. */
rt_inline void rt_vbus_smp_wmb(void)
{
__DSB();
}
/* General memory barrier. */
rt_inline void rt_vbus_smp_mb(void)
{
__DSB();
}
#endif /* __VBUS_HW_H__ */

View File

@@ -222,10 +222,6 @@
#define RT_LWIP_MSKADDR3 0
// </section>
#define RT_USING_VBUS
#define RT_VBUS_GUEST_VIRQ 0
#define _RT_VBUS_RING_SZ 64
// </RDTConfigurator>
#define RT_CPUS_NR 1
#define RT_BACKTRACE_LEVEL_MAX_NR 32

View File

@@ -1,8 +0,0 @@
#ifndef __VBUS_LOCAL_CONF_H__
#define __VBUS_LOCAL_CONF_H__
#define RT_VBUS_USING_FLOW_CONTROL
#define RT_VBUS_USING_TESTS
#endif /* end of include guard: __VBUS_LOCAL_CONF_H__ */

View File

@@ -37,7 +37,6 @@ rsource "libc/Kconfig"
rsource "net/Kconfig"
rsource "mprotect/Kconfig"
rsource "utilities/Kconfig"
rsource "vbus/Kconfig"
endif
if ARCH_MM_MMU

View File

@@ -1,56 +0,0 @@
menuconfig RT_USING_VBUS
bool "VBus: virtual software bus"
default n
if RT_USING_VBUS
config RT_USING_VBUS_RFS
bool "Enable Remote File System on VBus"
default n
help
When the remote file system is enabled, the application can access the remote file system
through VBus with POSIX file I/O.
config RT_USING_VBUS_RSHELL
bool "Enable Remote Shell on VBus"
default n
help
When the remote shell is enabled, the finsh/msh of RT-Thread can be operated from another
operating system.
config RT_VBUS_USING_TESTS
bool "Enable tests on VBus"
default n
config _RT_VBUS_RING_BASE
hex "VBus address"
help
VBus ring buffer physical address.
config _RT_VBUS_RING_SZ
int "VBus ring size"
help
Size of the VBus ring buffer.
config RT_VBUS_GUEST_VIRQ
int "RT_VBUS_GUEST_VIRQ"
help
The interrupt number used to notify the client on a particular system.
config RT_VBUS_HOST_VIRQ
int "RT_VBUS_HOST_VIRQ"
help
The interrupt triggered on a particular system when the client notifies the host.
config RT_VBUS_SHELL_DEV_NAME
string "RT_VBUS_SHELL_DEV_NAME"
default "vbser0"
help
The name of the VBus shell device.
config RT_VBUS_RFS_DEV_NAME
string "RT_VBUS_RFS_DEV_NAME"
default "rfs"
help
The name of the VBus rfs device.
endif

View File

@@ -1,23 +0,0 @@
# RT-Thread building script for component
import SCons, os
from building import *
group = []
if not GetDepend(['RT_USING_VBUS']):
Return('group')
cwd = GetCurrentDir()
src = Glob('*.c')
for c, f in [['RT_USING_VBUS_RFS', 'utilities/rfs.c'],
['RT_USING_VBUS_RSHELL', 'utilities/rshell.c'],
]:
if GetDepend(c):
src += Glob(f)
CPPPATH = [cwd, os.path.join(cwd, 'share_hdr')]
group = DefineGroup('VBus', src, depend = ['RT_USING_VBUS'], CPPPATH = CPPPATH)
Return('group')

View File

@@ -1,256 +0,0 @@
/*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-11-04 Grissiom add comment
*/
#include <rthw.h>
#include <rtthread.h>
#include "prio_queue.h"
struct rt_prio_queue_item {
struct rt_prio_queue_item *next;
/* data follows */
};
static void _do_push(struct rt_prio_queue *que,
rt_uint8_t prio,
struct rt_prio_queue_item *item)
{
if (que->head[prio] == RT_NULL)
{
que->head[prio] = item;
que->bitmap |= 1 << prio;
}
else
{
RT_ASSERT(que->tail[prio]);
que->tail[prio]->next = item;
}
que->tail[prio] = item;
}
static struct rt_prio_queue_item* _do_pop(struct rt_prio_queue *que)
{
int ffs;
struct rt_prio_queue_item *item;
ffs = __rt_ffs(que->bitmap);
if (ffs == 0)
return RT_NULL;
ffs--;
item = que->head[ffs];
RT_ASSERT(item);
que->head[ffs] = item->next;
if (que->head[ffs] == RT_NULL)
{
que->bitmap &= ~(1 << ffs);
}
return item;
}
rt_err_t rt_prio_queue_init(struct rt_prio_queue *que,
const char *name,
void *buf,
rt_size_t bufsz,
rt_size_t itemsz)
{
RT_ASSERT(que);
rt_memset(que, 0, sizeof(*que));
rt_list_init(&(que->suspended_pop_list));
rt_mp_init(&que->pool, name, buf, bufsz,
sizeof(struct rt_prio_queue_item) + itemsz);
que->item_sz = itemsz;
return RT_EOK;
}
void rt_prio_queue_detach(struct rt_prio_queue *que)
{
/* Wake up all suspended pop threads; push threads are suspended on the
 * mempool. */
while (!rt_list_isempty(&(que->suspended_pop_list)))
{
rt_thread_t thread;
/* disable interrupt */
rt_base_t level = rt_hw_interrupt_disable();
/* get next suspend thread */
thread = RT_THREAD_LIST_NODE_ENTRY(que->suspended_pop_list.next);
/* set error code to -RT_ERROR */
thread->error = -RT_ERROR;
rt_thread_resume(thread);
/* enable interrupt */
rt_hw_interrupt_enable(level);
}
rt_mp_detach(&que->pool);
}
#ifdef RT_USING_HEAP
struct rt_prio_queue* rt_prio_queue_create(const char *name,
rt_size_t item_nr,
rt_size_t item_sz)
{
struct rt_prio_queue *que;
rt_size_t bufsz;
bufsz = item_nr * (sizeof(struct rt_prio_queue_item)
+ item_sz
+ sizeof(void*));
RT_ASSERT(item_nr);
que = rt_malloc(sizeof(*que) + bufsz);
if (!que)
return RT_NULL;
rt_prio_queue_init(que, name, que+1, bufsz, item_sz);
return que;
}
void rt_prio_queue_delete(struct rt_prio_queue *que)
{
rt_prio_queue_detach(que);
rt_free(que);
}
#endif
rt_err_t rt_prio_queue_push(struct rt_prio_queue *que,
rt_uint8_t prio,
void *data,
rt_int32_t timeout)
{
rt_base_t level;
struct rt_prio_queue_item *item;
RT_ASSERT(que);
if (prio >= RT_PRIO_QUEUE_PRIO_MAX)
return -RT_ERROR;
item = rt_mp_alloc(&que->pool, timeout);
if (item == RT_NULL)
return -RT_ENOMEM;
rt_memcpy(item+1, data, que->item_sz);
item->next = RT_NULL;
level = rt_hw_interrupt_disable();
_do_push(que, prio, item);
if (!rt_list_isempty(&(que->suspended_pop_list)))
{
rt_thread_t thread;
/* get thread entry */
thread = RT_THREAD_LIST_NODE_ENTRY(que->suspended_pop_list.next);
/* resume it */
rt_thread_resume(thread);
rt_hw_interrupt_enable(level);
/* perform a schedule */
rt_schedule();
return RT_EOK;
}
rt_hw_interrupt_enable(level);
return RT_EOK;
}
rt_err_t rt_prio_queue_pop(struct rt_prio_queue *que,
void *data,
rt_int32_t timeout)
{
rt_base_t level;
struct rt_prio_queue_item *item;
RT_ASSERT(que);
RT_ASSERT(data);
level = rt_hw_interrupt_disable();
for (item = _do_pop(que);
item == RT_NULL;
item = _do_pop(que))
{
rt_thread_t thread;
if (timeout == 0)
{
rt_hw_interrupt_enable(level);
return -RT_ETIMEOUT;
}
RT_DEBUG_NOT_IN_INTERRUPT;
thread = rt_thread_self();
thread->error = RT_EOK;
rt_thread_suspend(thread);
rt_list_insert_before(&(que->suspended_pop_list), &RT_THREAD_LIST_NODE(thread));
if (timeout > 0)
{
rt_tick_t timeout_tick = timeout;
rt_timer_control(&(thread->thread_timer),
RT_TIMER_CTRL_SET_TIME,
&timeout_tick);
rt_timer_start(&(thread->thread_timer));
}
rt_hw_interrupt_enable(level);
rt_schedule();
/* thread is woken up */
if (thread->error != RT_EOK)
return thread->error;
level = rt_hw_interrupt_disable();
}
rt_hw_interrupt_enable(level);
rt_memcpy(data, item+1, que->item_sz);
rt_mp_free(item);
return RT_EOK;
}
void rt_prio_queue_dump(struct rt_prio_queue *que)
{
int level = 0;
rt_kprintf("bitmap: %08x\n", que->bitmap);
for (level = 0; level < RT_PRIO_QUEUE_PRIO_MAX; level++)
{
struct rt_prio_queue_item *item;
rt_kprintf("%2d: ", level);
for (item = que->head[level];
item;
item = item->next)
{
rt_kprintf("%p, ", item);
}
rt_kprintf("\n");
}
}

View File

@@ -1,54 +0,0 @@
/*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-11-04 Grissiom add comment
*/
#ifndef __PRIO_QUEUE_H__
#define __PRIO_QUEUE_H__
#include <rtthread.h>
#define RT_PRIO_QUEUE_PRIO_MAX 32
struct rt_prio_queue_item;
struct rt_prio_queue {
rt_uint32_t bitmap;
struct rt_prio_queue_item *head[RT_PRIO_QUEUE_PRIO_MAX];
struct rt_prio_queue_item *tail[RT_PRIO_QUEUE_PRIO_MAX];
/* push threads suspend on the mempool, not on the queue */
rt_list_t suspended_pop_list;
rt_size_t item_sz;
struct rt_mempool pool;
};
rt_err_t rt_prio_queue_init(struct rt_prio_queue *que,
const char *name,
void *buf,
rt_size_t bufsz,
rt_size_t itemsz);
void rt_prio_queue_detach(struct rt_prio_queue *que);
rt_err_t rt_prio_queue_push(struct rt_prio_queue *que,
rt_uint8_t prio,
void *data,
rt_int32_t timeout);
rt_err_t rt_prio_queue_pop(struct rt_prio_queue *que,
void *data,
rt_int32_t timeout);
#ifdef RT_USING_HEAP
struct rt_prio_queue* rt_prio_queue_create(const char *name,
rt_size_t item_nr,
rt_size_t item_sz);
void rt_prio_queue_delete(struct rt_prio_queue *que);
#endif
void rt_prio_queue_dump(struct rt_prio_queue *que);
#endif /* end of include guard: __PRIO_QUEUE_H__ */
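As context for the removal, a minimal usage sketch of this priority-queue API (illustrative only; the queue name, item size, priority, and timeouts below are assumed values):

#include <rtthread.h>
#include "prio_queue.h"

static void prio_queue_demo(void)
{
    rt_uint32_t in = 42, out;
    /* Heap-based creation: up to 16 items of sizeof(rt_uint32_t) bytes each. */
    struct rt_prio_queue *que = rt_prio_queue_create("demo", 16, sizeof(rt_uint32_t));
    if (que == RT_NULL)
        return;
    /* Push at priority 5; block up to 10 ticks if the backing mempool is full. */
    rt_prio_queue_push(que, 5, &in, 10);
    /* Pop the highest-priority (lowest-numbered) item; wait forever if empty. */
    if (rt_prio_queue_pop(que, &out, RT_WAITING_FOREVER) == RT_EOK)
        rt_kprintf("popped %d\n", out);
    rt_prio_queue_delete(que);
}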

View File

@@ -1,89 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __VBUS_API_H__
#define __VBUS_API_H__
#define RT_VBUS_USING_FLOW_CONTROL
#define RT_VBUS_CHANNEL_NR 32
#define RT_VBUS_BLK_HEAD_SZ 4
#define RT_VBUS_MAX_PKT_SZ (256 - RT_VBUS_BLK_HEAD_SZ)
#define RT_VMM_RB_BLK_NR (_RT_VBUS_RING_SZ / 64 - 1)
#ifndef __ASSEMBLY__
#include <stddef.h> /* For size_t */
struct rt_vbus_blk
{
unsigned char id;
unsigned char qos;
unsigned char len;
unsigned char reserved;
unsigned char data[60];
} __attribute__((packed));
struct rt_vbus_ring
{
volatile size_t put_idx;
volatile size_t get_idx;
/* whether the writer is blocked on this ring. For RTT, it means the
* central writer thread is waiting. For Linux, it means there are some
* threads waiting for space to write.
*
* Note that we don't record whether there are reading threads blocked. When
* there is new data, the other side will always be woken up. */
volatile unsigned int blocked;
struct rt_vbus_blk blks[RT_VMM_RB_BLK_NR];
};
enum
{
RT_VBUS_CHN0_CMD_ENABLE,
RT_VBUS_CHN0_CMD_DISABLE,
RT_VBUS_CHN0_CMD_SET,
RT_VBUS_CHN0_CMD_ACK,
RT_VBUS_CHN0_CMD_NAK,
/* If the receiving side has reached the high water mark, it has the right to
* suspend the channel. All servers/clients should know about this
* command, but one that does not implement flow control may ignore
* this command. */
RT_VBUS_CHN0_CMD_SUSPEND,
RT_VBUS_CHN0_CMD_RESUME,
RT_VBUS_CHN0_CMD_MAX,
};
enum rt_vbus_chn_status
{
/* initial state, available for reuse */
RT_VBUS_CHN_ST_AVAILABLE,
/* ACK DISABLE send(CS) or received(CS), but not ready for reuse.(the
* channel is not closed by this end) */
RT_VBUS_CHN_ST_CLOSED,
/* ENABLE send(client) or received(server) */
RT_VBUS_CHN_ST_ESTABLISHING,
/* ACK SET send(C) or received(S) */
RT_VBUS_CHN_ST_ESTABLISHED,
/* Channel suspended by flow control. */
RT_VBUS_CHN_ST_SUSPEND,
/* DISABLE received(CS) */
RT_VBUS_CHN_ST_CLOSING,
};
#endif
#undef BUILD_ASSERT
/* borrowed from http://lxr.linux.no/linux+v2.6.26.5/include/linux/kernel.h#L494 */
#define BUILD_ASSERT(condition) ((void)sizeof(char[1 - 2*!(condition)]))
/* max length of a channel name, including the \0 */
#define RT_VBUS_CHN_NAME_MAX 16
#endif /* end of include guard: __VBUS_API_H__ */

File diff suppressed because it is too large

View File

@@ -1,177 +0,0 @@
/*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2014-06-09 Grissiom version 2.0.2; add comment
* 2015-01-06 Grissiom version 2.0.3; API change, no functional changes
*/
#ifndef __VBUS_H__
#define __VBUS_H__
#include <vbus_api.h>
int rt_vbus_init(void *outr, void *inr);
void rt_vbus_resume_out_thread(void);
/** Post data on channel.
*
* @param chnr the channel number
* @param prio the priority of the data
* @param datap pointer to the actual data
* @param size number of byte of the data
* @param timeout the value used in the blocking API
*
* Note: rt_vbus_post is asynchronous: when it returns, @datap and @size have
* been recorded in the post queue, but there is no guarantee that the data
* has been copied into the ring buffer. To avoid data corruption, you need
* to wait on the RT_VBUS_EVENT_ID_TX event.
*
* However, if you just post static data such as a static string, there is no
* need to wait.
*
* @sa rt_vbus_register_listener .
*/
rt_err_t rt_vbus_post(rt_uint8_t chnr,
rt_uint8_t prio,
const void *datap,
rt_size_t size,
rt_int32_t timeout);
struct rt_vbus_data {
/* Number of bytes in current data package. */
unsigned char size;
/* Used internally in VBus. Don't modify this field as it may corrupt the
* receive queue. */
struct rt_vbus_data *next;
/* Data follows the struct */
};
struct rt_vbus_wm_cfg {
unsigned int low, high;
};
struct rt_vbus_request {
unsigned char prio;
const char *name;
int is_server;
struct rt_vbus_wm_cfg recv_wm, post_wm;
};
/** Request a channel.
*
* @return channel number. Negative if error happened.
*/
int rt_vbus_request_chn(struct rt_vbus_request *req, int timeout);
/** Close channel @chnr */
void rt_vbus_close_chn(unsigned char chnr);
/** Set the water mark level for posting into the channel @chnr. */
void rt_vbus_set_post_wm(unsigned char chnr, unsigned int low, unsigned int high);
/** Set the water mark level for receiving from the channel @chnr. */
void rt_vbus_set_recv_wm(unsigned char chnr, unsigned int low, unsigned int high);
typedef void (*rt_vbus_event_listener)(void *ctx);
enum rt_vbus_event_id {
/* On a packet received in channel. */
RT_VBUS_EVENT_ID_RX,
/* On the data of rt_vbus_post has been written to the ring buffer. */
RT_VBUS_EVENT_ID_TX,
/* On the channel has been closed. */
RT_VBUS_EVENT_ID_DISCONN,
RT_VBUS_EVENT_ID_MAX,
};
/** Register callback @indi on the event @eve on the @chnr.
*
* @ctx will passed to @indi on calling the @indi.
*/
void rt_vbus_register_listener(unsigned char chnr,
enum rt_vbus_event_id eve,
rt_vbus_event_listener indi,
void *ctx);
/** Listen on any events happen on the @chnr for @timeout ticks.
*
* This function blocks until events occur or timeout happened.
*/
rt_err_t rt_vbus_listen_on(rt_uint8_t chnr,
rt_int32_t timeout);
/** Push a data package into the receive queue of the channel @chnr. */
void rt_vbus_data_push(unsigned int chnr,
struct rt_vbus_data *data);
/** Pop a data package from the receive queue of the channel @chnr.
*
* The actual data is following the struct rt_vbus_data. After using it, it
* should be freed by rt_free.
*/
struct rt_vbus_data* rt_vbus_data_pop(unsigned int chnr);
struct rt_vbus_dev
{
/* Runtime information. */
rt_uint8_t chnr;
struct rt_vbus_data *act;
rt_size_t pos;
/* There will be a request for each channel, so there is no need to separate
* them so clearly. */
struct rt_vbus_request req;
};
rt_err_t rt_vbus_chnx_init(void);
/** Get the corresponding channel number from the VBus device @dev. */
rt_uint8_t rt_vbus_get_chnnr(rt_device_t dev);
/** Register a call back on the other side disconnect the channel.
*
* @sa rt_vbus_register_listener .
*/
void rt_vbus_chnx_register_disconn(rt_device_t dev,
rt_vbus_event_listener indi,
void *ctx);
/* Commands for the device control interface. */
#define VBUS_IOCRECV_WM 0xD1
#define VBUS_IOCPOST_WM 0xD2
/** Configure event listener */
#define VBUS_IOC_LISCFG 0xD3
struct rt_vbus_dev_liscfg
{
enum rt_vbus_event_id event;
rt_vbus_event_listener listener;
void *ctx;
};
int rt_vbus_shell_start(void);
#ifdef RT_USING_VBUS_RFS
int dfs_rfs_init(void);
#endif
/** VBus hardware init function.
*
* BSP should implement this function to initialize the interrupts etc.
*/
int rt_vbus_hw_init(void);
/** VBus ISR function.
*
* BSP should call this function when the interrupt from other core is
* triggered. @param is not used by VBus and will pass to rt_vbus_hw_eoi.
*/
void rt_vbus_isr(int irqnr, void *param);
/** VBus End Of Interrupt function.
*
* This function will be called when VBus finished the ISR handling. BSP should
* define this function to clear the interrupt flag etc.
*/
int rt_vbus_hw_eoi(int irqnr, void *param);
#endif /* end of include guard: __VBUS_H__ */
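As context, a minimal sketch of how the channel-request and post APIs declared above were typically combined (illustrative only; the priority, water marks, and buffer below are assumed values):

#include <rtthread.h>
#include <vbus.h>

static void vbus_post_demo(void)
{
    struct rt_vbus_request req = {
        .prio      = 20,                      /* assumed channel priority */
        .name      = "vecho",
        .is_server = 0,
        .recv_wm   = { .low = 5, .high = 10 },
        .post_wm   = { .low = 5, .high = 10 },
    };
    static const char msg[] = "hello";
    int chnr = rt_vbus_request_chn(&req, RT_WAITING_FOREVER);

    if (chnr < 0)
        return;
    /* Static data: no need to wait for RT_VBUS_EVENT_ID_TX before reuse. */
    rt_vbus_post((rt_uint8_t)chnr, req.prio, msg, sizeof(msg) - 1,
                 RT_WAITING_FOREVER);
    rt_vbus_close_chn((rt_uint8_t)chnr);
}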

View File

@@ -1,269 +0,0 @@
/*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-11-04 Grissiom add comment
*/
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#include "vbus.h"
static void _rx_indicate(void *ctx)
{
rt_device_t dev = ctx;
if (dev->rx_indicate)
dev->rx_indicate(dev, 0);
}
static void _tx_complete(void *ctx)
{
rt_device_t dev = ctx;
if (dev->tx_complete)
dev->tx_complete(dev, 0);
}
static rt_err_t _open(rt_device_t dev, rt_uint16_t oflag)
{
int chnr;
struct rt_vbus_dev *vdev = dev->user_data;
if (vdev->chnr)
return RT_EOK;
/* FIXME: requesting the same name twice will crash */
chnr = rt_vbus_request_chn(&vdev->req, RT_WAITING_FOREVER);
if (chnr < 0)
return chnr;
vdev->chnr = chnr;
rt_vbus_register_listener(chnr, RT_VBUS_EVENT_ID_RX, _rx_indicate, dev);
rt_vbus_register_listener(chnr, RT_VBUS_EVENT_ID_TX, _tx_complete, dev);
return RT_EOK;
}
static rt_err_t _close(rt_device_t dev)
{
struct rt_vbus_dev *vdev = dev->user_data;
RT_ASSERT(vdev->chnr != 0);
rt_vbus_close_chn(vdev->chnr);
vdev->chnr = 0;
return RT_EOK;
}
static rt_ssize_t _read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
rt_size_t outsz = 0;
struct rt_vbus_dev *vdev = dev->user_data;
RT_ASSERT(vdev->chnr != 0);
if (vdev->act == RT_NULL)
{
vdev->act = rt_vbus_data_pop(vdev->chnr);
vdev->pos = 0;
}
while (1)
{
rt_err_t err;
while (vdev->act)
{
rt_size_t cpysz;
if (size - outsz > vdev->act->size - vdev->pos)
cpysz = vdev->act->size - vdev->pos;
else
cpysz = size - outsz;
rt_memcpy((char*)buffer + outsz, ((char*)(vdev->act+1)) + vdev->pos, cpysz);
vdev->pos += cpysz;
outsz += cpysz;
if (outsz == size)
{
return outsz;
}
else if (outsz > size)
RT_ASSERT(0);
/* free old and get new */
rt_free(vdev->act);
vdev->act = rt_vbus_data_pop(vdev->chnr);
vdev->pos = 0;
}
/* TODO: We don't want to touch the rx_indicate here, but this leads to
* some duplication. Maybe we should find a better way to handle this.
*/
if (rt_interrupt_get_nest() == 0)
{
err = rt_vbus_listen_on(vdev->chnr, RT_WAITING_FOREVER);
}
else
{
err = rt_vbus_listen_on(vdev->chnr, 0);
}
if (err != RT_EOK)
{
rt_set_errno(err);
return outsz;
}
vdev->act = rt_vbus_data_pop(vdev->chnr);
vdev->pos = 0;
}
}
static rt_ssize_t _write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
rt_err_t err;
struct rt_vbus_dev *vdev = dev->user_data;
RT_ASSERT(vdev->chnr != 0);
if (rt_interrupt_get_nest() == 0)
{
/* Thread context. */
err = rt_vbus_post(vdev->chnr, vdev->req.prio,
buffer, size, RT_WAITING_FOREVER);
}
else
{
/* Interrupt context. */
err = rt_vbus_post(vdev->chnr, vdev->req.prio,
buffer, size, 0);
}
if (err)
{
rt_set_errno(err);
return 0;
}
return size;
}
rt_err_t _control(rt_device_t dev, int cmd, void *args)
{
RT_ASSERT(dev);
switch (cmd) {
case VBUS_IOC_LISCFG: {
struct rt_vbus_dev *vdev = dev->user_data;
struct rt_vbus_dev_liscfg *liscfg = args;
RT_ASSERT(vdev->chnr != 0);
if (!liscfg)
return -RT_ERROR;
rt_vbus_register_listener(vdev->chnr, liscfg->event,
liscfg->listener, liscfg->ctx);
return RT_EOK;
}
break;
#ifdef RT_VBUS_USING_FLOW_CONTROL
case VBUS_IOCRECV_WM: {
struct rt_vbus_dev *vdev = dev->user_data;
struct rt_vbus_wm_cfg *cfg;
RT_ASSERT(vdev->chnr != 0);
if (!args)
return -RT_ERROR;
cfg = (struct rt_vbus_wm_cfg*)args;
if (cfg->low > cfg->high)
return -RT_ERROR;
rt_vbus_set_recv_wm(vdev->chnr, cfg->low, cfg->high);
return RT_EOK;
}
break;
case VBUS_IOCPOST_WM: {
struct rt_vbus_dev *vdev = dev->user_data;
struct rt_vbus_wm_cfg *cfg;
RT_ASSERT(vdev->chnr != 0);
if (!args)
return -RT_ERROR;
cfg = (struct rt_vbus_wm_cfg*)args;
if (cfg->low > cfg->high)
return -RT_ERROR;
rt_vbus_set_post_wm(vdev->chnr, cfg->low, cfg->high);
return RT_EOK;
}
break;
#endif
default:
break;
};
return -RT_ENOSYS;
}
rt_uint8_t rt_vbus_get_chnnr(rt_device_t dev)
{
struct rt_vbus_dev *vdev;
RT_ASSERT(dev);
vdev = dev->user_data;
return vdev->chnr;
}
void rt_vbus_chnx_register_disconn(rt_device_t dev,
rt_vbus_event_listener indi,
void *ctx)
{
if (dev && dev->user_data)
{
struct rt_vbus_dev *vdev = dev->user_data;
RT_ASSERT(vdev->chnr != 0);
rt_vbus_register_listener(vdev->chnr, RT_VBUS_EVENT_ID_DISCONN,
indi, ctx);
}
}
#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
extern struct rt_vbus_dev rt_vbus_chn_devx[];
static struct rt_device _devx[32];
rt_err_t rt_vbus_chnx_init(void)
{
int i;
struct rt_vbus_dev *p;
for (i = 0, p = rt_vbus_chn_devx;
i < ARRAY_SIZE(_devx) && p->req.name;
i++, p++)
{
_devx[i].type = RT_Device_Class_Char;
_devx[i].open = _open;
_devx[i].close = _close;
_devx[i].read = _read;
_devx[i].write = _write;
_devx[i].control = _control;
_devx[i].user_data = p;
rt_device_register(&_devx[i], p->req.name, RT_DEVICE_FLAG_RDWR);
}
return RT_EOK;
}

View File

@@ -1,50 +0,0 @@
/*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2014-04-16 Grissiom first version
*/
#include <rthw.h>
#include <rtthread.h>
#include "watermark_queue.h"
void rt_wm_que_set_mark(struct rt_watermark_queue *wg,
unsigned int low, unsigned int high)
{
RT_ASSERT(low <= high);
wg->high_mark = high;
wg->low_mark = low;
}
void rt_wm_que_init(struct rt_watermark_queue *wg,
unsigned int low, unsigned int high)
{
rt_wm_que_set_mark(wg, low, high);
rt_list_init(&wg->suspended_threads);
wg->level = 0;
}
void rt_wm_que_dump(struct rt_watermark_queue *wg)
{
struct rt_list_node *node;
rt_kprintf("wg %p: low: %d, high: %d, cur: %d\n",
wg, wg->low_mark, wg->high_mark, wg->level);
rt_kprintf("thread suspend:");
for (node = wg->suspended_threads.next;
node != &wg->suspended_threads;
node = node->next)
{
rt_thread_t thread;
thread = RT_THREAD_LIST_NODE_ENTRY(wg->suspended_threads.next);
rt_kprintf(" %.*s", RT_NAME_MAX, thread->parent.name);
}
rt_kprintf("\n");
}

View File

@@ -1,129 +0,0 @@
/*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2014-04-16 Grissiom first version
*/
struct rt_watermark_queue
{
/* Current water level. */
unsigned int level;
unsigned int high_mark;
unsigned int low_mark;
rt_list_t suspended_threads;
};
/** Init the struct rt_watermark_queue.
*/
void rt_wm_que_init(struct rt_watermark_queue *wg,
unsigned int low, unsigned int high);
void rt_wm_que_set_mark(struct rt_watermark_queue *wg,
unsigned int low, unsigned int high);
void rt_wm_que_dump(struct rt_watermark_queue *wg);
/* Water marks are often used in performance-critical places. Benchmarks show
* that inlining these functions gives a ~10% performance gain in some
* situations (for example, VBus). So keep the inc/dec compact and inline. */
/** Increase the water level.
*
* It should be called in the thread that wants to raise the water level. If the
* current level is above the high mark, the thread will be suspended for up to
* @timeout ticks.
*
* @return RT_EOK if the water level was increased successfully. -RT_EFULL if
* @timeout is zero and the level is above the high mark. -RT_ETIMEOUT if a
* timeout occurred.
*/
rt_inline rt_err_t rt_wm_que_inc(struct rt_watermark_queue *wg,
int timeout)
{
rt_base_t level;
/* Assert as early as possible. */
if (timeout != 0)
{
RT_DEBUG_IN_THREAD_CONTEXT;
}
level = rt_hw_interrupt_disable();
while (wg->level > wg->high_mark)
{
rt_thread_t thread;
if (timeout == 0)
{
rt_hw_interrupt_enable(level);
return -RT_EFULL;
}
thread = rt_thread_self();
thread->error = RT_EOK;
rt_thread_suspend(thread);
rt_list_insert_after(&wg->suspended_threads, &RT_THREAD_LIST_NODE(thread));
if (timeout > 0)
{
rt_tick_t timeout_tick = timeout;
rt_timer_control(&(thread->thread_timer),
RT_TIMER_CTRL_SET_TIME,
&timeout_tick);
rt_timer_start(&(thread->thread_timer));
}
rt_hw_interrupt_enable(level);
rt_schedule();
if (thread->error != RT_EOK)
return thread->error;
level = rt_hw_interrupt_disable();
}
wg->level++;
if (wg->level == 0)
{
wg->level = ~0;
}
rt_hw_interrupt_enable(level);
return RT_EOK;
}
/** Decrease the water level.
*
* It should be called by the consumer that drains the water out. If the water
* level reaches the low mark, all threads suspended in this queue will be woken
* up. It is safe to call this function in interrupt context.
*/
rt_inline void rt_wm_que_dec(struct rt_watermark_queue *wg)
{
int need_sched = 0;
rt_base_t level;
if (wg->level == 0)
return;
level = rt_hw_interrupt_disable();
wg->level--;
if (wg->level == wg->low_mark)
{
/* There should be spaces between the low mark and high mark, so it's
* safe to resume all the threads. */
while (!rt_list_isempty(&wg->suspended_threads))
{
rt_thread_t thread;
thread = RT_THREAD_LIST_NODE_ENTRY(wg->suspended_threads.next);
rt_thread_resume(thread);
need_sched = 1;
}
}
rt_hw_interrupt_enable(level);
if (need_sched)
rt_schedule();
}
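As context, a minimal sketch of the producer/consumer pattern these inline helpers support (illustrative only; the mark levels below are assumed values):

#include <rthw.h>
#include <rtthread.h>
#include "watermark_queue.h"

static struct rt_watermark_queue demo_wq;

/* Allow roughly 20 outstanding items; suspended producers resume at 10. */
static void wm_que_demo_init(void)
{
    rt_wm_que_init(&demo_wq, 10, 20);
}

/* Producer side: raise the level before queuing an item, blocking while the
 * level is above the high mark. */
static rt_err_t producer_enqueue(void)
{
    return rt_wm_que_inc(&demo_wq, RT_WAITING_FOREVER);
}

/* Consumer side: lower the level after draining one item; also safe to call
 * from interrupt context. */
static void consumer_dequeue(void)
{
    rt_wm_que_dec(&demo_wq);
}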

View File

@@ -1,23 +0,0 @@
# RT-Thread building script for component
from building import *
Import('rtconfig')
cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.cpp')
CPPPATH = [cwd]
if rtconfig.PLATFORM in ['armcc', 'armclang']:
src += Glob('*_rvds.S')
if rtconfig.PLATFORM in ['gcc']:
src += Glob('*_init.S')
src += Glob('*_gcc.S')
if rtconfig.PLATFORM in ['iccarm']:
src += Glob('*_iar.S')
group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@@ -1,72 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __ARMV7_H__
#define __ARMV7_H__
/* the exception stack without VFP registers */
struct rt_hw_exp_stack
{
unsigned long r0;
unsigned long r1;
unsigned long r2;
unsigned long r3;
unsigned long r4;
unsigned long r5;
unsigned long r6;
unsigned long r7;
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long fp;
unsigned long ip;
unsigned long sp;
unsigned long lr;
unsigned long pc;
unsigned long cpsr;
};
struct rt_hw_stack
{
unsigned long cpsr;
unsigned long r0;
unsigned long r1;
unsigned long r2;
unsigned long r3;
unsigned long r4;
unsigned long r5;
unsigned long r6;
unsigned long r7;
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long fp;
unsigned long ip;
unsigned long lr;
unsigned long pc;
};
#define USERMODE 0x10
#define FIQMODE 0x11
#define IRQMODE 0x12
#define SVCMODE 0x13
#define MONITORMODE 0x16
#define ABORTMODE 0x17
#define HYPMODE 0x1b
#define UNDEFMODE 0x1b
#define MODEMASK 0x1f
#define NOINT 0xc0
#define T_Bit (1<<5)
#define F_Bit (1<<6)
#define I_Bit (1<<7)
#define A_Bit (1<<8)
#define E_Bit (1<<9)
#define J_Bit (1<<24)
#endif

View File

@@ -1,93 +0,0 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
#include <rtconfig.h>
.section .text, "ax"
/*
* rt_base_t rt_hw_interrupt_disable();
*/
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
mrs r0, cpsr
cpsid i
bx lr
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
msr cpsr, r0
bx lr
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
*/
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
ldr sp, [r0] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc
.section .bss.share.isr
_guest_switch_lvl:
.word 0
.globl vmm_virq_update
.section .text.isr, "ax"
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
.globl rt_hw_context_switch
rt_hw_context_switch:
stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC)
stmfd sp!, {r0-r12, lr} @ push lr & register file
mrs r4, cpsr
tst lr, #0x01
orrne r4, r4, #0x20 @ it's thumb code
stmfd sp!, {r4} @ push cpsr
str sp, [r0] @ store sp in preempted tasks TCB
ldr sp, [r1] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task cpsr to spsr
msr spsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc, copy spsr to cpsr
/*
* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
*/
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
ldr r2, =rt_thread_switch_interrupt_flag
ldr r3, [r2]
cmp r3, #1
beq _reswitch
ldr ip, =rt_interrupt_from_thread @ set rt_interrupt_from_thread
mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1
str r0, [ip]
str r3, [r2]
_reswitch:
ldr r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread
str r1, [r2]
bx lr

View File

@@ -1,20 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __CP15_H__
#define __CP15_H__
unsigned long rt_cpu_get_smp_id(void);
void rt_cpu_mmu_disable(void);
void rt_cpu_mmu_enable(void);
void rt_cpu_tlb_set(volatile unsigned long*);
void rt_cpu_vector_set_base(unsigned int addr);
#endif

View File

@@ -1,125 +0,0 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
.globl rt_cpu_get_smp_id
rt_cpu_get_smp_id:
mrc p15, #0, r0, c0, c0, #5
bx lr
.globl rt_cpu_vector_set_base
rt_cpu_vector_set_base:
mcr p15, #0, r0, c12, c0, #0
dsb
bx lr
.globl rt_hw_cpu_dcache_enable
rt_hw_cpu_dcache_enable:
mrc p15, #0, r0, c1, c0, #0
orr r0, r0, #0x00000004
mcr p15, #0, r0, c1, c0, #0
bx lr
.globl rt_hw_cpu_icache_enable
rt_hw_cpu_icache_enable:
mrc p15, #0, r0, c1, c0, #0
orr r0, r0, #0x00001000
mcr p15, #0, r0, c1, c0, #0
bx lr
_FLD_MAX_WAY:
.word 0x3ff
_FLD_MAX_IDX:
.word 0x7ff
.globl rt_cpu_dcache_clean_flush
rt_cpu_dcache_clean_flush:
push {r4-r11}
dmb
mrc p15, #1, r0, c0, c0, #1 @ read clid register
ands r3, r0, #0x7000000 @ get level of coherency
mov r3, r3, lsr #23
beq finished
mov r10, #0
loop1:
add r2, r10, r10, lsr #1
mov r1, r0, lsr r2
and r1, r1, #7
cmp r1, #2
blt skip
mcr p15, #2, r10, c0, c0, #0
isb
mrc p15, #1, r1, c0, c0, #0
and r2, r1, #7
add r2, r2, #4
ldr r4, _FLD_MAX_WAY
ands r4, r4, r1, lsr #3
clz r5, r4
ldr r7, _FLD_MAX_IDX
ands r7, r7, r1, lsr #13
loop2:
mov r9, r4
loop3:
orr r11, r10, r9, lsl r5
orr r11, r11, r7, lsl r2
mcr p15, #0, r11, c7, c14, #2
subs r9, r9, #1
bge loop3
subs r7, r7, #1
bge loop2
skip:
add r10, r10, #2
cmp r3, r10
bgt loop1
finished:
dsb
isb
pop {r4-r11}
bx lr
.globl rt_hw_cpu_dcache_disable
rt_hw_cpu_dcache_disable:
push {r4-r11, lr}
bl rt_cpu_dcache_clean_flush
mrc p15, #0, r0, c1, c0, #0
bic r0, r0, #0x00000004
mcr p15, #0, r0, c1, c0, #0
pop {r4-r11, lr}
bx lr
.globl rt_hw_cpu_icache_disable
rt_hw_cpu_icache_disable:
mrc p15, #0, r0, c1, c0, #0
bic r0, r0, #0x00001000
mcr p15, #0, r0, c1, c0, #0
bx lr
.globl rt_cpu_mmu_disable
rt_cpu_mmu_disable:
mcr p15, #0, r0, c8, c7, #0 @ invalidate tlb
mrc p15, #0, r0, c1, c0, #0
bic r0, r0, #1
mcr p15, #0, r0, c1, c0, #0 @ clear mmu bit
dsb
bx lr
.globl rt_cpu_mmu_enable
rt_cpu_mmu_enable:
mrc p15, #0, r0, c1, c0, #0
orr r0, r0, #0x001
mcr p15, #0, r0, c1, c0, #0 @ set mmu enable bit
dsb
bx lr
.globl rt_cpu_tlb_set
rt_cpu_tlb_set:
mcr p15, #0, r0, c2, c0, #0
dmb
bx lr

View File

@@ -1,20 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-15 Bernard first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <board.h>
/**
* @addtogroup AM33xx
*/
/*@{*/
/*@}*/

View File

@@ -1,306 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
* 2014-04-03 Grissiom many enhancements
*/
#include <rtthread.h>
#include <board.h>
#include "gic.h"
#include "cp15.h"
struct arm_gic
{
rt_uint32_t offset;
rt_uint32_t dist_hw_base;
rt_uint32_t cpu_hw_base;
};
static struct arm_gic _gic_table[ARM_GIC_MAX_NR];
#define GIC_CPU_CTRL(hw_base) __REG32((hw_base) + 0x00)
#define GIC_CPU_PRIMASK(hw_base) __REG32((hw_base) + 0x04)
#define GIC_CPU_BINPOINT(hw_base) __REG32((hw_base) + 0x08)
#define GIC_CPU_INTACK(hw_base) __REG32((hw_base) + 0x0c)
#define GIC_CPU_EOI(hw_base) __REG32((hw_base) + 0x10)
#define GIC_CPU_RUNNINGPRI(hw_base) __REG32((hw_base) + 0x14)
#define GIC_CPU_HIGHPRI(hw_base) __REG32((hw_base) + 0x18)
#define GIC_DIST_CTRL(hw_base) __REG32((hw_base) + 0x000)
#define GIC_DIST_TYPE(hw_base) __REG32((hw_base) + 0x004)
#define GIC_DIST_IGROUP(hw_base, n) __REG32((hw_base) + 0x080 + ((n)/32) * 4)
#define GIC_DIST_ENABLE_SET(hw_base, n) __REG32((hw_base) + 0x100 + ((n)/32) * 4)
#define GIC_DIST_ENABLE_CLEAR(hw_base, n) __REG32((hw_base) + 0x180 + ((n)/32) * 4)
#define GIC_DIST_PENDING_SET(hw_base, n) __REG32((hw_base) + 0x200 + ((n)/32) * 4)
#define GIC_DIST_PENDING_CLEAR(hw_base, n) __REG32((hw_base) + 0x280 + ((n)/32) * 4)
#define GIC_DIST_ACTIVE_SET(hw_base, n) __REG32((hw_base) + 0x300 + ((n)/32) * 4)
#define GIC_DIST_ACTIVE_CLEAR(hw_base, n) __REG32((hw_base) + 0x380 + ((n)/32) * 4)
#define GIC_DIST_PRI(hw_base, n) __REG32((hw_base) + 0x400 + ((n)/4) * 4)
#define GIC_DIST_TARGET(hw_base, n) __REG32((hw_base) + 0x800 + ((n)/4) * 4)
#define GIC_DIST_CONFIG(hw_base, n) __REG32((hw_base) + 0xc00 + ((n)/16) * 4)
#define GIC_DIST_SOFTINT(hw_base) __REG32((hw_base) + 0xf00)
#define GIC_DIST_CPENDSGI(hw_base, n) __REG32((hw_base) + 0xf10 + ((n)/4) * 4)
#define GIC_DIST_ICPIDR2(hw_base) __REG32((hw_base) + 0xfe8)
static unsigned int _gic_max_irq;
int arm_gic_get_active_irq(rt_uint32_t index)
{
int irq;
RT_ASSERT(index < ARM_GIC_MAX_NR);
irq = GIC_CPU_INTACK(_gic_table[index].cpu_hw_base);
irq += _gic_table[index].offset;
return irq;
}
void arm_gic_ack(rt_uint32_t index, int irq)
{
rt_uint32_t mask = 1 << (irq % 32);
RT_ASSERT(index < ARM_GIC_MAX_NR);
irq = irq - _gic_table[index].offset;
RT_ASSERT(irq >= 0);
GIC_DIST_ENABLE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
GIC_CPU_EOI(_gic_table[index].cpu_hw_base) = irq;
GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, irq) = mask;
}
void arm_gic_mask(rt_uint32_t index, int irq)
{
rt_uint32_t mask = 1 << (irq % 32);
RT_ASSERT(index < ARM_GIC_MAX_NR);
irq = irq - _gic_table[index].offset;
RT_ASSERT(irq >= 0);
GIC_DIST_ENABLE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}
void arm_gic_clear_pending(rt_uint32_t index, int irq)
{
rt_uint32_t mask = 1 << (irq % 32);
RT_ASSERT(index < ARM_GIC_MAX_NR);
irq = irq - _gic_table[index].offset;
RT_ASSERT(irq >= 0);
GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}
void arm_gic_clear_active(rt_uint32_t index, int irq)
{
rt_uint32_t mask = 1 << (irq % 32);
RT_ASSERT(index < ARM_GIC_MAX_NR);
irq = irq - _gic_table[index].offset;
RT_ASSERT(irq >= 0);
GIC_DIST_ACTIVE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}
void arm_gic_set_cpu(rt_uint32_t index, int irq, unsigned int cpumask)
{
rt_uint32_t old_tgt;
RT_ASSERT(index < ARM_GIC_MAX_NR);
irq = irq - _gic_table[index].offset;
RT_ASSERT(irq >= 0);
old_tgt = GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq);
old_tgt &= ~(0x0FFUL << ((irq % 4)*8));
old_tgt |= cpumask << ((irq % 4)*8);
GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) = old_tgt;
}
void arm_gic_umask(rt_uint32_t index, int irq)
{
rt_uint32_t mask = 1 << (irq % 32);
RT_ASSERT(index < ARM_GIC_MAX_NR);
irq = irq - _gic_table[index].offset;
RT_ASSERT(irq >= 0);
GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, irq) = mask;
}
void arm_gic_dump_type(rt_uint32_t index)
{
unsigned int gic_type;
gic_type = GIC_DIST_TYPE(_gic_table[index].dist_hw_base);
rt_kprintf("GICv%d on %p, max IRQs: %d, %s security extension(%08x)\n",
(GIC_DIST_ICPIDR2(_gic_table[index].dist_hw_base) >> 4) & 0xf,
_gic_table[index].dist_hw_base,
_gic_max_irq,
gic_type & (1 << 10) ? "has" : "no",
gic_type);
}
void arm_gic_dump(rt_uint32_t index)
{
unsigned int i, k;
k = GIC_CPU_HIGHPRI(_gic_table[index].cpu_hw_base);
rt_kprintf("--- high pending priority: %d(%08x)\n", k, k);
rt_kprintf("--- hw mask ---\n");
for (i = 0; i < _gic_max_irq / 32; i++)
{
rt_kprintf("0x%08x, ",
GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base,
i * 32));
}
rt_kprintf("\n--- hw pending ---\n");
for (i = 0; i < _gic_max_irq / 32; i++)
{
rt_kprintf("0x%08x, ",
GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base,
i * 32));
}
rt_kprintf("\n--- hw active ---\n");
for (i = 0; i < _gic_max_irq / 32; i++)
{
rt_kprintf("0x%08x, ",
GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base,
i * 32));
}
rt_kprintf("\n");
}
#ifdef RT_USING_FINSH
#include <finsh.h>
FINSH_FUNCTION_EXPORT_ALIAS(arm_gic_dump, gic, show gic status);
#endif
int arm_gic_dist_init(rt_uint32_t index, rt_uint32_t dist_base, int irq_start)
{
unsigned int gic_type, i;
rt_uint32_t cpumask = 1 << 0;
RT_ASSERT(index < ARM_GIC_MAX_NR);
_gic_table[index].dist_hw_base = dist_base;
_gic_table[index].offset = irq_start;
/* Find out how many interrupts are supported. */
gic_type = GIC_DIST_TYPE(dist_base);
_gic_max_irq = ((gic_type & 0x1f) + 1) * 32;
/*
* The GIC only supports up to 1020 interrupt sources.
* Limit this to either the architected maximum, or the
* platform maximum.
*/
if (_gic_max_irq > 1020)
_gic_max_irq = 1020;
if (_gic_max_irq > ARM_GIC_NR_IRQS)
_gic_max_irq = ARM_GIC_NR_IRQS;
#ifndef RT_PRETENT_AS_CPU0
/* If we are running on the second core, the GIC should have already been set
* up by the bootstrap processor. */
if ((rt_cpu_get_smp_id() & 0xF) != 0)
return 0;
#endif
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
GIC_DIST_CTRL(dist_base) = 0x0;
/* Set all global interrupts to be level triggered, active low. */
for (i = 32; i < _gic_max_irq; i += 16)
GIC_DIST_CONFIG(dist_base, i) = 0x0;
/* Set all global interrupts to this CPU only. */
for (i = 32; i < _gic_max_irq; i += 4)
GIC_DIST_TARGET(dist_base, i) = cpumask;
/* Set priority on all interrupts. */
for (i = 0; i < _gic_max_irq; i += 4)
GIC_DIST_PRI(dist_base, i) = 0xa0a0a0a0;
/* Disable all interrupts. */
for (i = 0; i < _gic_max_irq; i += 32)
GIC_DIST_ENABLE_CLEAR(dist_base, i) = 0xffffffff;
/* All interrupts defaults to IGROUP1(IRQ). */
for (i = 0; i < _gic_max_irq; i += 32)
GIC_DIST_IGROUP(dist_base, i) = 0xffffffff;
/* Enable group0 and group1 interrupt forwarding. */
GIC_DIST_CTRL(dist_base) = 0x03;
return 0;
}
int arm_gic_cpu_init(rt_uint32_t index, rt_uint32_t cpu_base)
{
RT_ASSERT(index < ARM_GIC_MAX_NR);
_gic_table[index].cpu_hw_base = cpu_base;
#ifndef RT_PRETENT_AS_CPU0
/* If we are running on the second core, the GIC should have already been set
* up by the bootstrap processor. */
if ((rt_cpu_get_smp_id() & 0xF) != 0)
return 0;
#endif
GIC_CPU_PRIMASK(cpu_base) = 0xf0;
/* Enable CPU interrupt */
GIC_CPU_CTRL(cpu_base) = 0x01;
return 0;
}
void arm_gic_set_group(rt_uint32_t index, int vector, int group)
{
/* As for GICv2, there are only group0 and group1. */
RT_ASSERT(group <= 1);
RT_ASSERT(vector < _gic_max_irq);
if (group == 0)
{
GIC_DIST_IGROUP(_gic_table[index].dist_hw_base,
vector) &= ~(1 << (vector % 32));
}
else if (group == 1)
{
GIC_DIST_IGROUP(_gic_table[index].dist_hw_base,
vector) |= (1 << (vector % 32));
}
}
void arm_gic_trigger(rt_uint32_t index, int target_cpu, int irq)
{
unsigned int reg;
RT_ASSERT(irq <= 15);
RT_ASSERT(target_cpu <= 255);
reg = (target_cpu << 16) | irq;
GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) = reg;
}
void arm_gic_clear_sgi(rt_uint32_t index, int target_cpu, int irq)
{
RT_ASSERT(irq <= 15);
RT_ASSERT(target_cpu <= 255);
GIC_DIST_CPENDSGI(_gic_table[index].dist_hw_base, irq) = target_cpu << (irq % 4);
}

View File

@@ -1,31 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
*/
#ifndef __GIC_H__
#define __GIC_H__
int arm_gic_dist_init(rt_uint32_t index, rt_uint32_t dist_base, int irq_start);
int arm_gic_cpu_init(rt_uint32_t index, rt_uint32_t cpu_base);
void arm_gic_mask(rt_uint32_t index, int irq);
void arm_gic_umask(rt_uint32_t index, int irq);
void arm_gic_set_cpu(rt_uint32_t index, int irq, unsigned int cpumask);
void arm_gic_set_group(rt_uint32_t index, int vector, int group);
int arm_gic_get_active_irq(rt_uint32_t index);
void arm_gic_ack(rt_uint32_t index, int irq);
void arm_gic_trigger(rt_uint32_t index, int target_cpu, int irq);
void arm_gic_clear_sgi(rt_uint32_t index, int target_cpu, int irq);
void arm_gic_dump_type(rt_uint32_t index);
#endif

View File

@@ -1,126 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-06 Bernard first version
* 2014-04-03 Grissiom port to VMM
*/
#include <rthw.h>
#include <rtthread.h>
#include "realview.h"
#include "gic.h"
#define MAX_HANDLERS NR_IRQS_PBA8
extern volatile rt_atomic_t rt_interrupt_nest;
/* exception and interrupt handler table */
struct rt_irq_desc isr_table[MAX_HANDLERS];
/* These variables will be accessed in the ISR, so we need to share them. */
rt_uint32_t rt_interrupt_from_thread rt_section(".bss.share.int");
rt_uint32_t rt_interrupt_to_thread rt_section(".bss.share.int");
rt_uint32_t rt_thread_switch_interrupt_flag rt_section(".bss.share.int");
const unsigned int VECTOR_BASE = 0x00;
extern void rt_cpu_vector_set_base(unsigned int addr);
extern int system_vectors;
static void rt_hw_vector_init(void)
{
}
/**
* This function will initialize hardware interrupt
*/
void rt_hw_interrupt_init(void)
{
rt_uint32_t gic_cpu_base;
rt_uint32_t gic_dist_base;
/* initialize vector table */
rt_hw_vector_init();
/* initialize exceptions table */
rt_memset(isr_table, 0x00, sizeof(isr_table));
/* initialize ARM GIC */
gic_dist_base = REALVIEW_GIC_DIST_BASE;
gic_cpu_base = REALVIEW_GIC_CPU_BASE;
arm_gic_dist_init(0, gic_dist_base, 0);
arm_gic_cpu_init(0, gic_cpu_base);
/*arm_gic_dump_type(0);*/
/* init interrupt nest, and context in thread sp */
rt_interrupt_nest = 0;
rt_interrupt_from_thread = 0;
rt_interrupt_to_thread = 0;
rt_thread_switch_interrupt_flag = 0;
}
/**
* This function will mask an interrupt.
* @param vector the interrupt number
*/
void rt_hw_interrupt_mask(int vector)
{
arm_gic_mask(0, vector);
}
/**
* This function will un-mask an interrupt.
* @param vector the interrupt number
*/
void rt_hw_interrupt_umask(int vector)
{
arm_gic_umask(0, vector);
}
/**
* This function will install an interrupt service routine for an interrupt.
* @param vector the interrupt number
* @param new_handler the interrupt service routine to be installed
* @param old_handler the old interrupt service routine
*/
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
void *param, const char *name)
{
rt_isr_handler_t old_handler = RT_NULL;
if (vector < MAX_HANDLERS)
{
old_handler = isr_table[vector].handler;
if (handler != RT_NULL)
{
#ifdef RT_USING_INTERRUPT_INFO
rt_strncpy(isr_table[vector].name, name, RT_NAME_MAX);
#endif /* RT_USING_INTERRUPT_INFO */
isr_table[vector].handler = handler;
isr_table[vector].param = param;
}
}
return old_handler;
}
/**
* Trigger a software IRQ
*
* Since we are running on a single core, the target CPU is always CPU0.
*/
void rt_hw_interrupt_trigger(int vector)
{
arm_gic_trigger(0, 1, vector);
}
void rt_hw_interrupt_clear(int vector)
{
arm_gic_clear_sgi(0, 1, vector);
}

View File

@@ -1,46 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-06 Bernard first version
*/
#ifndef __INTERRUPT_H__
#define __INTERRUPT_H__
#define INT_IRQ 0x00
#define INT_FIQ 0x01
#define INTC_REVISION(hw_base) REG32((hw_base) + 0x0)
#define INTC_SYSCONFIG(hw_base) REG32((hw_base) + 0x10)
#define INTC_SYSSTATUS(hw_base) REG32((hw_base) + 0x14)
#define INTC_SIR_IRQ(hw_base) REG32((hw_base) + 0x40)
#define INTC_SIR_FIQ(hw_base) REG32((hw_base) + 0x44)
#define INTC_CONTROL(hw_base) REG32((hw_base) + 0x48)
#define INTC_PROTECTION(hw_base) REG32((hw_base) + 0x4c)
#define INTC_IDLE(hw_base) REG32((hw_base) + 0x50)
#define INTC_IRQ_PRIORITY(hw_base) REG32((hw_base) + 0x60)
#define INTC_FIQ_PRIORITY(hw_base) REG32((hw_base) + 0x64)
#define INTC_THRESHOLD(hw_base) REG32((hw_base) + 0x68)
#define INTC_SICR(hw_base) REG32((hw_base) + 0x6c)
#define INTC_SCR(hw_base, n) REG32((hw_base) + 0x70 + ((n) * 0x04))
#define INTC_ITR(hw_base, n) REG32((hw_base) + 0x80 + ((n) * 0x20))
#define INTC_MIR(hw_base, n) REG32((hw_base) + 0x84 + ((n) * 0x20))
#define INTC_MIR_CLEAR(hw_base, n) REG32((hw_base) + 0x88 + ((n) * 0x20))
#define INTC_MIR_SET(hw_base, n) REG32((hw_base) + 0x8c + ((n) * 0x20))
#define INTC_ISR_SET(hw_base, n) REG32((hw_base) + 0x90 + ((n) * 0x20))
#define INTC_ISR_CLEAR(hw_base, n) REG32((hw_base) + 0x94 + ((n) * 0x20))
#define INTC_PENDING_IRQ(hw_base, n) REG32((hw_base) + 0x98 + ((n) * 0x20))
#define INTC_PENDING_FIQ(hw_base, n) REG32((hw_base) + 0x9c + ((n) * 0x20))
#define INTC_ILR(hw_base, n) REG32((hw_base) + 0x100 + ((n) * 0x04))
void rt_hw_interrupt_control(int vector, int priority, int route);
int rt_hw_interrupt_get_active(int fiq_irq);
void rt_hw_interrupt_ack(int fiq_irq);
void rt_hw_interrupt_trigger(int vector);
void rt_hw_interrupt_clear(int vector);
#endif

View File

@@ -1,203 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-01-10 bernard porting to AM1808
*/
#include <rtthread.h>
#include <rthw.h>
#include <board.h>
#include "cp15.h"
#define DESC_SEC (0x2)
#define CB (3<<2) //cache_on, write_back
#define CNB (2<<2) //cache_on, write_through
#define NCB (1<<2) //cache_off,WR_BUF on
#define NCNB (0<<2) //cache_off,WR_BUF off
#define AP_RW (3<<10) //supervisor=RW, user=RW
#define AP_RO (2<<10) //supervisor=RW, user=RO
#define XN (1<<4) // eXecute Never
#define DOMAIN_FAULT (0x0)
#define DOMAIN_CHK (0x1)
#define DOMAIN_NOTCHK (0x3)
#define DOMAIN0 (0x0<<5)
#define DOMAIN1 (0x1<<5)
#define DOMAIN0_ATTR (DOMAIN_CHK<<0)
#define DOMAIN1_ATTR (DOMAIN_FAULT<<2)
/* Read/Write, cache, write back */
#define RW_CB (AP_RW|DOMAIN0|CB|DESC_SEC)
/* Read/Write, cache, write through */
#define RW_CNB (AP_RW|DOMAIN0|CNB|DESC_SEC)
/* Read/Write without cache and write buffer */
#define RW_NCNB (AP_RW|DOMAIN0|NCNB|DESC_SEC)
/* Read/Write without cache and write buffer, no execute */
#define RW_NCNBXN (AP_RW|DOMAIN0|NCNB|DESC_SEC|XN)
/* Read/Write without cache and write buffer */
#define RW_FAULT (AP_RW|DOMAIN1|NCNB|DESC_SEC)
/* dump 2nd level page table */
void rt_hw_cpu_dump_page_table_2nd(rt_uint32_t *ptb)
{
int i;
int fcnt = 0;
for (i = 0; i < 256; i++)
{
rt_uint32_t pte2 = ptb[i];
if ((pte2 & 0x3) == 0)
{
if (fcnt == 0)
rt_kprintf(" ");
rt_kprintf("%04x: ", i);
fcnt++;
if (fcnt == 16)
{
rt_kprintf("fault\n");
fcnt = 0;
}
continue;
}
if (fcnt != 0)
{
rt_kprintf("fault\n");
fcnt = 0;
}
rt_kprintf(" %04x: %x: ", i, pte2);
if ((pte2 & 0x3) == 0x1)
{
rt_kprintf("L,ap:%x,xn:%d,texcb:%02x\n",
((pte2 >> 7) | (pte2 >> 4))& 0xf,
(pte2 >> 15) & 0x1,
((pte2 >> 10) | (pte2 >> 2)) & 0x1f);
}
else
{
rt_kprintf("S,ap:%x,xn:%d,texcb:%02x\n",
((pte2 >> 7) | (pte2 >> 4))& 0xf, pte2 & 0x1,
((pte2 >> 4) | (pte2 >> 2)) & 0x1f);
}
}
}
void rt_hw_cpu_dump_page_table(rt_uint32_t *ptb)
{
int i;
int fcnt = 0;
rt_kprintf("page table@%p\n", ptb);
for (i = 0; i < 1024*4; i++)
{
rt_uint32_t pte1 = ptb[i];
if ((pte1 & 0x3) == 0)
{
rt_kprintf("%03x: ", i);
fcnt++;
if (fcnt == 16)
{
rt_kprintf("fault\n");
fcnt = 0;
}
continue;
}
if (fcnt != 0)
{
rt_kprintf("fault\n");
fcnt = 0;
}
rt_kprintf("%03x: %08x: ", i, pte1);
if ((pte1 & 0x3) == 0x3)
{
rt_kprintf("LPAE\n");
}
else if ((pte1 & 0x3) == 0x1)
{
rt_kprintf("pte,ns:%d,domain:%d\n",
(pte1 >> 3) & 0x1, (pte1 >> 5) & 0xf);
/*
*rt_hw_cpu_dump_page_table_2nd((void*)((pte1 & 0xfffffc000)
* - 0x80000000 + 0xC0000000));
*/
}
else if (pte1 & (1 << 18))
{
rt_kprintf("super section,ns:%d,ap:%x,xn:%d,texcb:%02x\n",
(pte1 >> 19) & 0x1,
((pte1 >> 13) | (pte1 >> 10))& 0xf,
(pte1 >> 4) & 0x1,
((pte1 >> 10) | (pte1 >> 2)) & 0x1f);
}
else
{
rt_kprintf("section,ns:%d,ap:%x,"
"xn:%d,texcb:%02x,domain:%d\n",
(pte1 >> 19) & 0x1,
((pte1 >> 13) | (pte1 >> 10))& 0xf,
(pte1 >> 4) & 0x1,
(((pte1 & (0x7 << 12)) >> 10) |
((pte1 & 0x0c) >> 2)) & 0x1f,
(pte1 >> 5) & 0xf);
}
}
}
/* level1 page table, each entry for 1MB memory. */
volatile static unsigned long MMUTable[4*1024] __attribute__((aligned(16*1024)));
void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart,
rt_uint32_t vaddrEnd,
rt_uint32_t paddrStart,
rt_uint32_t attr)
{
volatile rt_uint32_t *pTT;
volatile int i, nSec;
pTT = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
nSec = (vaddrEnd >> 20) - (vaddrStart >> 20);
for(i = 0; i <= nSec; i++)
{
*pTT = attr | (((paddrStart >> 20) + i) << 20);
pTT++;
}
}
unsigned long rt_hw_set_domain_register(unsigned long domain_val)
{
unsigned long old_domain;
asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");
return old_domain;
}
void rt_hw_mmu_init(void)
{
rt_hw_cpu_dcache_disable();
rt_hw_cpu_icache_disable();
rt_cpu_mmu_disable();
/* set page table */
/* 4G 1:1 memory */
rt_hw_mmu_setmtt(0, 0xffffffff-1, 0, RW_CB);
/* IO memory region */
rt_hw_mmu_setmtt(0x44000000, 0x80000000-1, 0x44000000, RW_NCNBXN);
/*rt_hw_cpu_dump_page_table(MMUTable);*/
rt_hw_set_domain_register(0x55555555);
rt_cpu_tlb_set(MMUTable);
rt_cpu_mmu_enable();
rt_hw_cpu_icache_enable();
rt_hw_cpu_dcache_enable();
}

View File

@@ -1,20 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#include <rtthread.h>
#include "pmu.h"
void rt_hw_pmu_dump_feature(void)
{
unsigned long reg;
reg = rt_hw_pmu_get_control();
rt_kprintf("ARM PMU Implementor: %c, ID code: %02x, %d counters\n",
reg >> 24, (reg >> 16) & 0xff, (reg >> 11) & 0x1f);
RT_ASSERT(ARM_PMU_CNTER_NR == ((reg >> 11) & 0x1f));
}

View File

@@ -1,159 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __PMU_H__
#define __PMU_H__
#include "board.h"
/* Number of counters */
#define ARM_PMU_CNTER_NR 4
enum rt_hw_pmu_event_type {
ARM_PMU_EVENT_PMNC_SW_INCR = 0x00,
ARM_PMU_EVENT_L1_ICACHE_REFILL = 0x01,
ARM_PMU_EVENT_ITLB_REFILL = 0x02,
ARM_PMU_EVENT_L1_DCACHE_REFILL = 0x03,
ARM_PMU_EVENT_L1_DCACHE_ACCESS = 0x04,
ARM_PMU_EVENT_DTLB_REFILL = 0x05,
ARM_PMU_EVENT_MEM_READ = 0x06,
ARM_PMU_EVENT_MEM_WRITE = 0x07,
ARM_PMU_EVENT_INSTR_EXECUTED = 0x08,
ARM_PMU_EVENT_EXC_TAKEN = 0x09,
ARM_PMU_EVENT_EXC_EXECUTED = 0x0A,
ARM_PMU_EVENT_CID_WRITE = 0x0B,
};
/* Enable bit */
#define ARM_PMU_PMCR_E (0x01 << 0)
/* Event counter reset */
#define ARM_PMU_PMCR_P (0x01 << 1)
/* Cycle counter reset */
#define ARM_PMU_PMCR_C (0x01 << 2)
/* Cycle counter divider */
#define ARM_PMU_PMCR_D (0x01 << 3)
#ifdef __GNUC__
rt_inline void rt_hw_pmu_enable_cnt(int divide64)
{
unsigned long pmcr;
unsigned long pmcntenset;
asm volatile ("mrc p15, 0, %0, c9, c12, 0" : "=r"(pmcr));
pmcr |= ARM_PMU_PMCR_E | ARM_PMU_PMCR_P | ARM_PMU_PMCR_C;
if (divide64)
pmcr |= ARM_PMU_PMCR_D;
else
pmcr &= ~ARM_PMU_PMCR_D;
asm volatile ("mcr p15, 0, %0, c9, c12, 0" :: "r"(pmcr));
/* enable all the counters */
pmcntenset = ~0;
asm volatile ("mcr p15, 0, %0, c9, c12, 1" :: "r"(pmcntenset));
/* clear overflows(just in case) */
asm volatile ("mcr p15, 0, %0, c9, c12, 3" :: "r"(pmcntenset));
}
rt_inline unsigned long rt_hw_pmu_get_control(void)
{
unsigned long pmcr;
asm ("mrc p15, 0, %0, c9, c12, 0" : "=r"(pmcr));
return pmcr;
}
rt_inline unsigned long rt_hw_pmu_get_ceid(void)
{
unsigned long reg;
/* only PMCEID0 is supported, PMCEID1 is RAZ. */
asm ("mrc p15, 0, %0, c9, c12, 6" : "=r"(reg));
return reg;
}
rt_inline unsigned long rt_hw_pmu_get_cnten(void)
{
unsigned long pmcnt;
asm ("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcnt));
return pmcnt;
}
rt_inline void rt_hw_pmu_reset_cycle(void)
{
unsigned long pmcr;
asm volatile ("mrc p15, 0, %0, c9, c12, 0" : "=r"(pmcr));
pmcr |= ARM_PMU_PMCR_C;
asm volatile ("mcr p15, 0, %0, c9, c12, 0" :: "r"(pmcr));
asm volatile ("isb");
}
rt_inline void rt_hw_pmu_reset_event(void)
{
unsigned long pmcr;
asm volatile ("mrc p15, 0, %0, c9, c12, 0" : "=r"(pmcr));
pmcr |= ARM_PMU_PMCR_P;
asm volatile ("mcr p15, 0, %0, c9, c12, 0" :: "r"(pmcr));
asm volatile ("isb");
}
rt_inline unsigned long rt_hw_pmu_get_cycle(void)
{
unsigned long cyc;
asm volatile ("isb");
asm volatile ("mrc p15, 0, %0, c9, c13, 0" : "=r"(cyc));
return cyc;
}
rt_inline void rt_hw_pmu_select_counter(int idx)
{
RT_ASSERT(idx < ARM_PMU_CNTER_NR);
asm volatile ("mcr p15, 0, %0, c9, c12, 5" : : "r"(idx));
    /* Linux adds an isb here; the reason is not explained there. */
asm volatile ("isb");
}
rt_inline void rt_hw_pmu_select_event(int idx,
enum rt_hw_pmu_event_type eve)
{
RT_ASSERT(idx < ARM_PMU_CNTER_NR);
rt_hw_pmu_select_counter(idx);
asm volatile ("mcr p15, 0, %0, c9, c13, 1" : : "r"(eve));
}
rt_inline unsigned long rt_hw_pmu_read_counter(int idx)
{
unsigned long reg;
rt_hw_pmu_select_counter(idx);
asm volatile ("isb");
asm volatile ("mrc p15, 0, %0, c9, c13, 2" : "=r"(reg));
return reg;
}
rt_inline unsigned long rt_hw_pmu_get_ovsr(void)
{
unsigned long reg;
asm volatile ("isb");
asm ("mrc p15, 0, %0, c9, c12, 3" : "=r"(reg));
return reg;
}
rt_inline void rt_hw_pmu_clear_ovsr(unsigned long reg)
{
asm ("mcr p15, 0, %0, c9, c12, 3" : : "r"(reg));
asm volatile ("isb");
}
#endif
void rt_hw_pmu_dump_feature(void);
#endif /* end of include guard: __PMU_H__ */
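The inline helpers above compose into a straightforward measurement routine. A minimal sketch, assuming the PMU is accessible at the current privilege level; the function name and the placeholder workload are illustrative only.
/* Sketch: count cycles and L1 D-cache refills around a code region
 * using the helpers above. Names and the workload are placeholders. */
rt_inline void rt_hw_pmu_measure_demo(void)
{
    volatile int i;
    unsigned long cycles, refills;

    rt_hw_pmu_enable_cnt(0);                               /* no /64 divider */
    rt_hw_pmu_select_event(0, ARM_PMU_EVENT_L1_DCACHE_REFILL);
    rt_hw_pmu_reset_cycle();
    rt_hw_pmu_reset_event();

    for (i = 0; i < 100000; i++);                          /* placeholder workload */

    cycles  = rt_hw_pmu_get_cycle();
    refills = rt_hw_pmu_read_counter(0);
    rt_kprintf("cycles: %d, L1D refills: %d\n", (int)cycles, (int)refills);
}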

View File

@@ -1,63 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-23 Bernard the first version
* 2011-10-05 Bernard add thumb mode
*/
#include <rtthread.h>
#include <board.h>
/**
* @addtogroup AM33xx
*/
/*@{*/
/**
 * This function will initialize the thread stack
*
* @param tentry the entry of thread
* @param parameter the parameter of entry
* @param stack_addr the beginning stack address
* @param texit the function will be called when thread exit
*
* @return stack address
*/
rt_uint8_t *rt_hw_stack_init(void *tentry, void *parameter,
rt_uint8_t *stack_addr, void *texit)
{
rt_uint32_t *stk;
stack_addr += sizeof(rt_uint32_t);
stack_addr = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)stack_addr, 8);
stk = (rt_uint32_t *)stack_addr;
*(--stk) = (rt_uint32_t)tentry; /* entry point */
*(--stk) = (rt_uint32_t)texit; /* lr */
*(--stk) = 0xdeadbeef; /* r12 */
*(--stk) = 0xdeadbeef; /* r11 */
*(--stk) = 0xdeadbeef; /* r10 */
*(--stk) = 0xdeadbeef; /* r9 */
*(--stk) = 0xdeadbeef; /* r8 */
*(--stk) = 0xdeadbeef; /* r7 */
*(--stk) = 0xdeadbeef; /* r6 */
*(--stk) = 0xdeadbeef; /* r5 */
*(--stk) = 0xdeadbeef; /* r4 */
*(--stk) = 0xdeadbeef; /* r3 */
*(--stk) = 0xdeadbeef; /* r2 */
*(--stk) = 0xdeadbeef; /* r1 */
*(--stk) = (rt_uint32_t)parameter; /* r0 : argument */
/* cpsr */
if ((rt_uint32_t)tentry & 0x01)
*(--stk) = SVCMODE | 0x20; /* thumb mode */
else
*(--stk) = SVCMODE; /* arm mode */
/* return task's current stack address */
return (rt_uint8_t *)stk;
}
/*@}*/
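rt_hw_stack_init() is not called by application code; the kernel invokes it while a thread is being created, so the frame built above is exactly what the first context switch restores. A minimal sketch of the usual call path through rt_thread_init(); the thread name, stack size, priority and tick below are arbitrary example values.
/* Sketch: the kernel calls rt_hw_stack_init() internally from
 * rt_thread_init()/rt_thread_create(); applications only provide the stack. */
static rt_uint8_t demo_stack[1024];
static struct rt_thread demo_thread;

static void demo_entry(void *parameter)
{
    while (1)
    {
        rt_thread_delay(RT_TICK_PER_SECOND);
    }
}

void demo_thread_start(void)
{
    rt_thread_init(&demo_thread, "demo", demo_entry, RT_NULL,
                   demo_stack, sizeof(demo_stack),
                   20, 10);    /* priority 20, time slice 10 ticks */
    rt_thread_startup(&demo_thread);
}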

View File

@@ -1,237 +0,0 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
#include <rtconfig.h>
.equ Mode_USR, 0x10
.equ Mode_FIQ, 0x11
.equ Mode_IRQ, 0x12
.equ Mode_SVC, 0x13
.equ Mode_ABT, 0x17
.equ Mode_UND, 0x1B
.equ Mode_SYS, 0x1F
.equ I_Bit, 0x80 @ when I bit is set, IRQ is disabled
.equ F_Bit, 0x40 @ when F bit is set, FIQ is disabled
.equ UND_Stack_Size, 0x00000000
.equ SVC_Stack_Size, 0x00000100
.equ ABT_Stack_Size, 0x00000000
.equ RT_FIQ_STACK_PGSZ, 0x00000000
.equ RT_IRQ_STACK_PGSZ, 0x00000100
.equ USR_Stack_Size, 0x00000100
#define ISR_Stack_Size (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
.section .data.share.isr
/* stack */
.globl stack_start
.globl stack_top
.align 3
stack_start:
.rept ISR_Stack_Size
.byte 0
.endr
stack_top:
.text
/* reset entry */
.globl _reset
_reset:
/* set the cpu to SVC32 mode and disable interrupt */
mrs r0, cpsr
bic r0, r0, #0x1f
orr r0, r0, #0x13
msr cpsr_c, r0
/* setup stack */
bl stack_setup
/* clear .bss */
mov r0,#0 /* get a zero */
ldr r1,=__bss_start /* bss start */
ldr r2,=__bss_end /* bss end */
bss_loop:
cmp r1,r2 /* check if data to clear */
strlo r0,[r1],#4 /* clear 4 bytes */
blo bss_loop /* loop until done */
/* call C++ constructors of global objects */
ldr r0, =__ctors_start__
ldr r1, =__ctors_end__
ctor_loop:
cmp r0, r1
beq ctor_end
ldr r2, [r0], #4
stmfd sp!, {r0-r1}
mov lr, pc
bx r2
ldmfd sp!, {r0-r1}
b ctor_loop
ctor_end:
/* start RT-Thread Kernel */
ldr pc, _rtthread_startup
_rtthread_startup:
.word rtthread_startup
stack_setup:
ldr r0, =stack_top
@ Set the startup stack for svc
mov sp, r0
@ Enter Undefined Instruction Mode and set its Stack Pointer
msr cpsr_c, #Mode_UND|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #UND_Stack_Size
@ Enter Abort Mode and set its Stack Pointer
msr cpsr_c, #Mode_ABT|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #ABT_Stack_Size
@ Enter FIQ Mode and set its Stack Pointer
msr cpsr_c, #Mode_FIQ|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #RT_FIQ_STACK_PGSZ
@ Enter IRQ Mode and set its Stack Pointer
msr cpsr_c, #Mode_IRQ|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #RT_IRQ_STACK_PGSZ
/* come back to SVC mode */
msr cpsr_c, #Mode_SVC|I_Bit|F_Bit
bx lr
/* exception handlers: undef, swi, padt, dabt, resv, irq, fiq */
.section .text.isr, "ax"
.align 5
.globl vector_fiq
vector_fiq:
stmfd sp!,{r0-r7,lr}
bl rt_hw_trap_fiq
ldmfd sp!,{r0-r7,lr}
subs pc, lr, #4
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_current_thread
.globl vmm_thread
.globl vmm_virq_check
.align 5
.globl vector_irq
vector_irq:
stmfd sp!, {r0-r12,lr}
bl rt_interrupt_enter
bl rt_hw_trap_irq
bl rt_interrupt_leave
@ if rt_thread_switch_interrupt_flag set, jump to
@ rt_hw_context_switch_interrupt_do and don't return
ldr r0, =rt_thread_switch_interrupt_flag
ldr r1, [r0]
cmp r1, #1
beq rt_hw_context_switch_interrupt_do
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4
rt_hw_context_switch_interrupt_do:
mov r1, #0 @ clear flag
str r1, [r0]
mov r1, sp @ r1 point to {r0-r3} in stack
add sp, sp, #4*4
ldmfd sp!, {r4-r12,lr}@ reload saved registers
mrs r0, spsr @ get cpsr of interrupt thread
sub r2, lr, #4 @ save old task's pc to r2
    @ Switch to SVC mode with interrupts disabled. If the usr mode guest was
    @ interrupted, this just switches to the kernel-space stack, so saving
    @ the registers in kernel space won't trigger a data abort.
msr cpsr_c, #I_Bit|F_Bit|Mode_SVC
stmfd sp!, {r2} @ push old task's pc
stmfd sp!, {r4-r12,lr}@ push old task's lr,r12-r4
ldmfd r1, {r1-r4} @ restore r0-r3 of the interrupt thread
stmfd sp!, {r1-r4} @ push old task's r0-r3
stmfd sp!, {r0} @ push old task's cpsr
ldr r4, =rt_interrupt_from_thread
ldr r5, [r4]
str sp, [r5] @ store sp in preempted tasks's TCB
ldr r6, =rt_interrupt_to_thread
ldr r6, [r6]
ldr sp, [r6] @ get new task's stack pointer
ldmfd sp!, {r4} @ pop new task's cpsr to spsr
msr spsr_cxsf, r4
ldmfd sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr
.macro push_svc_reg
sub sp, sp, #17 * 4 @/* Sizeof(struct rt_hw_exp_stack) */
stmia sp, {r0 - r12} @/* Calling r0-r12 */
mov r0, sp
mrs r6, spsr @/* Save CPSR */
str lr, [r0, #15*4] @/* Push PC */
str r6, [r0, #16*4] @/* Push CPSR */
cps #Mode_SVC
str sp, [r0, #13*4] @/* Save calling SP */
str lr, [r0, #14*4] @/* Save calling PC */
.endm
.align 5
.globl vector_swi
vector_swi:
push_svc_reg
bl rt_hw_trap_swi
b .
.align 5
.globl vector_undef
vector_undef:
push_svc_reg
bl rt_hw_trap_undef
b .
.align 5
.globl vector_pabt
vector_pabt:
push_svc_reg
bl rt_hw_trap_pabt
b .
.align 5
.globl vector_dabt
vector_dabt:
push_svc_reg
bl rt_hw_trap_dabt
b .
.align 5
.globl vector_resv
vector_resv:
push_svc_reg
bl rt_hw_trap_resv
b .

View File

@@ -1,187 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
*/
#include <rtthread.h>
#include <rthw.h>
#include <board.h>
#include "armv7.h"
#include "gic.h"
extern struct rt_thread *rt_current_thread;
#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
extern long list_thread(void);
#endif
/**
* this function will show registers of CPU
*
* @param regs the registers point
*/
void rt_hw_show_register(struct rt_hw_exp_stack *regs)
{
rt_kprintf("Execption:\n");
rt_kprintf("r00:0x%08x r01:0x%08x r02:0x%08x r03:0x%08x\n", regs->r0, regs->r1, regs->r2, regs->r3);
rt_kprintf("r04:0x%08x r05:0x%08x r06:0x%08x r07:0x%08x\n", regs->r4, regs->r5, regs->r6, regs->r7);
rt_kprintf("r08:0x%08x r09:0x%08x r10:0x%08x\n", regs->r8, regs->r9, regs->r10);
rt_kprintf("fp :0x%08x ip :0x%08x\n", regs->fp, regs->ip);
rt_kprintf("sp :0x%08x lr :0x%08x pc :0x%08x\n", regs->sp, regs->lr, regs->pc);
rt_kprintf("cpsr:0x%08x\n", regs->cpsr);
}
/**
 * When the processor comes across an instruction it cannot handle,
 * it takes the undefined instruction trap.
*
* @param regs system registers
*
* @note never invoke this function in application
*/
void rt_hw_trap_undef(struct rt_hw_exp_stack *regs)
{
rt_kprintf("undefined instruction:\n");
rt_hw_show_register(regs);
#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
list_thread();
#endif
rt_hw_cpu_shutdown();
}
/**
* The software interrupt instruction (SWI) is used for entering
* Supervisor mode, usually to request a particular supervisor
* function.
*
* @param regs system registers
*
* @note never invoke this function in application
*/
void rt_hw_trap_swi(struct rt_hw_exp_stack *regs)
{
rt_kprintf("software interrupt:\n");
rt_hw_show_register(regs);
#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
list_thread();
#endif
rt_hw_cpu_shutdown();
}
/**
 * A prefetch abort indicates that the current memory access cannot be
 * completed; it is raised during an instruction prefetch.
*
* @param regs system registers
*
* @note never invoke this function in application
*/
void rt_hw_trap_pabt(struct rt_hw_exp_stack *regs)
{
rt_kprintf("prefetch abort:\n");
rt_hw_show_register(regs);
#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
list_thread();
#endif
rt_hw_cpu_shutdown();
}
/**
 * A data abort indicates that the current memory access cannot be
 * completed; it is raised during a data access.
*
* @param regs system registers
*
* @note never invoke this function in application
*/
void rt_hw_trap_dabt(struct rt_hw_exp_stack *regs)
{
rt_kprintf("data abort:");
rt_hw_show_register(regs);
#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
list_thread();
#endif
rt_hw_cpu_shutdown();
}
/**
 * Normally, the system should never reach here
*
* @param regs system registers
*
* @note never invoke this function in application
*/
void rt_hw_trap_resv(struct rt_hw_exp_stack *regs)
{
rt_kprintf("reserved trap:\n");
rt_hw_show_register(regs);
#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
list_thread();
#endif
rt_hw_cpu_shutdown();
}
#define GIC_ACK_INTID_MASK 0x000003ff
void rt_hw_trap_irq(void)
{
void *param;
unsigned long ir;
unsigned long fullir;
rt_isr_handler_t isr_func;
extern struct rt_irq_desc isr_table[];
fullir = arm_gic_get_active_irq(0);
ir = fullir & GIC_ACK_INTID_MASK;
if (ir == 1023)
{
/* Spurious interrupt */
return;
}
/* get interrupt service routine */
isr_func = isr_table[ir].handler;
#ifdef RT_USING_INTERRUPT_INFO
isr_table[ir].counter++;
#endif
if (isr_func)
{
/* Interrupt for myself. */
param = isr_table[ir].param;
/* turn to interrupt service routine */
isr_func(ir, param);
}
/* end of interrupt */
arm_gic_ack(0, fullir);
}
void rt_hw_trap_fiq(void)
{
void *param;
unsigned long ir;
unsigned long fullir;
rt_isr_handler_t isr_func;
extern struct rt_irq_desc isr_table[];
fullir = arm_gic_get_active_irq(0);
ir = fullir & GIC_ACK_INTID_MASK;
/* get interrupt service routine */
isr_func = isr_table[ir].handler;
param = isr_table[ir].param;
/* turn to interrupt service routine */
isr_func(ir, param);
/* end of interrupt */
arm_gic_ack(0, fullir);
}
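rt_hw_trap_irq() only dispatches handlers that have been registered in isr_table[] through the interrupt-management API. A hedged sketch of hooking one interrupt; the IRQ number and all names are hypothetical for this board.
/* Sketch: register a handler so rt_hw_trap_irq() can dispatch it.
 * DEMO_TIMER_IRQ is a placeholder, not a real IRQ number for this board. */
#define DEMO_TIMER_IRQ  29

static void demo_timer_isr(int vector, void *param)
{
    /* acknowledge the peripheral here, then do the actual work */
}

void demo_timer_irq_setup(void)
{
    rt_hw_interrupt_install(DEMO_TIMER_IRQ, demo_timer_isr, RT_NULL, "dtimer");
    rt_hw_interrupt_umask(DEMO_TIMER_IRQ);
}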

View File

@@ -1,52 +0,0 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
#include <rtconfig.h>
.section .vectors, "ax"
.code 32
.globl system_vectors
system_vectors:
ldr pc, _vector_reset
ldr pc, _vector_undef
ldr pc, _vector_swi
ldr pc, _vector_pabt
ldr pc, _vector_dabt
ldr pc, _vector_resv
ldr pc, _vector_irq
ldr pc, _vector_fiq
.globl _reset
.globl vector_undef
.globl vector_swi
.globl vector_pabt
.globl vector_dabt
.globl vector_resv
.globl vector_irq
.globl vector_fiq
_vector_reset:
.word _reset
_vector_undef:
.word vector_undef
_vector_swi:
.word vector_swi
_vector_pabt:
.word vector_pabt
_vector_dabt:
.word vector_dabt
_vector_resv:
.word vector_resv
_vector_irq:
.word vector_irq
_vector_fiq:
.word vector_fiq
.balignl 16,0xdeadbeef