Mirror of https://github.com/RT-Thread/rt-thread.git
Use AStyle.exe to unify the code formatting
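For reference, an Artistic Style invocation along the lines of the one below reproduces the style visible in this diff (Allman braces, 4-space indentation, operators padded with spaces, a space after if/for/while keywords, and the pointer asterisk attached to the variable name). The exact options and file list used for this commit are not recorded on this page, so treat the command as an illustrative sketch rather than the one that was actually run:

    # hypothetical AStyle command line; replace <source files> with the files being formatted
    astyle --style=allman --indent=spaces=4 --pad-oper --pad-header --unpad-paren --align-pointer=name --convert-tabs --suffix=none --lineend=linux <source files>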
@@ -204,7 +204,7 @@ RTM_EXPORT(rt_data_queue_push);
  * When the return value is RT_ETIMEOUT, it means the specified time out.
  */
 rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
-                           const void** data_ptr,
+                           const void **data_ptr,
                            rt_size_t *size,
                            rt_int32_t timeout)
 {
@@ -327,7 +327,7 @@ RTM_EXPORT(rt_data_queue_pop);
  * When the return value is -RT_EEMPTY, it means the data queue is empty.
  */
 rt_err_t rt_data_queue_peek(struct rt_data_queue *queue,
-                            const void** data_ptr,
+                            const void **data_ptr,
                             rt_size_t *size)
 {
     rt_base_t level;

@@ -54,16 +54,16 @@ static int pipe_fops_open(struct dfs_fd *fd)
 
     switch (fd->flags & O_ACCMODE)
     {
-        case O_RDONLY:
-            pipe->readers ++;
-            break;
-        case O_WRONLY:
-            pipe->writers ++;
-            break;
-        case O_RDWR:
-            pipe->readers ++;
-            pipe->writers ++;
-            break;
+    case O_RDONLY:
+        pipe->readers ++;
+        break;
+    case O_WRONLY:
+        pipe->writers ++;
+        break;
+    case O_RDWR:
+        pipe->readers ++;
+        pipe->writers ++;
+        break;
     }
     device->ref_count ++;
 
@@ -95,26 +95,26 @@ static int pipe_fops_close(struct dfs_fd *fd)
 
     switch (fd->flags & O_ACCMODE)
     {
-        case O_RDONLY:
-            pipe->readers --;
-            break;
-        case O_WRONLY:
-            pipe->writers --;
-            break;
-        case O_RDWR:
-            pipe->readers --;
-            pipe->writers --;
-            break;
+    case O_RDONLY:
+        pipe->readers --;
+        break;
+    case O_WRONLY:
+        pipe->writers --;
+        break;
+    case O_RDWR:
+        pipe->readers --;
+        pipe->writers --;
+        break;
     }
 
     if (pipe->writers == 0)
     {
-        rt_wqueue_wakeup(&(pipe->reader_queue), (void*)(POLLIN | POLLERR | POLLHUP));
+        rt_wqueue_wakeup(&(pipe->reader_queue), (void *)(POLLIN | POLLERR | POLLHUP));
     }
 
     if (pipe->readers == 0)
     {
-        rt_wqueue_wakeup(&(pipe->writer_queue), (void*)(POLLOUT | POLLERR | POLLHUP));
+        rt_wqueue_wakeup(&(pipe->writer_queue), (void *)(POLLOUT | POLLERR | POLLHUP));
     }
 
     if (device->ref_count == 1)
@@ -162,15 +162,15 @@ static int pipe_fops_ioctl(struct dfs_fd *fd, int cmd, void *args)
 
     switch (cmd)
     {
-        case FIONREAD:
-            *((int*)args) = rt_ringbuffer_data_len(pipe->fifo);
-            break;
-        case FIONWRITE:
-            *((int*)args) = rt_ringbuffer_space_len(pipe->fifo);
-            break;
-        default:
-            ret = -EINVAL;
-            break;
+    case FIONREAD:
+        *((int *)args) = rt_ringbuffer_data_len(pipe->fifo);
+        break;
+    case FIONWRITE:
+        *((int *)args) = rt_ringbuffer_space_len(pipe->fifo);
+        break;
+    default:
+        ret = -EINVAL;
+        break;
     }
 
     return ret;
@@ -224,14 +224,14 @@ static int pipe_fops_read(struct dfs_fd *fd, void *buf, size_t count)
             }
 
             rt_mutex_release(&pipe->lock);
-            rt_wqueue_wakeup(&(pipe->writer_queue), (void*)POLLOUT);
+            rt_wqueue_wakeup(&(pipe->writer_queue), (void *)POLLOUT);
             rt_wqueue_wait(&(pipe->reader_queue), 0, -1);
             rt_mutex_take(&(pipe->lock), RT_WAITING_FOREVER);
         }
     }
 
     /* wakeup writer */
-    rt_wqueue_wakeup(&(pipe->writer_queue), (void*)POLLOUT);
+    rt_wqueue_wakeup(&(pipe->writer_queue), (void *)POLLOUT);
 
 out:
     rt_mutex_release(&pipe->lock);
@@ -270,7 +270,7 @@ static int pipe_fops_write(struct dfs_fd *fd, const void *buf, size_t count)
     if (count == 0)
         return 0;
 
-    pbuf = (uint8_t*)buf;
+    pbuf = (uint8_t *)buf;
     rt_mutex_take(&pipe->lock, -1);
 
     while (1)
@@ -305,7 +305,7 @@ static int pipe_fops_write(struct dfs_fd *fd, const void *buf, size_t count)
         }
 
         rt_mutex_release(&pipe->lock);
-        rt_wqueue_wakeup(&(pipe->reader_queue), (void*)POLLIN);
+        rt_wqueue_wakeup(&(pipe->reader_queue), (void *)POLLIN);
         /* pipe full, waiting on suspended write list */
         rt_wqueue_wait(&(pipe->writer_queue), 0, -1);
         rt_mutex_take(&pipe->lock, -1);
@@ -314,7 +314,7 @@ static int pipe_fops_write(struct dfs_fd *fd, const void *buf, size_t count)
 
     if (wakeup)
     {
-        rt_wqueue_wakeup(&(pipe->reader_queue), (void*)POLLIN);
+        rt_wqueue_wakeup(&(pipe->reader_queue), (void *)POLLIN);
     }
 
 out:
@@ -492,7 +492,7 @@ static rt_size_t rt_pipe_read(rt_device_t device, rt_off_t pos, void *buffer, rt
     }
     if (count == 0) return 0;
 
-    pbuf = (uint8_t*)buffer;
+    pbuf = (uint8_t *)buffer;
     rt_mutex_take(&(pipe->lock), RT_WAITING_FOREVER);
 
     while (read_bytes < count)
@@ -534,7 +534,7 @@ static rt_size_t rt_pipe_write(rt_device_t device, rt_off_t pos, const void *buf
     }
     if (count == 0) return 0;
 
-    pbuf = (uint8_t*)buffer;
+    pbuf = (uint8_t *)buffer;
     rt_mutex_take(&pipe->lock, -1);
 
     while (write_bytes < count)
@@ -628,7 +628,7 @@ rt_pipe_t *rt_pipe_create(const char *name, int bufsz)
         return RT_NULL;
     }
 #if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_POSIX_PIPE)
-    dev->fops = (void*)&pipe_fops;
+    dev->fops = (void *)&pipe_fops;
 #endif
 
     return pipe;

@@ -126,7 +126,7 @@ static rt_rbb_blk_t find_empty_blk_in_set(rt_rbb_t rbb)
     return blk;
 }
 
-rt_inline void list_append(rt_rbb_t rbb, rt_slist_t* n)
+rt_inline void list_append(rt_rbb_t rbb, rt_slist_t *n)
 {
     /* append the node to the tail */
     rbb->tail->next = n;
@@ -135,15 +135,15 @@ rt_inline void list_append(rt_rbb_t rbb, rt_slist_t* n)
     rbb->tail = n;
 }
 
-rt_inline rt_slist_t *list_remove(rt_rbb_t rbb, rt_slist_t* n)
+rt_inline rt_slist_t *list_remove(rt_rbb_t rbb, rt_slist_t *n)
 {
-    rt_slist_t* l = &rbb->blk_list;
-    struct rt_slist_node* node = l;
+    rt_slist_t *l = &rbb->blk_list;
+    struct rt_slist_node *node = l;
 
     /* remove slist head */
     while (node->next && node->next != n) node = node->next;
     /* remove node */
-    if (node->next != (rt_slist_t*)0)
+    if (node->next != (rt_slist_t *)0)
     {
         node->next = node->next->next;
         n->next = RT_NULL;
@@ -441,8 +441,8 @@ rt_size_t rt_rbb_blk_queue_get(rt_rbb_t rbb, rt_size_t queue_data_len, rt_rbb_bl
          * 3. the data_total_size will out of range
          */
         if (block->status != RT_RBB_BLK_PUT ||
-            last_block->buf > block->buf ||
-            data_total_size + block->size > queue_data_len)
+                last_block->buf > block->buf ||
+                data_total_size + block->size > queue_data_len)
         {
             break;
         }

@@ -91,11 +91,11 @@ rt_size_t rt_ringbuffer_put(struct rt_ringbuffer *rb,
     }
 
     rt_memcpy(&rb->buffer_ptr[rb->write_index],
-        &ptr[0],
-        rb->buffer_size - rb->write_index);
+              &ptr[0],
+              rb->buffer_size - rb->write_index);
     rt_memcpy(&rb->buffer_ptr[0],
-        &ptr[rb->buffer_size - rb->write_index],
-        length - (rb->buffer_size - rb->write_index));
+              &ptr[rb->buffer_size - rb->write_index],
+              length - (rb->buffer_size - rb->write_index));
 
     /* we are going into the other side of the mirror */
     rb->write_mirror = ~rb->write_mirror;
@@ -115,8 +115,8 @@ RTM_EXPORT(rt_ringbuffer_put);
  * @return Return the data size we put into the ring buffer.
  */
 rt_size_t rt_ringbuffer_put_force(struct rt_ringbuffer *rb,
-        const rt_uint8_t *ptr,
-        rt_uint16_t length)
+                                  const rt_uint8_t *ptr,
+                                  rt_uint16_t length)
 {
     rt_uint16_t space_length;
 
@@ -145,11 +145,11 @@ rt_size_t rt_ringbuffer_put_force(struct rt_ringbuffer *rb,
     }
 
     rt_memcpy(&rb->buffer_ptr[rb->write_index],
-        &ptr[0],
-        rb->buffer_size - rb->write_index);
+              &ptr[0],
+              rb->buffer_size - rb->write_index);
     rt_memcpy(&rb->buffer_ptr[0],
-        &ptr[rb->buffer_size - rb->write_index],
-        length - (rb->buffer_size - rb->write_index));
+              &ptr[rb->buffer_size - rb->write_index],
+              length - (rb->buffer_size - rb->write_index));
 
     /* we are going into the other side of the mirror */
     rb->write_mirror = ~rb->write_mirror;
@@ -205,11 +205,11 @@ rt_size_t rt_ringbuffer_get(struct rt_ringbuffer *rb,
     }
 
     rt_memcpy(&ptr[0],
-        &rb->buffer_ptr[rb->read_index],
-        rb->buffer_size - rb->read_index);
+              &rb->buffer_ptr[rb->read_index],
+              rb->buffer_size - rb->read_index);
     rt_memcpy(&ptr[rb->buffer_size - rb->read_index],
-        &rb->buffer_ptr[0],
-        length - (rb->buffer_size - rb->read_index));
+              &rb->buffer_ptr[0],
+              length - (rb->buffer_size - rb->read_index));
 
     /* we are going into the other side of the mirror */
     rb->read_mirror = ~rb->read_mirror;
@@ -246,7 +246,7 @@ rt_size_t rt_ringbuffer_peek(struct rt_ringbuffer *rb, rt_uint8_t **ptr)
 
     *ptr = &rb->buffer_ptr[rb->read_index];
 
-    if((rt_size_t)(rb->buffer_size - rb->read_index) > size)
+    if ((rt_size_t)(rb->buffer_size - rb->read_index) > size)
     {
         rb->read_index += size;
         return size;
@@ -281,7 +281,7 @@ rt_size_t rt_ringbuffer_putchar(struct rt_ringbuffer *rb, const rt_uint8_t ch)
     rb->buffer_ptr[rb->write_index] = ch;
 
     /* flip mirror */
-    if (rb->write_index == rb->buffer_size-1)
+    if (rb->write_index == rb->buffer_size - 1)
     {
         rb->write_mirror = ~rb->write_mirror;
         rb->write_index = 0;
@@ -314,7 +314,7 @@ rt_size_t rt_ringbuffer_putchar_force(struct rt_ringbuffer *rb, const rt_uint8_t
     rb->buffer_ptr[rb->write_index] = ch;
 
     /* flip mirror */
-    if (rb->write_index == rb->buffer_size-1)
+    if (rb->write_index == rb->buffer_size - 1)
     {
         rb->write_mirror = ~rb->write_mirror;
         rb->write_index = 0;
@@ -355,7 +355,7 @@ rt_size_t rt_ringbuffer_getchar(struct rt_ringbuffer *rb, rt_uint8_t *ch)
     /* put byte */
     *ch = rb->buffer_ptr[rb->read_index];
 
-    if (rb->read_index == rb->buffer_size-1)
+    if (rb->read_index == rb->buffer_size - 1)
     {
         rb->read_mirror = ~rb->read_mirror;
         rb->read_index = 0;

@@ -90,7 +90,7 @@ static void _workqueue_thread_entry(void *parameter)
 }
 
 static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
-        struct rt_work *work, rt_tick_t ticks)
+                                       struct rt_work *work, rt_tick_t ticks)
 {
     rt_base_t level;
     rt_err_t err;
@@ -116,7 +116,7 @@ static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
 
     /* whether the workqueue is doing work */
     if (queue->work_current == RT_NULL &&
-        ((queue->work_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND))
+            ((queue->work_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND))
     {
         /* resume work thread */
         rt_thread_resume(queue->work_thread);
@@ -140,7 +140,7 @@ static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
     else
     {
         rt_timer_init(&(work->timer), "work", _delayed_work_timeout_handler,
-                work, ticks, RT_TIMER_FLAG_ONE_SHOT | RT_TIMER_FLAG_SOFT_TIMER);
+                      work, ticks, RT_TIMER_FLAG_ONE_SHOT | RT_TIMER_FLAG_SOFT_TIMER);
         work->flags |= RT_WORK_STATE_SUBMITTING;
     }
     work->workqueue = queue;
@@ -198,7 +198,7 @@ static void _delayed_work_timeout_handler(void *parameter)
     }
     /* whether the workqueue is doing work */
     if (queue->work_current == RT_NULL &&
-        ((queue->work_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND))
+            ((queue->work_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND))
     {
         /* resume work thread */
         rt_thread_resume(queue->work_thread);
@@ -356,7 +356,7 @@ rt_err_t rt_workqueue_urgent_work(struct rt_workqueue *queue, struct rt_work *wo
     rt_list_insert_after(&queue->work_list, &(work->list));
     /* whether the workqueue is doing work */
     if (queue->work_current == RT_NULL &&
-        ((queue->work_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND))
+            ((queue->work_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND))
     {
         /* resume work thread */
         rt_thread_resume(queue->work_thread);