forked from Imagelibrary/rtems
cpukit/jffs2: Revert to non-granular locking
Revert JFFS2 to non-granular locking. The superblock is now made available to the delayed work items it owns, so that delayed work processing can take the filesystem lock and ensure exclusive access to shared resources. This change only affects NAND systems. Moving forward with granular locking would first require a significant investment of time in a test harness that does not require hardware, so that the change can get a reasonable amount of test coverage.
committed by Joel Sherrill
parent 36960f9d9b
commit efc36cb467
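For context, "non-granular" means a single lock guards the whole filesystem instance: every RTEMS JFFS2 entry point brackets its work with rtems_jffs2_do_lock()/rtems_jffs2_do_unlock() on the mount's super_block, which is what lets the Linux-compat locking shims in the hunks below collapse to no-ops. A minimal sketch of that pattern, assuming a recursive per-mount mutex (the s_mutex field name and the sketch_* helper names are illustrative, not the port's exact code):

#include <rtems/thread.h>

/* Illustrative per-mount state; the real struct super_block appears in
 * the last hunk below. The s_mutex field name is an assumption. */
struct sketch_super_block {
	rtems_recursive_mutex s_mutex; /* recursive, so nested entry points are safe */
};

/* Every filesystem entry point (and now delayed work processing)
 * brackets its work with this pair, giving whole-FS mutual exclusion. */
static void sketch_do_lock(struct sketch_super_block *sb)
{
	rtems_recursive_mutex_lock(&sb->s_mutex);
}

static void sketch_do_unlock(struct sketch_super_block *sb)
{
	rtems_recursive_mutex_unlock(&sb->s_mutex);
}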
@@ -1,19 +1,30 @@
 #ifndef __LINUX_MUTEX_H
 #define __LINUX_MUTEX_H
 
-#include <rtems/thread.h>
+struct mutex { };
 
-struct mutex { rtems_mutex r_m; };
-
 #define DEFINE_MUTEX(m) struct mutex m
 
-#define mutex_init(m) rtems_mutex_init(&(m)->r_m, "JFFS2 Mutex");
+static inline void mutex_init(struct mutex *m)
+{
+	(void) m;
+}
 
-#define mutex_lock(m) rtems_mutex_lock(&(m)->r_m);
+static inline void mutex_lock(struct mutex *m)
+{
+	(void) m;
+}
 
-#define mutex_lock_interruptible(m) ({ mutex_lock(m); 0; })
+static inline int mutex_lock_interruptible(struct mutex *m)
+{
+	(void) m;
+	return 0;
+}
 
-#define mutex_unlock(m) rtems_mutex_unlock(&(m)->r_m);
+static inline void mutex_unlock(struct mutex *m)
+{
+	(void) m;
+}
 
 #define mutex_is_locked(m) 1
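With the stubs restored, the Linux-derived JFFS2 core keeps its mutex_lock()/mutex_unlock() calls, but they compile to nothing; exclusion comes from the per-mount lock already held by the entry points. A hypothetical caller pattern (sketch; the sketch_* names are illustrative):

DEFINE_MUTEX(sketch_sem);

static void sketch_touch_inode_state(void)
{
	mutex_lock(&sketch_sem);   /* no-op after this revert */
	/* ... mutate shared per-inode JFFS2 state ... */
	mutex_unlock(&sketch_sem); /* no-op after this revert */
}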
@@ -1,20 +1,16 @@
 #ifndef __LINUX_RWSEM_H__
 #define __LINUX_RWSEM_H__
 
-#include <pthread.h>
-
-struct rw_semaphore {
-	pthread_rwlock_t lock;
-};
+struct rw_semaphore {};
 
-#define init_rwsem(rwsem) pthread_rwlock_init(&(rwsem)->lock, NULL)
+#define init_rwsem(rwsem)
 
-#define down_read(rwsem) pthread_rwlock_rdlock(&(rwsem)->lock)
+#define down_read(rwsem)
 
-#define down_write(rwsem) pthread_rwlock_wrlock(&(rwsem)->lock)
+#define down_write(rwsem)
 
-#define up_read(rwsem) pthread_rwlock_unlock(&(rwsem)->lock)
+#define up_read(rwsem)
 
-#define up_write(rwsem) pthread_rwlock_unlock(&(rwsem)->lock)
+#define up_write(rwsem)
 
 #endif /* __LINUX_RWSEM_H__ */
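The same applies to the reader/writer semaphore shim: the granular version mapped it onto POSIX rwlocks, while the restored stubs expand to nothing, so sections that could previously run as concurrent readers are simply serialized by the per-mount lock. A hypothetical reader (sketch; the sketch_* names are illustrative):

struct rw_semaphore sketch_map_sem;

static void sketch_read_block_map(void)
{
	down_read(&sketch_map_sem); /* expands to nothing after this revert */
	/* ... read shared erase-block bookkeeping ... */
	up_read(&sketch_map_sem);   /* expands to nothing after this revert */
}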
@@ -1,16 +1,34 @@
 #ifndef __LINUX_SPINLOCK_H__
 #define __LINUX_SPINLOCK_H__
 
-#include <rtems/thread.h>
+typedef struct { } spinlock_t;
 
-typedef struct { rtems_mutex r_m; } spinlock_t;
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
 
-#define DEFINE_SPINLOCK(x) spinlock_t x
+#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
 
-#define spin_lock_init(x) rtems_mutex_init(&(x)->r_m, "JFFS2 Spinlock");
+#define spin_lock_init(lock) \
+	CYG_MACRO_START; \
+	CYG_UNUSED_PARAM(spinlock_t *, lock); \
+	CYG_MACRO_END
 
-#define spin_lock(x) rtems_mutex_lock(&(x)->r_m);
+#define spin_lock(lock) \
+	CYG_MACRO_START; \
+	CYG_UNUSED_PARAM(spinlock_t *, lock); \
+	CYG_MACRO_END
 
-#define spin_unlock(x) rtems_mutex_unlock(&(x)->r_m);
+#define spin_unlock(lock) \
+	CYG_MACRO_START; \
+	CYG_UNUSED_PARAM(spinlock_t *, lock); \
+	CYG_MACRO_END
+
+#define spin_lock_bh(lock) \
+	CYG_MACRO_START; \
+	CYG_UNUSED_PARAM(spinlock_t *, lock); \
+	CYG_MACRO_END
+
+#define spin_unlock_bh(lock) \
+	CYG_MACRO_START; \
+	CYG_UNUSED_PARAM(spinlock_t *, lock); \
+	CYG_MACRO_END
 
 #endif /* __LINUX_SPINLOCK_H__ */
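The restored spinlock stubs reuse eCos helper macros, a leftover of the JFFS2 port's eCos ancestry. Approximate stand-ins for readers unfamiliar with CYG_* (a sketch, not the exact eCos definitions):

/* Approximate expansions of the eCos helpers used above. */
#define CYG_MACRO_START do {
#define CYG_MACRO_END   } while (0)
#define CYG_UNUSED_PARAM(type, name) { type sketch_unused = (name); (void) sketch_unused; }

Each stub therefore reduces to an empty do/while block that only silences unused-parameter warnings.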
@@ -24,6 +24,8 @@ struct delayed_work {
 	volatile bool pending;
 	volatile uint64_t execution_time;
 	work_callback_t callback;
+	/* Superblock provided for locking */
+	struct super_block *sb;
 };
 
 #define to_delayed_work(work) RTEMS_CONTAINER_OF(work, struct delayed_work, work)
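Storing the owning superblock in the work item is what lets the worker find the right lock later. Any code that queues delayed work must populate sb first, as rtems_jffs2_initialize() does for c->wbuf_dwork in a later hunk. A hypothetical scheduling site (sketch; sketch_queue_work is illustrative):

/* Pair the work item with its superblock before it is queued, so
 * process_delayed_work() can lock that mount around the callback. */
static void sketch_queue_work(struct super_block *sb, struct delayed_work *dw)
{
	dw->sb = sb;                   /* superblock provided for locking */
	add_delayed_work_to_chain(dw); /* queueing function from this port */
}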
@@ -1307,7 +1307,9 @@ static void process_delayed_work(void)
 			work->pending = false;
 			mutex_unlock(&work->dw_mutex);
+			rtems_jffs2_do_lock(work->sb);
 			work->callback(&work->work);
+			rtems_jffs2_do_unlock(work->sb);
 		}
 	mutex_unlock(&delayed_work_mutex);
 }
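The ordering here matters: the item's dw_mutex is released before the filesystem lock is taken, so the callback never runs while holding both, and it runs with exactly the whole-FS exclusion the commit message describes. The resulting nesting, paraphrased as a sketch:

/*
 * mutex_lock(&delayed_work_mutex);      walk the pending work chain
 *   mutex_lock(&work->dw_mutex);        check and clear work->pending
 *   mutex_unlock(&work->dw_mutex);
 *   rtems_jffs2_do_lock(work->sb);      exclusive access to the mount
 *     work->callback(&work->work);
 *   rtems_jffs2_do_unlock(work->sb);
 * mutex_unlock(&delayed_work_mutex);
 */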
@@ -1382,6 +1384,7 @@ int rtems_jffs2_initialize(
 	sb = &fs_info->sb;
 	c = JFFS2_SB_INFO(sb);
 #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+	c->wbuf_dwork.sb = sb;
 	add_delayed_work_to_chain(&c->wbuf_dwork);
 #endif
 	spin_lock_init(&c->erase_completion_lock);
@@ -1523,7 +1526,7 @@ static struct _inode *new_inode(struct super_block *sb)
 
 	inode->i_cache_next = NULL;	// Newest inode, about to be cached
 
-	mutex_init(&JFFS2_INODE_INFO(inode)->sem)
+	mutex_init(&JFFS2_INODE_INFO(inode)->sem);
 
 	// Add to the icache
 	for (cached_inode = sb->s_root; cached_inode != NULL;
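Note the trailing semicolon gained above: the granular mutex_init() macro being removed expanded to a statement ending in its own semicolon (see the first hunk), which let this call site compile without one. With mutex_init() restored as a real inline function, the call must terminate the statement itself, so the missing semicolon is fixed as part of the revert.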
@@ -100,6 +100,10 @@ struct _inode {
 
 struct super_block {
 	struct jffs2_sb_info jffs2_sb;
+	/*
+	 * If granular locking is ever enabled for JFFS2, the inode cache
+	 * (s_root) needs to be protected due to NAND delayed writes.
+	 */
 	struct _inode * s_root;
 	rtems_jffs2_flash_control *s_flash_control;
 	rtems_jffs2_compressor_control *s_compressor_control;