Merge pull request #912 from littlefs-project/relaxed-lookahead

Relaxed lookahead alignment, other internal block alloc readability improvements
Christopher Haster · 2024-01-19 12:27:14 -06:00 · committed by GitHub
4 changed files with 90 additions and 71 deletions

lfs.c

@@ -596,42 +596,48 @@ static int lfs_rawunmount(lfs_t *lfs);
 #ifndef LFS_READONLY
 static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
     lfs_t *lfs = (lfs_t*)p;
-    lfs_block_t off = ((block - lfs->free.off)
+    lfs_block_t off = ((block - lfs->lookahead.start)
             + lfs->block_count) % lfs->block_count;
 
-    if (off < lfs->free.size) {
-        lfs->free.buffer[off / 32] |= 1U << (off % 32);
+    if (off < lfs->lookahead.size) {
+        lfs->lookahead.buffer[off / 8] |= 1U << (off % 8);
     }
 
     return 0;
 }
 #endif
 
-// indicate allocated blocks have been committed into the filesystem, this
-// is to prevent blocks from being garbage collected in the middle of a
-// commit operation
-static void lfs_alloc_ack(lfs_t *lfs) {
-    lfs->free.ack = lfs->block_count;
+// allocations should call this when all allocated blocks are committed to
+// the filesystem
+//
+// after a checkpoint, the block allocator may realloc any untracked blocks
+static void lfs_alloc_ckpoint(lfs_t *lfs) {
+    lfs->lookahead.ckpoint = lfs->block_count;
 }
 
 // drop the lookahead buffer, this is done during mounting and failed
 // traversals in order to avoid invalid lookahead state
 static void lfs_alloc_drop(lfs_t *lfs) {
-    lfs->free.size = 0;
-    lfs->free.i = 0;
-    lfs_alloc_ack(lfs);
+    lfs->lookahead.size = 0;
+    lfs->lookahead.next = 0;
+    lfs_alloc_ckpoint(lfs);
 }
 
 #ifndef LFS_READONLY
 static int lfs_fs_rawgc(lfs_t *lfs) {
-    // Move free offset at the first unused block (lfs->free.i)
-    // lfs->free.i is equal lfs->free.size when all blocks are used
-    lfs->free.off = (lfs->free.off + lfs->free.i) % lfs->block_count;
-    lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
-    lfs->free.i = 0;
+    // move lookahead buffer to the first unused block
+    //
+    // note we limit the lookahead buffer to at most the amount of blocks
+    // checkpointed, this prevents the math in lfs_alloc from underflowing
+    lfs->lookahead.start = (lfs->lookahead.start + lfs->lookahead.next)
+            % lfs->block_count;
+    lfs->lookahead.next = 0;
+    lfs->lookahead.size = lfs_min(
+            8*lfs->cfg->lookahead_size,
+            lfs->lookahead.ckpoint);
 
     // find mask of free blocks from tree
-    memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
+    memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
     int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
     if (err) {
         lfs_alloc_drop(lfs);
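As context for this hunk: the lookahead bitmap is now indexed per byte (off / 8, 1U << (off % 8)) instead of per 32-bit word, which is what allows the size and alignment requirements in lfs.h to be relaxed further down. Below is a minimal standalone sketch of that indexing; the window_t type and window_mark name are made up for illustration and are not littlefs APIs.

#include <stdint.h>
#include <stdio.h>

// toy model of the byte-granular lookahead window
typedef struct {
    uint32_t start;      // first block covered by the window
    uint32_t size;       // number of blocks covered
    uint8_t buffer[16];  // 16 bytes -> up to 128 blocks, one bit each
} window_t;

// mark a block as in-use, mirroring lfs_alloc_lookahead's arithmetic
static void window_mark(window_t *w, uint32_t block, uint32_t block_count) {
    // adding block_count keeps the wrap-around well-defined when the block
    // sits "before" the window start
    uint32_t off = ((block - w->start) + block_count) % block_count;
    if (off < w->size) {
        w->buffer[off / 8] |= 1U << (off % 8);  // byte index, then bit index
    }
}

int main(void) {
    window_t w = {.start = 120, .size = 128, .buffer = {0}};
    window_mark(&w, 2, 130);                  // with 130 blocks, wraps to offset 12
    printf("byte 1 = 0x%02x\n", w.buffer[1]); // prints 0x10 (bit 4 set)
    return 0;
}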
@@ -645,35 +651,48 @@ static int lfs_fs_rawgc(lfs_t *lfs) {
 #ifndef LFS_READONLY
 static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
     while (true) {
-        while (lfs->free.i != lfs->free.size) {
-            lfs_block_t off = lfs->free.i;
-            lfs->free.i += 1;
-            lfs->free.ack -= 1;
-
-            if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
+        // scan our lookahead buffer for free blocks
+        while (lfs->lookahead.next < lfs->lookahead.size) {
+            if (!(lfs->lookahead.buffer[lfs->lookahead.next / 8]
+                    & (1U << (lfs->lookahead.next % 8)))) {
                 // found a free block
-                *block = (lfs->free.off + off) % lfs->block_count;
+                *block = (lfs->lookahead.start + lfs->lookahead.next)
+                        % lfs->block_count;
 
-                // eagerly find next off so an alloc ack can
-                // discredit old lookahead blocks
-                while (lfs->free.i != lfs->free.size &&
-                        (lfs->free.buffer[lfs->free.i / 32]
-                            & (1U << (lfs->free.i % 32)))) {
-                    lfs->free.i += 1;
-                    lfs->free.ack -= 1;
+                // eagerly find next free block to maximize how many blocks
+                // lfs_alloc_ckpoint makes available for scanning
+                while (true) {
+                    lfs->lookahead.next += 1;
+                    lfs->lookahead.ckpoint -= 1;
+
+                    if (lfs->lookahead.next >= lfs->lookahead.size
+                            || !(lfs->lookahead.buffer[lfs->lookahead.next / 8]
+                                & (1U << (lfs->lookahead.next % 8)))) {
+                        return 0;
+                    }
                 }
-
-                return 0;
             }
+
+            lfs->lookahead.next += 1;
+            lfs->lookahead.ckpoint -= 1;
         }
 
-        // check if we have looked at all blocks since last ack
-        if (lfs->free.ack == 0) {
-            LFS_ERROR("No more free space %"PRIu32,
-                    lfs->free.i + lfs->free.off);
+        // In order to keep our block allocator from spinning forever when our
+        // filesystem is full, we mark points where there are no in-flight
+        // allocations with a checkpoint before starting a set of allocations.
+        //
+        // If we've looked at all blocks since the last checkpoint, we report
+        // the filesystem as out of storage.
+        //
+        if (lfs->lookahead.ckpoint <= 0) {
+            LFS_ERROR("No more free space 0x%"PRIx32,
+                    (lfs->lookahead.start + lfs->lookahead.next)
+                        % lfs->cfg->block_count);
             return LFS_ERR_NOSPC;
         }
 
+        // No blocks in our lookahead buffer, we need to scan the filesystem for
+        // unused blocks in the next lookahead window.
         int err = lfs_fs_rawgc(lfs);
         if(err) {
             return err;
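The ckpoint counter above is what keeps this loop from spinning forever: lfs_alloc_ckpoint resets it to block_count once all allocated blocks are committed, and every block examined since then decrements it. Here is a hypothetical toy model of that bookkeeping; the toy_* names are invented, and a plain in-use array stands in for the lookahead bitmap and filesystem traversal.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_BLOCKS 8

static uint8_t used[TOY_BLOCKS];  // 1 = in use
static uint32_t next_off;         // next offset to examine
static uint32_t ckpoint;          // blocks we may still examine

// mark a checkpoint: all allocated blocks are committed, safe to rescan all
static void toy_ckpoint(void) {
    ckpoint = TOY_BLOCKS;
}

// allocate a free block, or fail once we've examined every block since the
// last checkpoint
static int toy_alloc(uint32_t *block) {
    while (ckpoint > 0) {
        uint32_t off = next_off;
        next_off = (next_off + 1) % TOY_BLOCKS;
        ckpoint -= 1;
        if (!used[off]) {
            used[off] = 1;
            *block = off;
            return 0;
        }
    }
    return -1;  // analogous to LFS_ERR_NOSPC
}

int main(void) {
    memset(used, 1, sizeof(used));
    used[5] = 0;                   // only block 5 is free
    toy_ckpoint();

    uint32_t b = 0;
    int err = toy_alloc(&b);
    printf("alloc -> %d, block %u\n", err, b);  // 0, block 5
    err = toy_alloc(&b);
    printf("alloc -> %d\n", err);               // -1, out of space
    return 0;
}

Because the scan budget is bounded by the last checkpoint, a completely full filesystem terminates with an error instead of rescanning indefinitely, mirroring the ckpoint <= 0 check above.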
@@ -2588,7 +2607,7 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
     }
 
     // build up new directory
-    lfs_alloc_ack(lfs);
+    lfs_alloc_ckpoint(lfs);
     lfs_mdir_t dir;
     err = lfs_dir_alloc(lfs, &dir);
     if (err) {
@@ -3274,7 +3293,7 @@ relocate:
 #ifndef LFS_READONLY
 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
     file->off = file->pos;
-    lfs_alloc_ack(lfs);
+    lfs_alloc_ckpoint(lfs);
     int err = lfs_file_relocate(lfs, file);
     if (err) {
         return err;
@@ -3537,7 +3556,7 @@ static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
             }
 
             // extend file with new blocks
-            lfs_alloc_ack(lfs);
+            lfs_alloc_ckpoint(lfs);
             int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
                     file->block, file->pos,
                     &file->block, &file->off);
@@ -3580,7 +3599,7 @@ relocate:
         data += diff;
         nsize -= diff;
 
-        lfs_alloc_ack(lfs);
+        lfs_alloc_ckpoint(lfs);
     }
 
     return size;
@@ -4197,15 +4216,14 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
     lfs_cache_zero(lfs, &lfs->rcache);
     lfs_cache_zero(lfs, &lfs->pcache);
 
-    // setup lookahead, must be multiple of 64-bits, 32-bit aligned
+    // setup lookahead buffer, note mount finishes initializing this after
+    // we establish a decent pseudo-random seed
     LFS_ASSERT(lfs->cfg->lookahead_size > 0);
-    LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
-            (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
     if (lfs->cfg->lookahead_buffer) {
-        lfs->free.buffer = lfs->cfg->lookahead_buffer;
+        lfs->lookahead.buffer = lfs->cfg->lookahead_buffer;
     } else {
-        lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size);
-        if (!lfs->free.buffer) {
+        lfs->lookahead.buffer = lfs_malloc(lfs->cfg->lookahead_size);
+        if (!lfs->lookahead.buffer) {
             err = LFS_ERR_NOMEM;
             goto cleanup;
         }
@@ -4262,7 +4280,7 @@ static int lfs_deinit(lfs_t *lfs) {
     }
 
     if (!lfs->cfg->lookahead_buffer) {
-        lfs_free(lfs->free.buffer);
+        lfs_free(lfs->lookahead.buffer);
     }
 
     return 0;
@@ -4282,12 +4300,12 @@ static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
         LFS_ASSERT(cfg->block_count != 0);
 
         // create free lookahead
-        memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
-        lfs->free.off = 0;
-        lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size,
+        memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
+        lfs->lookahead.start = 0;
+        lfs->lookahead.size = lfs_min(8*lfs->cfg->lookahead_size,
                 lfs->block_count);
-        lfs->free.i = 0;
-        lfs_alloc_ack(lfs);
+        lfs->lookahead.next = 0;
+        lfs_alloc_ckpoint(lfs);
 
         // create root dir
         lfs_mdir_t root;
@@ -4495,7 +4513,7 @@ static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
     // setup free lookahead, to distribute allocations uniformly across
     // boots, we start the allocator at a random location
-    lfs->free.off = lfs->seed % lfs->block_count;
+    lfs->lookahead.start = lfs->seed % lfs->block_count;
     lfs_alloc_drop(lfs);
 
     return 0;
@@ -5468,10 +5486,10 @@ static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
     lfs->lfs1->root[1] = LFS_BLOCK_NULL;
 
     // setup free lookahead
-    lfs->free.off = 0;
-    lfs->free.size = 0;
-    lfs->free.i = 0;
-    lfs_alloc_ack(lfs);
+    lfs->lookahead.start = 0;
+    lfs->lookahead.size = 0;
+    lfs->lookahead.next = 0;
+    lfs_alloc_ckpoint(lfs);
 
     // load superblock
     lfs1_dir_t dir;

lfs.h

@@ -224,7 +224,7 @@ struct lfs_config {
     // Size of the lookahead buffer in bytes. A larger lookahead buffer
     // increases the number of blocks found during an allocation pass. The
     // lookahead buffer is stored as a compact bitmap, so each byte of RAM
-    // can track 8 blocks. Must be a multiple of 8.
+    // can track 8 blocks.
     lfs_size_t lookahead_size;
 
     // Optional statically allocated read buffer. Must be cache_size.
@@ -235,9 +235,8 @@ struct lfs_config {
     // By default lfs_malloc is used to allocate this buffer.
     void *prog_buffer;
 
-    // Optional statically allocated lookahead buffer. Must be lookahead_size
-    // and aligned to a 32-bit boundary. By default lfs_malloc is used to
-    // allocate this buffer.
+    // Optional statically allocated lookahead buffer. Must be lookahead_size.
+    // By default lfs_malloc is used to allocate this buffer.
     void *lookahead_buffer;
 
     // Optional upper limit on length of file names in bytes. No downside for
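Taken together, the two hunks above drop the old "multiple of 8" rule on lookahead_size and the 32-bit alignment rule on lookahead_buffer. A hedged configuration sketch of what this now permits; the device geometry below is invented for illustration and the block device callbacks are omitted.

#include "lfs.h"

// an odd-sized, byte-aligned lookahead buffer is now acceptable
static uint8_t lookahead_buffer[13];

static const struct lfs_config cfg = {
    // .read, .prog, .erase, .sync block device callbacks omitted
    .read_size = 16,
    .prog_size = 16,
    .block_size = 4096,
    .block_count = 128,
    .block_cycles = 500,
    .cache_size = 16,
    .lookahead_size = sizeof(lookahead_buffer),  // 13 bytes -> 104 blocks/pass
    .lookahead_buffer = lookahead_buffer,
};

A window smaller than block_count only means the allocator rescans (lfs_fs_rawgc) more often; correctness does not depend on the lookahead covering the whole device.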
@@ -428,13 +427,13 @@ typedef struct lfs {
     lfs_gstate_t gdisk;
     lfs_gstate_t gdelta;
 
-    struct lfs_free {
-        lfs_block_t off;
+    struct lfs_lookahead {
+        lfs_block_t start;
         lfs_block_t size;
-        lfs_block_t i;
-        lfs_block_t ack;
-        uint32_t *buffer;
-    } free;
+        lfs_block_t next;
+        lfs_block_t ckpoint;
+        uint8_t *buffer;
+    } lookahead;
 
     const struct lfs_config *cfg;
     lfs_size_t block_count;

lfs_util.h

@@ -221,7 +221,9 @@ uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size);
 #endif
 
 // Allocate memory, only used if buffers are not provided to littlefs
-// Note, memory must be 64-bit aligned
+//
+// littlefs currently has no alignment requirements, as it only allocates
+// byte-level buffers.
 static inline void *lfs_malloc(size_t size) {
 #if defined(LFS_MALLOC)
     return LFS_MALLOC(size);


@@ -98,7 +98,7 @@ code = '''
     lfs_mount(&lfs, cfg) => 0;
 
     // create an orphan
     lfs_mdir_t orphan;
-    lfs_alloc_ack(&lfs);
+    lfs_alloc_ckpoint(&lfs);
     lfs_dir_alloc(&lfs, &orphan) => 0;
     lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;
@@ -170,7 +170,7 @@ code = '''
     lfs_mount(&lfs, cfg) => 0;
 
     // create an orphan
     lfs_mdir_t orphan;
-    lfs_alloc_ack(&lfs);
+    lfs_alloc_ckpoint(&lfs);
     lfs_dir_alloc(&lfs, &orphan) => 0;
     lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;