Compare commits


11 Commits

Author SHA1 Message Date
Christopher Haster
b5cd957f42 Extended lfs_fs_gc to compact metadata, compact_thresh
This extends lfs_fs_gc to now handle three things:

1. Calls mkconsistent if not already consistent
2. Compacts metadata > compact_thresh
3. Populates the block allocator

This should be all of the janitorial work that can be done without
additional on-disk data structures.

Normally, metadata compaction occurs when an mdir is full, and results in
mdirs that are at most block_size/2.

Now, if you call lfs_fs_gc, littlefs will eagerly compact any mdirs that
exceed the compact_thresh configuration option. Because the resulting
mdirs are at most block_size/2, it only makes sense for compact_thresh to
be >= block_size/2 and <= block_size.

Additionally, there are some special values:

- compact_thresh=0  => defaults to ~88% block_size, may change
- compact_thresh=-1 => disables metadata compaction during lfs_fs_gc

Note that compact_thresh only affects lfs_fs_gc. Normal compactions
still only occur when full.
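
As a rough illustration, opting into eager compaction might look like the
sketch below; the geometry values and the user_bd_* block-device callbacks
are hypothetical placeholders, not part of this change:

    // sketch: enable eager metadata compaction during lfs_fs_gc
    const struct lfs_config cfg = {
        .read  = user_bd_read,   // hypothetical block-device callbacks
        .prog  = user_bd_prog,
        .erase = user_bd_erase,
        .sync  = user_bd_sync,

        .read_size      = 16,
        .prog_size      = 16,
        .block_size     = 4096,
        .block_count    = 128,
        .block_cycles   = 500,
        .cache_size     = 64,
        .lookahead_size = 16,

        // compact mdirs that grow past 3/4 of a block; must be
        // >= block_size/2 and <= block_size (0 selects the ~88% default)
        .compact_thresh = (3*4096)/4,
    };

    // later, from a less time-critical code path, with lfs mounted:
    int err = lfs_fs_gc(&lfs);  // mkconsistent + compact + populate allocator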
2024-01-19 12:25:45 -06:00
Christopher Haster
60567677b9 Relaxed alignment requirements for lfs_malloc
The only reason we needed this alignment was for the lookahead buffer.

Now that the lookahead buffer is relaxed to operate on bytes, we can
relax our malloc alignment requirement all the way down to the byte
level, since we mainly use lfs_malloc to allocate byte-level buffers.

This does introduce a risk that we might need word-level mallocs in the
future. If that happens, we will need to decide whether changing the malloc
alignment is a breaking change, or gate alignment requirements behind
user-provided defines.

Found by HiFiPhile.
2024-01-16 00:27:07 -06:00
Christopher Haster
b1b10c0e75 Relaxed lookahead buffer alignment
This drops the lookahead buffer from operating on 32-bit words to
operating on 8-bit bytes, and removes any alignment requirement. This
may have some minor performance impact, but it is unlikely to be
significant when you consider IO overhead.

The original motivation for 32-bit alignment was an attempt at
future-proofing in case we wanted some more complex on-disk data
structure. This never happened, and even if it did, it could have been
added via additional config options.

This has been a significant pain point for users, since providing
word-aligned byte-sized buffers in C can be a bit annoying.
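
For example, a statically allocated lookahead buffer can now be a plain byte
array. A minimal sketch (sizes and names are illustrative only):

    // 16 bytes of lookahead tracks 16*8 = 128 blocks, no alignment needed
    static uint8_t lookahead_buf[16];

    const struct lfs_config cfg = {
        // ... other configuration elided ...
        .lookahead_size   = sizeof(lookahead_buf),
        .lookahead_buffer = lookahead_buf,
    };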
2023-12-20 00:39:11 -06:00
Christopher Haster
1f9c3c04b1 Reworked the block allocator so the logic is hopefully simpler
Some of this is just better documentation; some of it is reworking the
logic to be more intention-driven.
2023-12-20 00:24:56 -06:00
Christopher Haster
7b68441888 Renamed a number of internal block-allocator fields
- Renamed lfs.free      -> lfs.lookahead
- Renamed lfs.free.off  -> lfs.lookahead.start
- Renamed lfs.free.i    -> lfs.lookahead.next
- Renamed lfs.free.ack  -> lfs.lookahead.ckpoint
- Renamed lfs_alloc_ack -> lfs_alloc_ckpoint

These were named a bit confusingly, and I think the new names make their
respective purposes a bit clearer.

At the very least it's now clear that lfs.lookahead is related to the
lookahead buffer (and doesn't imply a closed free-bitmap).
2023-12-20 00:17:08 -06:00
Christopher Haster
c733d9ec57 Merge pull request #884 from DvdGiessen/static-functions
lfs_fs_raw* functions should be static
2023-10-31 13:26:35 -05:00
Christopher Haster
8f3f32d1f3 Added -Wmissing-prototypes
This warning is useful for catching the easy mistake of omitting the
static keyword on functions intended to be internal-only.

Missing the static keyword risks symbol pollution and misses potential
compiler optimizations.
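
For instance, a minimal sketch of the mistake this warning catches (file
and function names hypothetical):

    // compiled with: cc -Wmissing-prototypes -c example.c

    // not static and no prototype in any header: -Wmissing-prototypes
    // warns, since this silently becomes an exported symbol
    int helper(int x) { return x + 1; }

    // static keeps the symbol internal and silences the warning
    static int helper2(int x) { return x + 1; }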

This is an interesting warning: while useful for libraries such as
littlefs, it's perfectly valid C to not predeclare all functions, and this
is common in final application binaries.

Relatedly, this warning is re-disabled for the test/bench runner. There
may be a better way to organize the CFLAGS, maybe into separate
LIB/RUNNER CFLAGS, but I'll leave this to future work if our CFLAGS grow
more complicated.

This was motivated by non-static internal-only functions leaking into a
release. Found and fixed by DvdGiessen.
2023-10-24 12:04:54 -05:00
Daniël van de Giessen
92fc780f71 lfs_fs_raw* functions should be static 2023-10-23 13:35:34 +02:00
Christopher Haster
f77214d1f0 Merge pull request #877 from littlefs-project/devel
Minor release: v2.8
2023-09-22 11:52:21 -05:00
Christopher Haster
f91c5bd687 Bumped minor version to v2.8 2023-09-21 13:02:09 -05:00
Christopher Haster
0eb52a2df1 Merge pull request #875 from littlefs-project/fs-gc
Add lfs_fs_gc to enable proactive finding of free blocks
2023-09-21 13:01:19 -05:00
10 changed files with 237 additions and 124 deletions


@@ -63,6 +63,7 @@ CFLAGS += -fcallgraph-info=su
CFLAGS += -g3
CFLAGS += -I.
CFLAGS += -std=c99 -Wall -Wextra -pedantic
CFLAGS += -Wmissing-prototypes
CFLAGS += -ftrack-macro-expansion=0
ifdef DEBUG
CFLAGS += -O0
@@ -354,6 +355,7 @@ summary-diff sizes-diff: $(OBJ) $(CI)
## Build the test-runner
.PHONY: test-runner build-test
test-runner build-test: CFLAGS+=-Wno-missing-prototypes
ifndef NO_COV
test-runner build-test: CFLAGS+=--coverage
endif
@@ -405,6 +407,7 @@ testmarks-diff: $(TEST_CSV)
## Build the bench-runner
.PHONY: bench-runner build-bench
bench-runner build-bench: CFLAGS+=-Wno-missing-prototypes
ifdef YES_COV
bench-runner build-bench: CFLAGS+=--coverage
endif

lfs.c

@@ -593,45 +593,52 @@ static int lfs_rawunmount(lfs_t *lfs);
/// Block allocator ///
// allocations should call this when all allocated blocks are committed to
// the filesystem
//
// after a checkpoint, the block allocator may realloc any untracked blocks
static void lfs_alloc_ckpoint(lfs_t *lfs) {
lfs->lookahead.ckpoint = lfs->block_count;
}
// drop the lookahead buffer, this is done during mounting and failed
// traversals in order to avoid invalid lookahead state
static void lfs_alloc_drop(lfs_t *lfs) {
lfs->lookahead.size = 0;
lfs->lookahead.next = 0;
lfs_alloc_ckpoint(lfs);
}
#ifndef LFS_READONLY
static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
lfs_t *lfs = (lfs_t*)p;
lfs_block_t off = ((block - lfs->free.off)
lfs_block_t off = ((block - lfs->lookahead.start)
+ lfs->block_count) % lfs->block_count;
if (off < lfs->free.size) {
lfs->free.buffer[off / 32] |= 1U << (off % 32);
if (off < lfs->lookahead.size) {
lfs->lookahead.buffer[off / 8] |= 1U << (off % 8);
}
return 0;
}
#endif
// indicate allocated blocks have been committed into the filesystem, this
// is to prevent blocks from being garbage collected in the middle of a
// commit operation
static void lfs_alloc_ack(lfs_t *lfs) {
lfs->free.ack = lfs->block_count;
}
// drop the lookahead buffer, this is done during mounting and failed
// traversals in order to avoid invalid lookahead state
static void lfs_alloc_drop(lfs_t *lfs) {
lfs->free.size = 0;
lfs->free.i = 0;
lfs_alloc_ack(lfs);
}
#ifndef LFS_READONLY
static int lfs_fs_rawgc(lfs_t *lfs) {
// Move free offset at the first unused block (lfs->free.i)
// lfs->free.i is equal lfs->free.size when all blocks are used
lfs->free.off = (lfs->free.off + lfs->free.i) % lfs->block_count;
lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
lfs->free.i = 0;
static int lfs_alloc_scan(lfs_t *lfs) {
// move lookahead buffer to the first unused block
//
// note we limit the lookahead buffer to at most the amount of blocks
// checkpointed, this prevents the math in lfs_alloc from underflowing
lfs->lookahead.start = (lfs->lookahead.start + lfs->lookahead.next)
% lfs->block_count;
lfs->lookahead.next = 0;
lfs->lookahead.size = lfs_min(
8*lfs->cfg->lookahead_size,
lfs->lookahead.ckpoint);
// find mask of free blocks from tree
memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
if (err) {
lfs_alloc_drop(lfs);
@@ -645,36 +652,49 @@ static int lfs_fs_rawgc(lfs_t *lfs) {
#ifndef LFS_READONLY
static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
while (true) {
while (lfs->free.i != lfs->free.size) {
lfs_block_t off = lfs->free.i;
lfs->free.i += 1;
lfs->free.ack -= 1;
if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
// scan our lookahead buffer for free blocks
while (lfs->lookahead.next < lfs->lookahead.size) {
if (!(lfs->lookahead.buffer[lfs->lookahead.next / 8]
& (1U << (lfs->lookahead.next % 8)))) {
// found a free block
*block = (lfs->free.off + off) % lfs->block_count;
*block = (lfs->lookahead.start + lfs->lookahead.next)
% lfs->block_count;
// eagerly find next off so an alloc ack can
// discredit old lookahead blocks
while (lfs->free.i != lfs->free.size &&
(lfs->free.buffer[lfs->free.i / 32]
& (1U << (lfs->free.i % 32)))) {
lfs->free.i += 1;
lfs->free.ack -= 1;
// eagerly find next free block to maximize how many blocks
// lfs_alloc_ckpoint makes available for scanning
while (true) {
lfs->lookahead.next += 1;
lfs->lookahead.ckpoint -= 1;
if (lfs->lookahead.next >= lfs->lookahead.size
|| !(lfs->lookahead.buffer[lfs->lookahead.next / 8]
& (1U << (lfs->lookahead.next % 8)))) {
return 0;
}
}
return 0;
}
lfs->lookahead.next += 1;
lfs->lookahead.ckpoint -= 1;
}
// check if we have looked at all blocks since last ack
if (lfs->free.ack == 0) {
LFS_ERROR("No more free space %"PRIu32,
lfs->free.i + lfs->free.off);
// In order to keep our block allocator from spinning forever when our
// filesystem is full, we mark points where there are no in-flight
// allocations with a checkpoint before starting a set of allocations.
//
// If we've looked at all blocks since the last checkpoint, we report
// the filesystem as out of storage.
//
if (lfs->lookahead.ckpoint <= 0) {
LFS_ERROR("No more free space 0x%"PRIx32,
(lfs->lookahead.start + lfs->lookahead.next)
% lfs->cfg->block_count);
return LFS_ERR_NOSPC;
}
int err = lfs_fs_rawgc(lfs);
// No blocks in our lookahead buffer, we need to scan the filesystem for
// unused blocks in the next lookahead window.
int err = lfs_alloc_scan(lfs);
if(err) {
return err;
}
@@ -2586,7 +2606,7 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
}
// build up new directory
lfs_alloc_ack(lfs);
lfs_alloc_ckpoint(lfs);
lfs_mdir_t dir;
err = lfs_dir_alloc(lfs, &dir);
if (err) {
@@ -3272,7 +3292,7 @@ relocate:
#ifndef LFS_READONLY
static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
file->off = file->pos;
lfs_alloc_ack(lfs);
lfs_alloc_ckpoint(lfs);
int err = lfs_file_relocate(lfs, file);
if (err) {
return err;
@@ -3535,7 +3555,7 @@ static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
}
// extend file with new blocks
lfs_alloc_ack(lfs);
lfs_alloc_ckpoint(lfs);
int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
file->block, file->pos,
&file->block, &file->off);
@@ -3578,7 +3598,7 @@ relocate:
data += diff;
nsize -= diff;
lfs_alloc_ack(lfs);
lfs_alloc_ckpoint(lfs);
}
return size;
@@ -4153,6 +4173,14 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
// wear-leveling.
LFS_ASSERT(lfs->cfg->block_cycles != 0);
// check that compact_thresh makes sense
//
// metadata can't be compacted below block_size/2, and metadata can't
// exceed a block_size
LFS_ASSERT(lfs->cfg->compact_thresh == 0
|| lfs->cfg->compact_thresh >= lfs->cfg->block_size/2);
LFS_ASSERT(lfs->cfg->compact_thresh == (lfs_size_t)-1
|| lfs->cfg->compact_thresh <= lfs->cfg->block_size);
// setup read cache
if (lfs->cfg->read_buffer) {
@@ -4180,15 +4208,14 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
lfs_cache_zero(lfs, &lfs->rcache);
lfs_cache_zero(lfs, &lfs->pcache);
// setup lookahead, must be multiple of 64-bits, 32-bit aligned
// setup lookahead buffer, note mount finishes initializing this after
// we establish a decent pseudo-random seed
LFS_ASSERT(lfs->cfg->lookahead_size > 0);
LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
(uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
if (lfs->cfg->lookahead_buffer) {
lfs->free.buffer = lfs->cfg->lookahead_buffer;
lfs->lookahead.buffer = lfs->cfg->lookahead_buffer;
} else {
lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size);
if (!lfs->free.buffer) {
lfs->lookahead.buffer = lfs_malloc(lfs->cfg->lookahead_size);
if (!lfs->lookahead.buffer) {
err = LFS_ERR_NOMEM;
goto cleanup;
}
@@ -4245,7 +4272,7 @@ static int lfs_deinit(lfs_t *lfs) {
}
if (!lfs->cfg->lookahead_buffer) {
lfs_free(lfs->free.buffer);
lfs_free(lfs->lookahead.buffer);
}
return 0;
@@ -4265,12 +4292,12 @@ static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
LFS_ASSERT(cfg->block_count != 0);
// create free lookahead
memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
lfs->free.off = 0;
lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size,
memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
lfs->lookahead.start = 0;
lfs->lookahead.size = lfs_min(8*lfs->cfg->lookahead_size,
lfs->block_count);
lfs->free.i = 0;
lfs_alloc_ack(lfs);
lfs->lookahead.next = 0;
lfs_alloc_ckpoint(lfs);
// create root dir
lfs_mdir_t root;
@@ -4478,7 +4505,7 @@ static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
// setup free lookahead, to distribute allocations uniformly across
// boots, we start the allocator at a random location
lfs->free.off = lfs->seed % lfs->block_count;
lfs->lookahead.start = lfs->seed % lfs->block_count;
lfs_alloc_drop(lfs);
return 0;
@@ -4999,7 +5026,7 @@ static int lfs_fs_forceconsistency(lfs_t *lfs) {
#endif
#ifndef LFS_READONLY
int lfs_fs_rawmkconsistent(lfs_t *lfs) {
static int lfs_fs_rawmkconsistent(lfs_t *lfs) {
// lfs_fs_forceconsistency does most of the work here
int err = lfs_fs_forceconsistency(lfs);
if (err) {
@@ -5045,8 +5072,59 @@ static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs) {
return size;
}
// explicit garbage collection
#ifndef LFS_READONLY
int lfs_fs_rawgrow(lfs_t *lfs, lfs_size_t block_count) {
static int lfs_fs_rawgc(lfs_t *lfs) {
// force consistency, even if we're not necessarily going to write,
// because this function is supposed to take care of janitorial work
// isn't it?
int err = lfs_fs_forceconsistency(lfs);
if (err) {
return err;
}
// try to compact metadata pairs, note we can't really accomplish
// anything if compact_thresh doesn't at least leave a prog_size
// available
if (lfs->cfg->compact_thresh
< lfs->cfg->block_size - lfs->cfg->prog_size) {
// iterate over all mdirs
lfs_mdir_t mdir = {.tail = {0, 1}};
while (!lfs_pair_isnull(mdir.tail)) {
err = lfs_dir_fetch(lfs, &mdir, mdir.tail);
if (err) {
return err;
}
// not erased? exceeds our compaction threshold?
if (!mdir.erased || ((lfs->cfg->compact_thresh == 0)
? mdir.off > lfs->cfg->block_size - lfs->cfg->block_size/8
: mdir.off > lfs->cfg->compact_thresh)) {
// the easiest way to trigger a compaction is to mark
// the mdir as unerased and add an empty commit
mdir.erased = false;
err = lfs_dir_commit(lfs, &mdir, NULL, 0);
if (err) {
return err;
}
}
}
}
// try to populate the lookahead buffer, unless it's already full
if (lfs->lookahead.size < 8*lfs->cfg->lookahead_size) {
err = lfs_alloc_scan(lfs);
if (err) {
return err;
}
}
return 0;
}
#endif
#ifndef LFS_READONLY
static int lfs_fs_rawgrow(lfs_t *lfs, lfs_size_t block_count) {
// shrinking is not supported
LFS_ASSERT(block_count >= lfs->block_count);
@@ -5451,10 +5529,10 @@ static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
lfs->lfs1->root[1] = LFS_BLOCK_NULL;
// setup free lookahead
lfs->free.off = 0;
lfs->free.size = 0;
lfs->free.i = 0;
lfs_alloc_ack(lfs);
lfs->lookahead.start = 0;
lfs->lookahead.size = 0;
lfs->lookahead.next = 0;
lfs_alloc_ckpoint(lfs);
// load superblock
lfs1_dir_t dir;
@@ -6250,22 +6328,6 @@ int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
return err;
}
#ifndef LFS_READONLY
int lfs_fs_gc(lfs_t *lfs) {
int err = LFS_LOCK(lfs->cfg);
if (err) {
return err;
}
LFS_TRACE("lfs_fs_gc(%p)", (void*)lfs);
err = lfs_fs_rawgc(lfs);
LFS_TRACE("lfs_fs_gc -> %d", err);
LFS_UNLOCK(lfs->cfg);
return err;
}
#endif
#ifndef LFS_READONLY
int lfs_fs_mkconsistent(lfs_t *lfs) {
int err = LFS_LOCK(lfs->cfg);
@@ -6282,6 +6344,22 @@ int lfs_fs_mkconsistent(lfs_t *lfs) {
}
#endif
#ifndef LFS_READONLY
int lfs_fs_gc(lfs_t *lfs) {
int err = LFS_LOCK(lfs->cfg);
if (err) {
return err;
}
LFS_TRACE("lfs_fs_gc(%p)", (void*)lfs);
err = lfs_fs_rawgc(lfs);
LFS_TRACE("lfs_fs_gc -> %d", err);
LFS_UNLOCK(lfs->cfg);
return err;
}
#endif
#ifndef LFS_READONLY
int lfs_fs_grow(lfs_t *lfs, lfs_size_t block_count) {
int err = LFS_LOCK(lfs->cfg);

lfs.h

@@ -21,7 +21,7 @@ extern "C"
// Software library version
// Major (top-nibble), incremented on backwards incompatible changes
// Minor (bottom-nibble), incremented on feature additions
#define LFS_VERSION 0x00020007
#define LFS_VERSION 0x00020008
#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
@@ -226,9 +226,20 @@ struct lfs_config {
// Size of the lookahead buffer in bytes. A larger lookahead buffer
// increases the number of blocks found during an allocation pass. The
// lookahead buffer is stored as a compact bitmap, so each byte of RAM
// can track 8 blocks. Must be a multiple of 8.
// can track 8 blocks.
lfs_size_t lookahead_size;
// Threshold for metadata compaction during lfs_fs_gc in bytes. Metadata
// pairs that exceed this threshold will be compacted during lfs_fs_gc.
// Defaults to ~88% block_size when zero, though the default may change
// in the future.
//
// Note this only affects lfs_fs_gc. Normal compactions still only occur
// when full.
//
// Set to -1 to disable metadata compaction during lfs_fs_gc.
lfs_size_t compact_thresh;
// Optional statically allocated read buffer. Must be cache_size.
// By default lfs_malloc is used to allocate this buffer.
void *read_buffer;
@@ -237,9 +248,8 @@ struct lfs_config {
// By default lfs_malloc is used to allocate this buffer.
void *prog_buffer;
// Optional statically allocated lookahead buffer. Must be lookahead_size
// and aligned to a 32-bit boundary. By default lfs_malloc is used to
// allocate this buffer.
// Optional statically allocated lookahead buffer. Must be lookahead_size.
// By default lfs_malloc is used to allocate this buffer.
void *lookahead_buffer;
// Optional upper limit on length of file names in bytes. No downside for
@@ -430,13 +440,13 @@ typedef struct lfs {
lfs_gstate_t gdisk;
lfs_gstate_t gdelta;
struct lfs_free {
lfs_block_t off;
struct lfs_lookahead {
lfs_block_t start;
lfs_block_t size;
lfs_block_t i;
lfs_block_t ack;
uint32_t *buffer;
} free;
lfs_block_t next;
lfs_block_t ckpoint;
uint8_t *buffer;
} lookahead;
const struct lfs_config *cfg;
lfs_size_t block_count;
@@ -712,18 +722,6 @@ lfs_ssize_t lfs_fs_size(lfs_t *lfs);
// Returns a negative error code on failure.
int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
// Attempt to proactively find free blocks
//
// Calling this function is not required, but may allowing the offloading of
// the expensive block allocation scan to a less time-critical code path.
//
// Note: littlefs currently does not persist any found free blocks to disk.
// This may change in the future.
//
// Returns a negative error code on failure. Finding no free blocks is
// not an error.
int lfs_fs_gc(lfs_t *lfs);
#ifndef LFS_READONLY
// Attempt to make the filesystem consistent and ready for writing
//
@@ -736,6 +734,24 @@ int lfs_fs_gc(lfs_t *lfs);
int lfs_fs_mkconsistent(lfs_t *lfs);
#endif
#ifndef LFS_READONLY
// Attempt any janitorial work
//
// This currently:
// 1. Calls mkconsistent if not already consistent
// 2. Compacts metadata > compact_thresh
// 3. Populates the block allocator
//
// Though additional janitorial work may be added in the future.
//
// Calling this function is not required, but may allow the offloading of
// expensive janitorial work to a less time-critical code path.
//
// Returns a negative error code on failure. Accomplishing nothing is not
// an error.
int lfs_fs_gc(lfs_t *lfs);
#endif
#ifndef LFS_READONLY
// Grows the filesystem to a new size, updating the superblock with the new
// block count.


@@ -215,7 +215,9 @@ static inline uint32_t lfs_tobe32(uint32_t a) {
uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size);
// Allocate memory, only used if buffers are not provided to littlefs
// Note, memory must be 64-bit aligned
//
// littlefs currently has no alignment requirements, as it only allocates
// byte-level buffers.
static inline void *lfs_malloc(size_t size) {
#ifndef LFS_NO_MALLOC
return malloc(size);


@@ -1321,6 +1321,7 @@ void perm_run(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
};
struct lfs_emubd_config bdcfg = {


@@ -95,11 +95,12 @@ intmax_t bench_define(size_t define);
#define BLOCK_COUNT_i 5
#define CACHE_SIZE_i 6
#define LOOKAHEAD_SIZE_i 7
#define BLOCK_CYCLES_i 8
#define ERASE_VALUE_i 9
#define ERASE_CYCLES_i 10
#define BADBLOCK_BEHAVIOR_i 11
#define POWERLOSS_BEHAVIOR_i 12
#define COMPACT_THRESH_i 8
#define BLOCK_CYCLES_i 9
#define ERASE_VALUE_i 10
#define ERASE_CYCLES_i 11
#define BADBLOCK_BEHAVIOR_i 12
#define POWERLOSS_BEHAVIOR_i 13
#define READ_SIZE bench_define(READ_SIZE_i)
#define PROG_SIZE bench_define(PROG_SIZE_i)
@@ -109,6 +110,7 @@ intmax_t bench_define(size_t define);
#define BLOCK_COUNT bench_define(BLOCK_COUNT_i)
#define CACHE_SIZE bench_define(CACHE_SIZE_i)
#define LOOKAHEAD_SIZE bench_define(LOOKAHEAD_SIZE_i)
#define COMPACT_THRESH bench_define(COMPACT_THRESH_i)
#define BLOCK_CYCLES bench_define(BLOCK_CYCLES_i)
#define ERASE_VALUE bench_define(ERASE_VALUE_i)
#define ERASE_CYCLES bench_define(ERASE_CYCLES_i)
@@ -124,6 +126,7 @@ intmax_t bench_define(size_t define);
BENCH_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1))\
BENCH_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
BENCH_DEF(LOOKAHEAD_SIZE, 16) \
BENCH_DEF(COMPACT_THRESH, 0) \
BENCH_DEF(BLOCK_CYCLES, -1) \
BENCH_DEF(ERASE_VALUE, 0xff) \
BENCH_DEF(ERASE_CYCLES, 0) \
@@ -131,7 +134,7 @@ intmax_t bench_define(size_t define);
BENCH_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP)
#define BENCH_GEOMETRY_DEFINE_COUNT 4
#define BENCH_IMPLICIT_DEFINE_COUNT 13
#define BENCH_IMPLICIT_DEFINE_COUNT 14
#endif


@@ -1346,6 +1346,7 @@ static void run_powerloss_none(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
@@ -1422,6 +1423,7 @@ static void run_powerloss_linear(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
@@ -1515,6 +1517,7 @@ static void run_powerloss_log(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
@@ -1606,6 +1609,7 @@ static void run_powerloss_cycles(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
@@ -1795,6 +1799,7 @@ static void run_powerloss_exhaustive(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif


@@ -88,12 +88,13 @@ intmax_t test_define(size_t define);
#define BLOCK_COUNT_i 5
#define CACHE_SIZE_i 6
#define LOOKAHEAD_SIZE_i 7
#define BLOCK_CYCLES_i 8
#define ERASE_VALUE_i 9
#define ERASE_CYCLES_i 10
#define BADBLOCK_BEHAVIOR_i 11
#define POWERLOSS_BEHAVIOR_i 12
#define DISK_VERSION_i 13
#define COMPACT_THRESH_i 8
#define BLOCK_CYCLES_i 9
#define ERASE_VALUE_i 10
#define ERASE_CYCLES_i 11
#define BADBLOCK_BEHAVIOR_i 12
#define POWERLOSS_BEHAVIOR_i 13
#define DISK_VERSION_i 14
#define READ_SIZE TEST_DEFINE(READ_SIZE_i)
#define PROG_SIZE TEST_DEFINE(PROG_SIZE_i)
@@ -103,6 +104,7 @@ intmax_t test_define(size_t define);
#define BLOCK_COUNT TEST_DEFINE(BLOCK_COUNT_i)
#define CACHE_SIZE TEST_DEFINE(CACHE_SIZE_i)
#define LOOKAHEAD_SIZE TEST_DEFINE(LOOKAHEAD_SIZE_i)
#define COMPACT_THRESH TEST_DEFINE(COMPACT_THRESH_i)
#define BLOCK_CYCLES TEST_DEFINE(BLOCK_CYCLES_i)
#define ERASE_VALUE TEST_DEFINE(ERASE_VALUE_i)
#define ERASE_CYCLES TEST_DEFINE(ERASE_CYCLES_i)
@@ -119,6 +121,7 @@ intmax_t test_define(size_t define);
TEST_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1)) \
TEST_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
TEST_DEF(LOOKAHEAD_SIZE, 16) \
TEST_DEF(COMPACT_THRESH, 0) \
TEST_DEF(BLOCK_CYCLES, -1) \
TEST_DEF(ERASE_VALUE, 0xff) \
TEST_DEF(ERASE_CYCLES, 0) \
@@ -127,7 +130,7 @@ intmax_t test_define(size_t define);
TEST_DEF(DISK_VERSION, 0)
#define TEST_GEOMETRY_DEFINE_COUNT 4
#define TEST_IMPLICIT_DEFINE_COUNT 14
#define TEST_IMPLICIT_DEFINE_COUNT 15
#endif


@@ -7,6 +7,7 @@ if = 'BLOCK_CYCLES == -1'
defines.FILES = 3
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
defines.GC = [false, true]
defines.COMPACT_THRESH = ['-1', '0', 'BLOCK_SIZE/2']
code = '''
const char *names[] = {"bacon", "eggs", "pancakes"};
lfs_file_t files[FILES];
@@ -60,6 +61,7 @@ code = '''
defines.FILES = 3
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
defines.GC = [false, true]
defines.COMPACT_THRESH = ['-1', '0', 'BLOCK_SIZE/2']
code = '''
const char *names[] = {"bacon", "eggs", "pancakes"};


@@ -98,7 +98,7 @@ code = '''
lfs_mount(&lfs, cfg) => 0;
// create an orphan
lfs_mdir_t orphan;
lfs_alloc_ack(&lfs);
lfs_alloc_ckpoint(&lfs);
lfs_dir_alloc(&lfs, &orphan) => 0;
lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;
@@ -170,7 +170,7 @@ code = '''
lfs_mount(&lfs, cfg) => 0;
// create an orphan
lfs_mdir_t orphan;
lfs_alloc_ack(&lfs);
lfs_alloc_ckpoint(&lfs);
lfs_dir_alloc(&lfs, &orphan) => 0;
lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;