Compare commits


2 Commits

Author SHA1 Message Date
Christopher Haster
9a620c730c Added LFS_CRC, easier override for lfs_crc
Now you can override littlefs's CRC implementation with some simple
defines:

  -DLFS_CRC=lfs_crc

The motivation for this is the same as for LFS_MALLOC/LFS_FREE. I think
these are the main "system-level" utils that users want to override.

Don't override this with something that isn't CRC32! Your filesystem
will no longer be compatible with other tools! This is only intended for
hardware-provided acceleration!
2023-12-19 14:12:10 -06:00
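
As a concrete illustration, an override hooking a hardware CRC engine might
look like the sketch below. my_crc32 and hw_crc32 are hypothetical names
(nothing here is littlefs API beyond the define); the only hard requirement
is that the result remains standard reflected CRC-32:

    // crc_port.c -- compiled with -DLFS_CRC=my_crc32
    #include <stddef.h>
    #include <stdint.h>

    // assumed vendor routine driving a CRC peripheral; it must compute
    // the same CRC-32 littlefs expects
    extern uint32_t hw_crc32(uint32_t crc, const void *buffer, size_t size);

    uint32_t my_crc32(uint32_t crc, const void *buffer, size_t size) {
        return hw_crc32(crc, buffer, size);
    }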
Christopher Haster
a0c6c54345 Added LFS_MALLOC/FREE, easier overrides for lfs_malloc/free
Now you can override littlefs's malloc with some simple defines:

  -DLFS_MALLOC=my_malloc
  -DLFS_FREE=my_free

This is probably what most users expect when they want to override
malloc/free in littlefs, but it hasn't been available; instead, littlefs
provides a file-level override of its builtin utils.

The thinking was that there are just too many builtins that could be
overridden, lfs_max/min/alignup/npw2/etc/etc/etc, so allowing users to
just override the util file provides the best flexibility without a ton
of ifdefs.

But it's become clear this is awkward for users that just want to
replace malloc.

Maybe the original goal was too optimistic, maybe there's a better way
to structure this file, or maybe the best API is just a bunch of ifdefs;
I have no idea! This will hopefully continue to evolve.
2023-12-19 13:57:17 -06:00
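
For illustration, a minimal pair of overrides that just adds accounting
around the system allocator; my_malloc/my_free are hypothetical names,
built with -DLFS_MALLOC=my_malloc -DLFS_FREE=my_free:

    #include <stdlib.h>
    #include <stddef.h>

    // bytes handed to littlefs so far; a simple high-water metric,
    // not decremented on free in this minimal sketch
    size_t lfs_alloc_total;

    void *my_malloc(size_t size) {
        void *p = malloc(size);
        if (p) {
            lfs_alloc_total += size;
        }
        return p;
    }

    void my_free(void *p) {
        free(p);
    }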
10 changed files with 136 additions and 233 deletions

lfs.c

@@ -593,52 +593,45 @@ static int lfs_rawunmount(lfs_t *lfs);
/// Block allocator ///
// allocations should call this when all allocated blocks are committed to
// the filesystem
//
// after a checkpoint, the block allocator may realloc any untracked blocks
static void lfs_alloc_ckpoint(lfs_t *lfs) {
lfs->lookahead.ckpoint = lfs->block_count;
}
// drop the lookahead buffer, this is done during mounting and failed
// traversals in order to avoid invalid lookahead state
static void lfs_alloc_drop(lfs_t *lfs) {
lfs->lookahead.size = 0;
lfs->lookahead.next = 0;
lfs_alloc_ckpoint(lfs);
}
#ifndef LFS_READONLY
static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
lfs_t *lfs = (lfs_t*)p;
lfs_block_t off = ((block - lfs->lookahead.start)
lfs_block_t off = ((block - lfs->free.off)
+ lfs->block_count) % lfs->block_count;
if (off < lfs->lookahead.size) {
lfs->lookahead.buffer[off / 8] |= 1U << (off % 8);
if (off < lfs->free.size) {
lfs->free.buffer[off / 32] |= 1U << (off % 32);
}
return 0;
}
#endif
// indicate allocated blocks have been committed into the filesystem, this
// is to prevent blocks from being garbage collected in the middle of a
// commit operation
static void lfs_alloc_ack(lfs_t *lfs) {
lfs->free.ack = lfs->block_count;
}
// drop the lookahead buffer, this is done during mounting and failed
// traversals in order to avoid invalid lookahead state
static void lfs_alloc_drop(lfs_t *lfs) {
lfs->free.size = 0;
lfs->free.i = 0;
lfs_alloc_ack(lfs);
}
#ifndef LFS_READONLY
static int lfs_alloc_scan(lfs_t *lfs) {
// move lookahead buffer to the first unused block
//
// note we limit the lookahead buffer to at most the amount of blocks
// checkpointed, this prevents the math in lfs_alloc from underflowing
lfs->lookahead.start = (lfs->lookahead.start + lfs->lookahead.next)
% lfs->block_count;
lfs->lookahead.next = 0;
lfs->lookahead.size = lfs_min(
8*lfs->cfg->lookahead_size,
lfs->lookahead.ckpoint);
static int lfs_fs_rawgc(lfs_t *lfs) {
// Move free offset to the first unused block (lfs->free.i)
// lfs->free.i is equal to lfs->free.size when all blocks are used
lfs->free.off = (lfs->free.off + lfs->free.i) % lfs->block_count;
lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
lfs->free.i = 0;
// find mask of free blocks from tree
memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
if (err) {
lfs_alloc_drop(lfs);
@@ -652,49 +645,36 @@ static int lfs_alloc_scan(lfs_t *lfs) {
#ifndef LFS_READONLY
static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
while (true) {
// scan our lookahead buffer for free blocks
while (lfs->lookahead.next < lfs->lookahead.size) {
if (!(lfs->lookahead.buffer[lfs->lookahead.next / 8]
& (1U << (lfs->lookahead.next % 8)))) {
while (lfs->free.i != lfs->free.size) {
lfs_block_t off = lfs->free.i;
lfs->free.i += 1;
lfs->free.ack -= 1;
if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
// found a free block
*block = (lfs->lookahead.start + lfs->lookahead.next)
% lfs->block_count;
*block = (lfs->free.off + off) % lfs->block_count;
// eagerly find next free block to maximize how many blocks
// lfs_alloc_ckpoint makes available for scanning
while (true) {
lfs->lookahead.next += 1;
lfs->lookahead.ckpoint -= 1;
if (lfs->lookahead.next >= lfs->lookahead.size
|| !(lfs->lookahead.buffer[lfs->lookahead.next / 8]
& (1U << (lfs->lookahead.next % 8)))) {
return 0;
}
// eagerly find next off so an alloc ack can
// discredit old lookahead blocks
while (lfs->free.i != lfs->free.size &&
(lfs->free.buffer[lfs->free.i / 32]
& (1U << (lfs->free.i % 32)))) {
lfs->free.i += 1;
lfs->free.ack -= 1;
}
}
lfs->lookahead.next += 1;
lfs->lookahead.ckpoint -= 1;
return 0;
}
}
// In order to keep our block allocator from spinning forever when our
// filesystem is full, we mark points where there are no in-flight
// allocations with a checkpoint before starting a set of allocations.
//
// If we've looked at all blocks since the last checkpoint, we report
// the filesystem as out of storage.
//
if (lfs->lookahead.ckpoint <= 0) {
LFS_ERROR("No more free space 0x%"PRIx32,
(lfs->lookahead.start + lfs->lookahead.next)
% lfs->cfg->block_count);
// check if we have looked at all blocks since last ack
if (lfs->free.ack == 0) {
LFS_ERROR("No more free space %"PRIu32,
lfs->free.i + lfs->free.off);
return LFS_ERR_NOSPC;
}
// No blocks in our lookahead buffer, we need to scan the filesystem for
// unused blocks in the next lookahead window.
int err = lfs_alloc_scan(lfs);
int err = lfs_fs_rawgc(lfs);
if(err) {
return err;
}
@@ -2606,7 +2586,7 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
}
// build up new directory
lfs_alloc_ckpoint(lfs);
lfs_alloc_ack(lfs);
lfs_mdir_t dir;
err = lfs_dir_alloc(lfs, &dir);
if (err) {
@@ -3292,7 +3272,7 @@ relocate:
#ifndef LFS_READONLY
static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
file->off = file->pos;
lfs_alloc_ckpoint(lfs);
lfs_alloc_ack(lfs);
int err = lfs_file_relocate(lfs, file);
if (err) {
return err;
@@ -3555,7 +3535,7 @@ static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
}
// extend file with new blocks
lfs_alloc_ckpoint(lfs);
lfs_alloc_ack(lfs);
int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
file->block, file->pos,
&file->block, &file->off);
@@ -3598,7 +3578,7 @@ relocate:
data += diff;
nsize -= diff;
lfs_alloc_ckpoint(lfs);
lfs_alloc_ack(lfs);
}
return size;
@@ -4173,14 +4153,6 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
// wear-leveling.
LFS_ASSERT(lfs->cfg->block_cycles != 0);
// check that compact_thresh makes sense
//
// metadata can't be compacted below block_size/2, and metadata can't
// exceed a block_size
LFS_ASSERT(lfs->cfg->compact_thresh == 0
|| lfs->cfg->compact_thresh >= lfs->cfg->block_size/2);
LFS_ASSERT(lfs->cfg->compact_thresh == (lfs_size_t)-1
|| lfs->cfg->compact_thresh <= lfs->cfg->block_size);
// setup read cache
if (lfs->cfg->read_buffer) {
@@ -4208,14 +4180,15 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
lfs_cache_zero(lfs, &lfs->rcache);
lfs_cache_zero(lfs, &lfs->pcache);
// setup lookahead buffer, note mount finishes initializing this after
// we establish a decent pseudo-random seed
// setup lookahead; size must be a multiple of 64 bits, buffer 32-bit aligned
LFS_ASSERT(lfs->cfg->lookahead_size > 0);
LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
(uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
if (lfs->cfg->lookahead_buffer) {
lfs->lookahead.buffer = lfs->cfg->lookahead_buffer;
lfs->free.buffer = lfs->cfg->lookahead_buffer;
} else {
lfs->lookahead.buffer = lfs_malloc(lfs->cfg->lookahead_size);
if (!lfs->lookahead.buffer) {
lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size);
if (!lfs->free.buffer) {
err = LFS_ERR_NOMEM;
goto cleanup;
}
@@ -4272,7 +4245,7 @@ static int lfs_deinit(lfs_t *lfs) {
}
if (!lfs->cfg->lookahead_buffer) {
lfs_free(lfs->lookahead.buffer);
lfs_free(lfs->free.buffer);
}
return 0;
@@ -4292,12 +4265,12 @@ static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
LFS_ASSERT(cfg->block_count != 0);
// create free lookahead
memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
lfs->lookahead.start = 0;
lfs->lookahead.size = lfs_min(8*lfs->cfg->lookahead_size,
memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
lfs->free.off = 0;
lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size,
lfs->block_count);
lfs->lookahead.next = 0;
lfs_alloc_ckpoint(lfs);
lfs->free.i = 0;
lfs_alloc_ack(lfs);
// create root dir
lfs_mdir_t root;
@@ -4505,7 +4478,7 @@ static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
// setup free lookahead, to distribute allocations uniformly across
// boots, we start the allocator at a random location
lfs->lookahead.start = lfs->seed % lfs->block_count;
lfs->free.off = lfs->seed % lfs->block_count;
lfs_alloc_drop(lfs);
return 0;
@@ -5072,57 +5045,6 @@ static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs) {
return size;
}
// explicit garbage collection
#ifndef LFS_READONLY
static int lfs_fs_rawgc(lfs_t *lfs) {
// force consistency, even if we're not necessarily going to write,
// because this function is supposed to take care of janitorial work,
// isn't it?
int err = lfs_fs_forceconsistency(lfs);
if (err) {
return err;
}
// try to compact metadata pairs, note we can't really accomplish
// anything if compact_thresh doesn't at least leave a prog_size
// available
if (lfs->cfg->compact_thresh
< lfs->cfg->block_size - lfs->cfg->prog_size) {
// iterate over all mdirs
lfs_mdir_t mdir = {.tail = {0, 1}};
while (!lfs_pair_isnull(mdir.tail)) {
err = lfs_dir_fetch(lfs, &mdir, mdir.tail);
if (err) {
return err;
}
// not erased? exceeds our compaction threshold?
if (!mdir.erased || ((lfs->cfg->compact_thresh == 0)
? mdir.off > lfs->cfg->block_size - lfs->cfg->block_size/8
: mdir.off > lfs->cfg->compact_thresh)) {
// the easiest way to trigger a compaction is to mark
// the mdir as unerased and add an empty commit
mdir.erased = false;
err = lfs_dir_commit(lfs, &mdir, NULL, 0);
if (err) {
return err;
}
}
}
}
// try to populate the lookahead buffer, unless it's already full
if (lfs->lookahead.size < 8*lfs->cfg->lookahead_size) {
err = lfs_alloc_scan(lfs);
if (err) {
return err;
}
}
return 0;
}
#endif
#ifndef LFS_READONLY
static int lfs_fs_rawgrow(lfs_t *lfs, lfs_size_t block_count) {
// shrinking is not supported
@@ -5529,10 +5451,10 @@ static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
lfs->lfs1->root[1] = LFS_BLOCK_NULL;
// setup free lookahead
lfs->lookahead.start = 0;
lfs->lookahead.size = 0;
lfs->lookahead.next = 0;
lfs_alloc_ckpoint(lfs);
lfs->free.off = 0;
lfs->free.size = 0;
lfs->free.i = 0;
lfs_alloc_ack(lfs);
// load superblock
lfs1_dir_t dir;
@@ -6328,22 +6250,6 @@ int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
return err;
}
#ifndef LFS_READONLY
int lfs_fs_mkconsistent(lfs_t *lfs) {
int err = LFS_LOCK(lfs->cfg);
if (err) {
return err;
}
LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs);
err = lfs_fs_rawmkconsistent(lfs);
LFS_TRACE("lfs_fs_mkconsistent -> %d", err);
LFS_UNLOCK(lfs->cfg);
return err;
}
#endif
#ifndef LFS_READONLY
int lfs_fs_gc(lfs_t *lfs) {
int err = LFS_LOCK(lfs->cfg);
@@ -6360,6 +6266,22 @@ int lfs_fs_gc(lfs_t *lfs) {
}
#endif
#ifndef LFS_READONLY
int lfs_fs_mkconsistent(lfs_t *lfs) {
int err = LFS_LOCK(lfs->cfg);
if (err) {
return err;
}
LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs);
err = lfs_fs_rawmkconsistent(lfs);
LFS_TRACE("lfs_fs_mkconsistent -> %d", err);
LFS_UNLOCK(lfs->cfg);
return err;
}
#endif
#ifndef LFS_READONLY
int lfs_fs_grow(lfs_t *lfs, lfs_size_t block_count) {
int err = LFS_LOCK(lfs->cfg);
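
For reference, a standalone sketch of the lookahead bitmap indexing used by
the allocator above (the word-indexed free.* variant); BLOCK_COUNT,
LOOKAHEAD_BITS, and mark_used are hypothetical demo names, not littlefs API:

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_COUNT    128  // assumed block device geometry
    #define LOOKAHEAD_BITS  64  // bits in the lookahead window

    static uint32_t bitmap[LOOKAHEAD_BITS / 32];

    // mark a block as in-use if it falls inside the current window,
    // mirroring the offset/bit math in lfs_alloc_lookahead
    static void mark_used(uint32_t window_off, uint32_t block) {
        uint32_t off = (block - window_off + BLOCK_COUNT) % BLOCK_COUNT;
        if (off < LOOKAHEAD_BITS) {
            bitmap[off / 32] |= 1U << (off % 32);
        }
    }

    int main(void) {
        // a window starting at block 100 wraps past the end of the
        // device, so block 10 lands at (10 - 100 + 128) % 128 = 38
        mark_used(100, 10);
        printf("bit 38 set: %d\n", !!(bitmap[38 / 32] & (1U << (38 % 32))));
        return 0;
    }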

lfs.h

@@ -226,20 +226,9 @@ struct lfs_config {
// Size of the lookahead buffer in bytes. A larger lookahead buffer
// increases the number of blocks found during an allocation pass. The
// lookahead buffer is stored as a compact bitmap, so each byte of RAM
// can track 8 blocks.
// can track 8 blocks. Must be a multiple of 8.
lfs_size_t lookahead_size;
// Threshold for metadata compaction during lfs_fs_gc in bytes. Metadata
// pairs that exceed this threshold will be compacted during lfs_fs_gc.
// Defaults to ~88% block_size when zero, though the default may change
// in the future.
//
// Note this only affects lfs_fs_gc. Normal compactions still only occur
// when full.
//
// Set to -1 to disable metadata compaction during lfs_fs_gc.
lfs_size_t compact_thresh;
// Optional statically allocated read buffer. Must be cache_size.
// By default lfs_malloc is used to allocate this buffer.
void *read_buffer;
@@ -248,8 +237,9 @@ struct lfs_config {
// By default lfs_malloc is used to allocate this buffer.
void *prog_buffer;
// Optional statically allocated lookahead buffer. Must be lookahead_size.
// By default lfs_malloc is used to allocate this buffer.
// Optional statically allocated lookahead buffer. Must be lookahead_size
// and aligned to a 32-bit boundary. By default lfs_malloc is used to
// allocate this buffer.
void *lookahead_buffer;
// Optional upper limit on length of file names in bytes. No downside for
@@ -440,13 +430,13 @@ typedef struct lfs {
lfs_gstate_t gdisk;
lfs_gstate_t gdelta;
struct lfs_lookahead {
lfs_block_t start;
struct lfs_free {
lfs_block_t off;
lfs_block_t size;
lfs_block_t next;
lfs_block_t ckpoint;
uint8_t *buffer;
} lookahead;
lfs_block_t i;
lfs_block_t ack;
uint32_t *buffer;
} free;
const struct lfs_config *cfg;
lfs_size_t block_count;
@@ -722,6 +712,18 @@ lfs_ssize_t lfs_fs_size(lfs_t *lfs);
// Returns a negative error code on failure.
int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
// Attempt to proactively find free blocks
//
// Calling this function is not required, but may allow the offloading of
// the expensive block allocation scan to a less time-critical code path.
//
// Note: littlefs currently does not persist any found free blocks to disk.
// This may change in the future.
//
// Returns a negative error code on failure. Finding no free blocks is
// not an error.
int lfs_fs_gc(lfs_t *lfs);
#ifndef LFS_READONLY
// Attempt to make the filesystem consistent and ready for writing
//
@@ -734,24 +736,6 @@ int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
int lfs_fs_mkconsistent(lfs_t *lfs);
#endif
#ifndef LFS_READONLY
// Attempt any janitorial work
//
// This currently:
// 1. Calls mkconsistent if not already consistent
// 2. Compacts metadata > compact_thresh
// 3. Populates the block allocator
//
// Though additional janitorial work may be added in the future.
//
// Calling this function is not required, but may allow the offloading of
// expensive janitorial work to a less time-critical code path.
//
// Returns a negative error code on failure. Accomplishing nothing is not
// an error.
int lfs_fs_gc(lfs_t *lfs);
#endif
#ifndef LFS_READONLY
// Grows the filesystem to a new size, updating the superblock with the new
// block count.
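
Tying the lfs_fs_gc documentation above to usage, a sketch of offloading the
allocation scan to a non-time-critical path; the lfs handle and idle_hook
are hypothetical application names:

    #include "lfs.h"

    extern lfs_t lfs; // mounted elsewhere

    // called from an idle task or similar low-priority context
    void idle_hook(void) {
        int err = lfs_fs_gc(&lfs);
        if (err) {
            // negative error code; finding no free blocks is not an error
        }
    }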

lfs_util.c

@@ -11,6 +11,8 @@
#ifndef LFS_CONFIG
// If user provides their own CRC impl we don't need this
#ifndef LFS_CRC
// Software CRC implementation with small lookup table
uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
static const uint32_t rtable[16] = {
@@ -29,6 +31,7 @@ uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
return crc;
}
#endif
#endif
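
For context, the elided body above is a nibble-at-a-time CRC-32; an
equivalent standalone version consistent with the 16-entry rtable shown
(reflected polynomial 0xedb88320, processing 4 bits per step to keep the
table small) might look like this sketch, with sw_crc32 a hypothetical name:

    #include <stddef.h>
    #include <stdint.h>

    uint32_t sw_crc32(uint32_t crc, const void *buffer, size_t size) {
        static const uint32_t rtable[16] = {
            0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
            0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
            0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
            0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c,
        };

        const uint8_t *data = buffer;
        for (size_t i = 0; i < size; i++) {
            crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 0)) & 0xf];
            crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 4)) & 0xf];
        }
        return crc;
    }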

lfs_util.h

@@ -212,14 +212,20 @@ static inline uint32_t lfs_tobe32(uint32_t a) {
}
// Calculate CRC-32 with polynomial = 0x04c11db7
#ifdef LFS_CRC
static inline uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
return LFS_CRC(crc, buffer, size);
}
#else
uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size);
#endif
// Allocate memory, only used if buffers are not provided to littlefs
//
// littlefs currently has no alignment requirements, as it only allocates
// byte-level buffers.
// Note, memory must be 64-bit aligned
static inline void *lfs_malloc(size_t size) {
#ifndef LFS_NO_MALLOC
#if defined(LFS_MALLOC)
return LFS_MALLOC(size);
#elif !defined(LFS_NO_MALLOC)
return malloc(size);
#else
(void)size;
@@ -229,7 +235,9 @@ static inline void *lfs_malloc(size_t size) {
// Deallocate memory, only used if buffers are not provided to littlefs
static inline void lfs_free(void *p) {
#ifndef LFS_NO_MALLOC
#if defined(LFS_FREE)
LFS_FREE(p);
#elif !defined(LFS_NO_MALLOC)
free(p);
#else
(void)p;
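
Per the comments above, lfs_malloc/lfs_free are only reached when buffers
aren't provided; a sketch of the fully static alternative (sizes assume
cache_size=256 and lookahead_size=16, both hypothetical choices):

    #include <stdint.h>

    // handed to lfs_config as .read_buffer, .prog_buffer, and
    // .lookahead_buffer; with LFS_NO_MALLOC defined littlefs then never
    // allocates, though file handles also need lfs_file_opencfg with a
    // static .buffer
    static uint8_t read_buf[256];
    static uint8_t prog_buf[256];
    static uint32_t lookahead_buf[16 / sizeof(uint32_t)]; // 32-bit aligned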

runners/bench_runner.c

@@ -1321,7 +1321,6 @@ void perm_run(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
};
struct lfs_emubd_config bdcfg = {

runners/bench_runner.h

@@ -95,12 +95,11 @@ intmax_t bench_define(size_t define);
#define BLOCK_COUNT_i 5
#define CACHE_SIZE_i 6
#define LOOKAHEAD_SIZE_i 7
#define COMPACT_THRESH_i 8
#define BLOCK_CYCLES_i 9
#define ERASE_VALUE_i 10
#define ERASE_CYCLES_i 11
#define BADBLOCK_BEHAVIOR_i 12
#define POWERLOSS_BEHAVIOR_i 13
#define BLOCK_CYCLES_i 8
#define ERASE_VALUE_i 9
#define ERASE_CYCLES_i 10
#define BADBLOCK_BEHAVIOR_i 11
#define POWERLOSS_BEHAVIOR_i 12
#define READ_SIZE bench_define(READ_SIZE_i)
#define PROG_SIZE bench_define(PROG_SIZE_i)
@@ -110,7 +109,6 @@ intmax_t bench_define(size_t define);
#define BLOCK_COUNT bench_define(BLOCK_COUNT_i)
#define CACHE_SIZE bench_define(CACHE_SIZE_i)
#define LOOKAHEAD_SIZE bench_define(LOOKAHEAD_SIZE_i)
#define COMPACT_THRESH bench_define(COMPACT_THRESH_i)
#define BLOCK_CYCLES bench_define(BLOCK_CYCLES_i)
#define ERASE_VALUE bench_define(ERASE_VALUE_i)
#define ERASE_CYCLES bench_define(ERASE_CYCLES_i)
@@ -126,7 +124,6 @@ intmax_t bench_define(size_t define);
BENCH_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1))\
BENCH_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
BENCH_DEF(LOOKAHEAD_SIZE, 16) \
BENCH_DEF(COMPACT_THRESH, 0) \
BENCH_DEF(BLOCK_CYCLES, -1) \
BENCH_DEF(ERASE_VALUE, 0xff) \
BENCH_DEF(ERASE_CYCLES, 0) \
@@ -134,7 +131,7 @@ intmax_t bench_define(size_t define);
BENCH_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP)
#define BENCH_GEOMETRY_DEFINE_COUNT 4
#define BENCH_IMPLICIT_DEFINE_COUNT 14
#define BENCH_IMPLICIT_DEFINE_COUNT 13
#endif

runners/test_runner.c

@@ -1346,7 +1346,6 @@ static void run_powerloss_none(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
@@ -1423,7 +1422,6 @@ static void run_powerloss_linear(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
@@ -1517,7 +1515,6 @@ static void run_powerloss_log(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
@@ -1609,7 +1606,6 @@ static void run_powerloss_cycles(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
@@ -1799,7 +1795,6 @@ static void run_powerloss_exhaustive(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif

runners/test_runner.h

@@ -88,13 +88,12 @@ intmax_t test_define(size_t define);
#define BLOCK_COUNT_i 5
#define CACHE_SIZE_i 6
#define LOOKAHEAD_SIZE_i 7
#define COMPACT_THRESH_i 8
#define BLOCK_CYCLES_i 9
#define ERASE_VALUE_i 10
#define ERASE_CYCLES_i 11
#define BADBLOCK_BEHAVIOR_i 12
#define POWERLOSS_BEHAVIOR_i 13
#define DISK_VERSION_i 14
#define BLOCK_CYCLES_i 8
#define ERASE_VALUE_i 9
#define ERASE_CYCLES_i 10
#define BADBLOCK_BEHAVIOR_i 11
#define POWERLOSS_BEHAVIOR_i 12
#define DISK_VERSION_i 13
#define READ_SIZE TEST_DEFINE(READ_SIZE_i)
#define PROG_SIZE TEST_DEFINE(PROG_SIZE_i)
@@ -104,7 +103,6 @@ intmax_t test_define(size_t define);
#define BLOCK_COUNT TEST_DEFINE(BLOCK_COUNT_i)
#define CACHE_SIZE TEST_DEFINE(CACHE_SIZE_i)
#define LOOKAHEAD_SIZE TEST_DEFINE(LOOKAHEAD_SIZE_i)
#define COMPACT_THRESH TEST_DEFINE(COMPACT_THRESH_i)
#define BLOCK_CYCLES TEST_DEFINE(BLOCK_CYCLES_i)
#define ERASE_VALUE TEST_DEFINE(ERASE_VALUE_i)
#define ERASE_CYCLES TEST_DEFINE(ERASE_CYCLES_i)
@@ -121,7 +119,6 @@ intmax_t test_define(size_t define);
TEST_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1)) \
TEST_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
TEST_DEF(LOOKAHEAD_SIZE, 16) \
TEST_DEF(COMPACT_THRESH, 0) \
TEST_DEF(BLOCK_CYCLES, -1) \
TEST_DEF(ERASE_VALUE, 0xff) \
TEST_DEF(ERASE_CYCLES, 0) \
@@ -130,7 +127,7 @@ intmax_t test_define(size_t define);
TEST_DEF(DISK_VERSION, 0)
#define TEST_GEOMETRY_DEFINE_COUNT 4
#define TEST_IMPLICIT_DEFINE_COUNT 15
#define TEST_IMPLICIT_DEFINE_COUNT 14
#endif

tests/test_alloc.toml

@@ -7,7 +7,6 @@ if = 'BLOCK_CYCLES == -1'
defines.FILES = 3
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
defines.GC = [false, true]
defines.COMPACT_THRESH = ['-1', '0', 'BLOCK_SIZE/2']
code = '''
const char *names[] = {"bacon", "eggs", "pancakes"};
lfs_file_t files[FILES];
@@ -61,7 +60,6 @@ code = '''
defines.FILES = 3
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
defines.GC = [false, true]
defines.COMPACT_THRESH = ['-1', '0', 'BLOCK_SIZE/2']
code = '''
const char *names[] = {"bacon", "eggs", "pancakes"};

tests/test_orphans.toml

@@ -98,7 +98,7 @@ code = '''
lfs_mount(&lfs, cfg) => 0;
// create an orphan
lfs_mdir_t orphan;
lfs_alloc_ckpoint(&lfs);
lfs_alloc_ack(&lfs);
lfs_dir_alloc(&lfs, &orphan) => 0;
lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;
@@ -170,7 +170,7 @@ code = '''
lfs_mount(&lfs, cfg) => 0;
// create an orphan
lfs_mdir_t orphan;
lfs_alloc_ckpoint(&lfs);
lfs_alloc_ack(&lfs);
lfs_dir_alloc(&lfs, &orphan) => 0;
lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;
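
As a closing illustration of the public API these orphan tests exercise, a
sketch of forcing consistency eagerly after mount; recover_after_powerloss
and the extern objects are hypothetical application names:

    #include "lfs.h"

    extern lfs_t lfs;
    extern const struct lfs_config cfg;

    int recover_after_powerloss(void) {
        int err = lfs_mount(&lfs, &cfg);
        if (err) {
            return err;
        }
        // mount defers janitorial work; lfs_fs_mkconsistent performs any
        // pending deorphaning now rather than on the first write
        return lfs_fs_mkconsistent(&lfs);
    }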