Implemented exhaustive testing of n nested powerlosses

As expected, this takes a significant amount of time (~10 minutes for all
1-deep powerlosses, >10 hours for all 2-deep powerlosses), but this may be
reducible in the future by optimizing the tests themselves for powerloss
testing. Currently test_files does a lot of work that doesn't really have
testing value.
Christopher Haster
2022-08-25 11:06:19 -05:00
parent 552336eba9
commit 5279fc6022
3 changed files with 292 additions and 144 deletions
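
For a rough sense of why the runtime jumps so sharply with nesting depth: if a test case performs k prog/erase operations, exhaustive depth-1 testing replays roughly one branch per operation, and depth-2 testing roughly one branch per pair of operations. A back-of-envelope sketch of that growth (an assumed model, not a measurement from the runner):

#include <stdio.h>

// rough model: number of distinct power-loss paths of a given depth,
// assuming every run exposes k power-loss points (k choose depth)
static unsigned long long powerloss_runs(unsigned long long k, unsigned depth) {
    unsigned long long runs = 1;
    for (unsigned i = 0; i < depth; i++) {
        runs = runs * (k - i) / (i + 1);
    }
    return runs;
}

int main(void) {
    // e.g. a test case that performs ~1000 prog/erase operations
    printf("depth 1: %llu runs\n", powerloss_runs(1000, 1)); // 1000
    printf("depth 2: %llu runs\n", powerloss_runs(1000, 2)); // 499500
    return 0;
}

Growth like this is also why trimming work without testing value out of tests such as test_files pays off quickly once powerlosses are nested.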


@@ -29,68 +29,65 @@
// Note we can only modify a block if we have exclusive access to it (rc == 1)
//
// TODO
__attribute__((unused))
static void lfs_testbd_incblock(lfs_testbd_t *bd, lfs_block_t block) {
if (bd->blocks[block]) {
bd->blocks[block]->rc += 1;
static lfs_testbd_block_t *lfs_testbd_incblock(lfs_testbd_block_t *block) {
if (block) {
block->rc += 1;
}
return block;
}
static void lfs_testbd_decblock(lfs_testbd_t *bd, lfs_block_t block) {
if (bd->blocks[block]) {
bd->blocks[block]->rc -= 1;
if (bd->blocks[block]->rc == 0) {
free(bd->blocks[block]);
bd->blocks[block] = NULL;
static void lfs_testbd_decblock(lfs_testbd_block_t *block) {
if (block) {
block->rc -= 1;
if (block->rc == 0) {
free(block);
}
}
}
static const lfs_testbd_block_t *lfs_testbd_getblock(lfs_testbd_t *bd,
lfs_block_t block) {
return bd->blocks[block];
}
static lfs_testbd_block_t *lfs_testbd_mutblock(lfs_testbd_t *bd,
lfs_block_t block, lfs_size_t block_size) {
if (bd->blocks[block] && bd->blocks[block]->rc == 1) {
static lfs_testbd_block_t *lfs_testbd_mutblock(
const struct lfs_config *cfg,
lfs_testbd_block_t **block) {
lfs_testbd_block_t *block_ = *block;
if (block_ && block_->rc == 1) {
// rc == 1? can modify
return bd->blocks[block];
return block_;
} else if (bd->blocks[block]) {
} else if (block_) {
// rc > 1? need to create a copy
lfs_testbd_block_t *b = malloc(
sizeof(lfs_testbd_block_t) + block_size);
if (!b) {
lfs_testbd_block_t *nblock = malloc(
sizeof(lfs_testbd_block_t) + cfg->block_size);
if (!nblock) {
return NULL;
}
memcpy(b, bd->blocks[block], sizeof(lfs_testbd_block_t) + block_size);
b->rc = 1;
memcpy(nblock, block_,
sizeof(lfs_testbd_block_t) + cfg->block_size);
nblock->rc = 1;
lfs_testbd_decblock(bd, block);
bd->blocks[block] = b;
return b;
lfs_testbd_decblock(block_);
*block = nblock;
return nblock;
} else {
// no block? need to allocate
lfs_testbd_block_t *b = malloc(
sizeof(lfs_testbd_block_t) + block_size);
if (!b) {
lfs_testbd_block_t *nblock = malloc(
sizeof(lfs_testbd_block_t) + cfg->block_size);
if (!nblock) {
return NULL;
}
b->rc = 1;
b->wear = 0;
nblock->rc = 1;
nblock->wear = 0;
// zero for consistency
memset(b->data,
lfs_testbd_t *bd = cfg->context;
memset(nblock->data,
(bd->cfg->erase_value != -1) ? bd->cfg->erase_value : 0,
block_size);
cfg->block_size);
bd->blocks[block] = b;
return b;
*block = nblock;
return nblock;
}
}
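
The helpers above form a small reference-counted copy-on-write scheme: copies of the block device share lfs_testbd_block_t pointers, and a block is only duplicated when a writer touches a block whose rc is greater than 1. A stripped-down, self-contained sketch of the same pattern (the cow_* names are mine, not littlefs API):

#include <stdlib.h>
#include <string.h>

// simplified stand-in for lfs_testbd_block_t: a refcount plus the block data
typedef struct cow_block {
    unsigned rc;
    unsigned char data[];
} cow_block_t;

// a new owner shares the block by bumping its refcount
static cow_block_t *cow_inc(cow_block_t *b) {
    if (b) {
        b->rc += 1;
    }
    return b;
}

// an owner drops the block, freeing it when the last reference goes away
static void cow_dec(cow_block_t *b) {
    if (b) {
        b->rc -= 1;
        if (b->rc == 0) {
            free(b);
        }
    }
}

// return a block we are allowed to write, duplicating it if it is shared
static cow_block_t *cow_mut(cow_block_t **slot, size_t size) {
    cow_block_t *b = *slot;
    if (b && b->rc == 1) {
        // exclusive access, modify in place
        return b;
    }
    cow_block_t *nb = malloc(sizeof(cow_block_t) + size);
    if (!nb) {
        return NULL;
    }
    if (b) {
        // shared, create a private copy and drop our reference to the original
        memcpy(nb, b, sizeof(cow_block_t) + size);
        cow_dec(b);
    } else {
        // unallocated, start from an erased state
        memset(nb->data, 0, size);
    }
    nb->rc = 1;
    *slot = nb;
    return nb;
}

int main(void) {
    cow_block_t *a = NULL;
    cow_mut(&a, 16)->data[0] = 1;   // allocate and write through owner a
    cow_block_t *b = cow_inc(a);    // owner b shares the same block
    cow_mut(&b, 16)->data[0] = 2;   // rc > 1, so b gets a private copy
    // a->data[0] is still 1, b->data[0] is 2
    cow_dec(a);
    cow_dec(b);
    return 0;
}

The real lfs_testbd_mutblock additionally pulls the block size and erase value out of the lfs_config/lfs_testbd_config, which is why its signature changed to take cfg and a pointer into bd->blocks.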
@@ -129,22 +126,25 @@ int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
// setup testing things
bd->power_cycles = bd->cfg->power_cycles;
bd->disk_fd = -1;
bd->disk_scratch_block = NULL;
bd->branches = NULL;
bd->branch_capacity = 0;
bd->branch_count = 0;
bd->disk = NULL;
if (bd->cfg->disk_path) {
bd->disk = malloc(sizeof(lfs_testbd_disk_t));
if (!bd->disk) {
LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
bd->disk->rc = 1;
bd->disk->scratch = NULL;
#ifdef _WIN32
bd->disk_fd = open(bd->cfg->disk_path,
bd->disk->fd = open(bd->cfg->disk_path,
O_RDWR | O_CREAT | O_BINARY, 0666);
#else
bd->disk_fd = open(bd->cfg->disk_path,
bd->disk->fd = open(bd->cfg->disk_path,
O_RDWR | O_CREAT, 0666);
#endif
if (bd->disk_fd < 0) {
if (bd->disk->fd < 0) {
int err = -errno;
LFS_TESTBD_TRACE("lfs_testbd_create -> %d", err);
return err;
@@ -153,12 +153,12 @@ int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
// if we're emulating erase values, we can keep a block around in
// memory of just the erase state to speed up emulated erases
if (bd->cfg->erase_value != -1) {
bd->disk_scratch_block = malloc(cfg->block_size);
if (!bd->disk_scratch_block) {
bd->disk->scratch = malloc(cfg->block_size);
if (!bd->disk->scratch) {
LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
memset(bd->disk_scratch_block,
memset(bd->disk->scratch,
bd->cfg->erase_value,
cfg->block_size);
}
@@ -191,16 +191,18 @@ int lfs_testbd_destroy(const struct lfs_config *cfg) {
// decrement reference counts
for (lfs_block_t i = 0; i < cfg->block_count; i++) {
lfs_testbd_decblock(bd, i);
lfs_testbd_decblock(bd->blocks[i]);
}
// free memory
free(bd->blocks);
free(bd->branches);
if (bd->disk_fd >= 0) {
close(bd->disk_fd);
free(bd->disk_scratch_block);
// clean up other resources
if (bd->disk) {
bd->disk->rc -= 1;
if (bd->disk->rc == 0) {
close(bd->disk->fd);
free(bd->disk->scratch);
free(bd->disk);
}
}
LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", 0);
@@ -225,7 +227,7 @@ int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block,
LFS_ASSERT(off+size <= cfg->block_size);
// get the block
const lfs_testbd_block_t *b = lfs_testbd_getblock(bd, block);
const lfs_testbd_block_t *b = bd->blocks[block];
if (b) {
// block bad?
if (bd->cfg->erase_cycles && b->wear >= bd->cfg->erase_cycles &&
@@ -273,7 +275,7 @@ int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
LFS_ASSERT(off+size <= cfg->block_size);
// get the block
lfs_testbd_block_t *b = lfs_testbd_mutblock(bd, block, cfg->block_size);
lfs_testbd_block_t *b = lfs_testbd_mutblock(cfg, &bd->blocks[block]);
if (!b) {
LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
@@ -305,8 +307,8 @@ int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
memcpy(&b->data[off], buffer, size);
// mirror to disk file?
if (bd->disk_fd >= 0) {
off_t res1 = lseek(bd->disk_fd,
if (bd->disk) {
off_t res1 = lseek(bd->disk->fd,
(off_t)block*cfg->block_size + (off_t)off,
SEEK_SET);
if (res1 < 0) {
@@ -315,7 +317,7 @@ int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
return err;
}
ssize_t res2 = write(bd->disk_fd, buffer, size);
ssize_t res2 = write(bd->disk->fd, buffer, size);
if (res2 < 0) {
int err = -errno;
LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", err);
@@ -344,15 +346,6 @@ int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
}
}
// // track power-loss branch?
// if (bd->cfg->track_branches) {
// int err = lfs_testbd_trackbranch(bd);
// if (err) {
// LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", err);
// return err;
// }
// }
LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
return 0;
}
@@ -365,7 +358,7 @@ int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) {
LFS_ASSERT(block < cfg->block_count);
// get the block
lfs_testbd_block_t *b = lfs_testbd_mutblock(bd, block, cfg->block_size);
lfs_testbd_block_t *b = lfs_testbd_mutblock(cfg, &bd->blocks[block]);
if (!b) {
LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
@@ -394,8 +387,8 @@ int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) {
memset(b->data, bd->cfg->erase_value, cfg->block_size);
// mirror to disk file?
if (bd->disk_fd >= 0) {
off_t res1 = lseek(bd->disk_fd,
if (bd->disk) {
off_t res1 = lseek(bd->disk->fd,
(off_t)block*cfg->block_size,
SEEK_SET);
if (res1 < 0) {
@@ -404,8 +397,8 @@ int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) {
return err;
}
ssize_t res2 = write(bd->disk_fd,
bd->disk_scratch_block,
ssize_t res2 = write(bd->disk->fd,
bd->disk->scratch,
cfg->block_size);
if (res2 < 0) {
int err = -errno;
@@ -436,15 +429,6 @@ int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) {
}
}
// // track power-loss branch?
// if (bd->cfg->track_branches) {
// int err = lfs_testbd_trackbranch(bd);
// if (err) {
// LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", err);
// return err;
// }
// }
LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
return 0;
}
@@ -472,7 +456,7 @@ lfs_testbd_swear_t lfs_testbd_getwear(const struct lfs_config *cfg,
// get the wear
lfs_testbd_wear_t wear;
const lfs_testbd_block_t *b = lfs_testbd_getblock(bd, block);
const lfs_testbd_block_t *b = bd->blocks[block];
if (b) {
wear = b->wear;
} else {
@@ -492,7 +476,7 @@ int lfs_testbd_setwear(const struct lfs_config *cfg,
LFS_ASSERT(block < cfg->block_count);
// set the wear
lfs_testbd_block_t *b = lfs_testbd_mutblock(bd, block, cfg->block_size);
lfs_testbd_block_t *b = lfs_testbd_mutblock(cfg, &bd->blocks[block]);
if (!b) {
LFS_TESTBD_TRACE("lfs_testbd_setwear -> %"PRIu32, LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
@@ -524,23 +508,30 @@ int lfs_testbd_setpowercycles(const struct lfs_config *cfg,
return 0;
}
//int lfs_testbd_getbranch(const struct lfs_config *cfg,
// lfs_testbd_powercycles_t branch, lfs_testbd_t *bd) {
// LFS_TESTBD_TRACE("lfs_testbd_getbranch(%p, %zu, %p)",
// (void*)cfg, branch, bd);
// lfs_testbd_t *bd = cfg->context;
//
// // TODO
//
// LFS_TESTBD_TRACE("lfs_testbd_getbranch -> %d", 0);
// return 0;
//}
lfs_testbd_spowercycles_t lfs_testbd_getbranchcount(
const struct lfs_config *cfg) {
LFS_TESTBD_TRACE("lfs_testbd_getbranchcount(%p)", (void*)cfg);
int lfs_testbd_copy(const struct lfs_config *cfg, lfs_testbd_t *copy) {
LFS_TESTBD_TRACE("lfs_testbd_copy(%p, %p)", (void*)cfg, (void*)copy);
lfs_testbd_t *bd = cfg->context;
LFS_TESTBD_TRACE("lfs_testbd_getbranchcount -> %"PRIu32, bd->branch_count);
return bd->branch_count;
// lazily copy over our block array
copy->blocks = malloc(cfg->block_count * sizeof(lfs_testbd_block_t*));
if (!copy->blocks) {
LFS_TESTBD_TRACE("lfs_testbd_copy -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
for (size_t i = 0; i < cfg->block_count; i++) {
copy->blocks[i] = lfs_testbd_incblock(bd->blocks[i]);
}
// other state
copy->power_cycles = bd->power_cycles;
copy->disk = bd->disk;
if (copy->disk) {
copy->disk->rc += 1;
}
copy->cfg = bd->cfg;
LFS_TESTBD_TRACE("lfs_testbd_copy -> %d", 0);
return 0;
}
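
lfs_testbd_copy replaces the old branch-tracking API: a copy shallow-copies the block array (incrementing each block's rc) and shares the disk mirror by bumping its rc, so a snapshot costs one pointer per block rather than a copy of the block contents. A fragment showing how a caller might use it, modeled on the runner code below (it assumes cfg and its lfs_testbd_t context are already set up as in run_powerloss_exhaustive):

// take a cheap snapshot of the current block-device state
lfs_testbd_t snapshot;
int err = lfs_testbd_copy(&cfg, &snapshot);
if (err) {
    // LFS_ERR_NOMEM if the block-pointer array could not be allocated
}

// ... keep running on the original; shared blocks are copied on first write ...

// later, resume from the snapshot by pointing the config at it
cfg.context = &snapshot;
// ... re-run the test case against the snapshotted state ...

// every copy must eventually be destroyed to drop its references
lfs_testbd_destroy(&cfg);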


@@ -119,6 +119,13 @@ typedef struct lfs_testbd_block {
uint8_t data[];
} lfs_testbd_block_t;
// Disk mirror
typedef struct lfs_testbd_disk {
uint32_t rc;
int fd;
uint8_t *scratch;
} lfs_testbd_disk_t;
// testbd state
typedef struct lfs_testbd {
// array of copy-on-write blocks
@@ -126,31 +133,7 @@ typedef struct lfs_testbd {
// some other test state
uint32_t power_cycles;
int disk_fd;
uint8_t *disk_scratch_block;
// array of tracked branches
struct lfs_testbd *branches;
lfs_testbd_powercycles_t branch_count;
lfs_testbd_powercycles_t branch_capacity;
// TODO file?
// union {
// struct {
// lfs_filebd_t bd;
// } file;
// struct {
// lfs_rambd_t bd;
// struct lfs_rambd_config cfg;
// } ram;
// } u;
//
// bool persist;
// uint32_t power_cycles;
// lfs_testbd_wear_t *wear;
// uint8_t *scratch;
lfs_testbd_disk_t *disk;
const struct lfs_testbd_config *cfg;
} lfs_testbd_t;
@@ -207,13 +190,8 @@ lfs_testbd_spowercycles_t lfs_testbd_getpowercycles(
int lfs_testbd_setpowercycles(const struct lfs_config *cfg,
lfs_testbd_powercycles_t power_cycles);
// Get a power-loss branch, requires track_branches=true
int lfs_testbd_getbranch(const struct lfs_config *cfg,
lfs_testbd_powercycles_t branch, lfs_testbd_t *bd);
// Get the current number of power-loss branches
lfs_testbd_spowercycles_t lfs_testbd_getbranchcount(
const struct lfs_config *cfg);
// Create a copy-on-write copy of the state of this block device
int lfs_testbd_copy(const struct lfs_config *cfg, lfs_testbd_t *copy);
#ifdef __cplusplus


@@ -700,6 +700,7 @@ static void run_powerloss_linear(
while (true) {
if (!setjmp(powerloss_jmp)) {
// run the test
case_->run(&cfg);
break;
}
@@ -780,6 +781,7 @@ static void run_powerloss_exponential(
while (true) {
if (!setjmp(powerloss_jmp)) {
// run the test
case_->run(&cfg);
break;
}
@@ -858,6 +860,7 @@ static void run_powerloss_cycles(
while (true) {
if (!setjmp(powerloss_jmp)) {
// run the test
case_->run(&cfg);
break;
}
@@ -883,15 +886,177 @@ static void run_powerloss_cycles(
}
}
//static void run_powerloss_n(void *data,
//
//static void run_powerloss_incremental(void *data,
struct powerloss_exhaustive_state {
struct lfs_config *cfg;
lfs_testbd_t *branches;
size_t branch_count;
size_t branch_capacity;
};
struct powerloss_exhaustive_cycles {
lfs_testbd_powercycles_t *cycles;
size_t cycle_count;
size_t cycle_capacity;
};
static void powerloss_exhaustive_branch(void *c) {
// append to branches
struct powerloss_exhaustive_state *state = c;
state->branch_count += 1;
if (state->branch_count > state->branch_capacity) {
state->branch_capacity = (2*state->branch_capacity > 4)
? 2*state->branch_capacity
: 4;
state->branches = realloc(state->branches,
state->branch_capacity * sizeof(lfs_testbd_t));
if (!state->branches) {
fprintf(stderr, "error: exhaustive: out of memory\n");
exit(-1);
}
}
// create copy-on-write copy
int err = lfs_testbd_copy(state->cfg,
&state->branches[state->branch_count-1]);
if (err) {
fprintf(stderr, "error: exhaustive: could not create bd copy\n");
exit(-1);
}
// also trigger on next power cycle
lfs_testbd_setpowercycles(state->cfg, 1);
}
static void run_powerloss_exhaustive_layer(
const struct test_suite *suite,
const struct test_case *case_,
size_t perm,
struct lfs_config *cfg,
struct lfs_testbd_config *bdcfg,
size_t depth,
struct powerloss_exhaustive_cycles *cycles) {
(void)suite;
struct powerloss_exhaustive_state state = {
.cfg = cfg,
.branches = NULL,
.branch_count = 0,
.branch_capacity = 0,
};
// run through the test without additional powerlosses, collecting possible
// branches as we do so
lfs_testbd_setpowercycles(state.cfg, depth > 0 ? 1 : 0);
bdcfg->powerloss_data = &state;
// run the tests
case_->run(cfg);
// aggressively clean up memory here to try to keep our memory usage low
int err = lfs_testbd_destroy(cfg);
if (err) {
fprintf(stderr, "error: could not destroy block device: %d\n", err);
exit(-1);
}
// recurse into each branch
for (size_t i = 0; i < state.branch_count; i++) {
// first push and print the branch
cycles->cycle_count += 1;
if (cycles->cycle_count > cycles->cycle_capacity) {
cycles->cycle_capacity = (2*cycles->cycle_capacity > 4)
? 2*cycles->cycle_capacity
: 4;
cycles->cycles = realloc(cycles->cycles,
cycles->cycle_capacity * sizeof(lfs_testbd_powercycles_t));
if (!cycles->cycles) {
fprintf(stderr, "error: exhaustive: out of memory\n");
exit(-1);
}
}
cycles->cycles[cycles->cycle_count-1] = i;
printf("powerloss %s#%zu#", case_->id, perm);
leb16_print(cycles->cycles, cycles->cycle_count);
printf("\n");
// now recurse
cfg->context = &state.branches[i];
run_powerloss_exhaustive_layer(suite, case_, perm,
cfg, bdcfg, depth-1, cycles);
// pop the cycle
cycles->cycle_count -= 1;
}
// clean up memory
free(state.branches);
}
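
To make the traversal order concrete, here is a small stand-alone model of the recursion above with the block-device branching replaced by plain integers (a toy of mine, not the runner): each run exposes some power-loss points, each point becomes a branch, and each branch is run again with one less level of nesting, printing its power-loss path much like the runner prints leb16-encoded cycles.

#include <stdio.h>

// toy model of run_powerloss_exhaustive_layer: assume every run exposes
// `ops` power-loss points and enumerate every path of up to `depth` losses
static void enumerate(int ops, int depth, int *path, int len) {
    // "run the test" for the current power-loss path
    printf("run:");
    for (int i = 0; i < len; i++) {
        printf(" %d", path[i]);
    }
    printf("\n");

    if (depth == 0) {
        // mirrors setting power_cycles=0 at the last layer: no more branching
        return;
    }
    // recurse into each branch, like the loop over state.branches
    for (int i = 0; i < ops; i++) {
        path[len] = i;
        enumerate(ops, depth - 1, path, len + 1);
    }
}

int main(void) {
    int path[4];
    enumerate(3, 2, path, 0);   // 3 power-loss points, up to 2 nested losses
    return 0;
}

In the real runner the number of branch points per run is not fixed; it is simply however many prog/erase operations the test performs against that branch's copy-on-write device.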
static void run_powerloss_exhaustive(
const struct test_suite *suite,
const struct test_case *case_,
size_t perm,
const lfs_testbd_powercycles_t *cycles,
size_t cycle_count) {
(void)cycles;
(void)suite;
// create block device and configuration
lfs_testbd_t bd;
struct lfs_config cfg = {
.context = &bd,
.read = lfs_testbd_read,
.prog = lfs_testbd_prog,
.erase = lfs_testbd_erase,
.sync = lfs_testbd_sync,
.read_size = READ_SIZE,
.prog_size = PROG_SIZE,
.block_size = BLOCK_SIZE,
.block_count = BLOCK_COUNT,
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
};
struct lfs_testbd_config bdcfg = {
.erase_value = ERASE_VALUE,
.erase_cycles = ERASE_CYCLES,
.badblock_behavior = BADBLOCK_BEHAVIOR,
.disk_path = test_disk,
.read_delay = test_read_delay,
.prog_delay = test_prog_delay,
.erase_delay = test_erase_delay,
.powerloss_behavior = POWERLOSS_BEHAVIOR,
.powerloss_cb = powerloss_exhaustive_branch,
.powerloss_data = NULL,
};
int err = lfs_testbd_createcfg(&cfg, test_disk, &bdcfg);
if (err) {
fprintf(stderr, "error: could not create block device: %d\n", err);
exit(-1);
}
// run the test, increasing power-cycles as power-loss events occur
printf("running %s#%zu\n", case_->id, perm);
// recursively exhaust each layer of powerlosses
run_powerloss_exhaustive_layer(suite, case_, perm,
&cfg, &bdcfg, cycle_count,
&(struct powerloss_exhaustive_cycles){NULL, 0, 0});
printf("finished %s#%zu\n", case_->id, perm);
}
const test_powerloss_t builtin_powerlosses[] = {
{'0', "none", run_powerloss_none, NULL, 0},
{'e', "exponential", run_powerloss_exponential, NULL, 0},
{'l', "linear", run_powerloss_linear, NULL, 0},
//{'x', "exhaustive", run_powerloss_exhaustive}
{'x', "exhaustive", run_powerloss_exhaustive, NULL, SIZE_MAX},
{0, NULL, NULL, NULL, 0},
};
@@ -899,7 +1064,7 @@ const char *const builtin_powerlosses_help[] = {
"Run with no power-losses.",
"Run with linearly-decreasing power-losses.",
"Run with exponentially-decreasing power-losses.",
//"Run a all permutations of power-losses, this may take a while.",
"Run a all permutations of power-losses, this may take a while.",
"Run a all permutations of n power-losses.",
"Run a custom comma-separated set of power-losses.",
"Run a custom leb16-encoded set of power-losses.",
@@ -1273,9 +1438,6 @@ invalid_define:
}
}
// exhaustive permutations
// TODO
// comma-separated permutation
if (*optarg == '{') {
// how many cycles?
@@ -1341,6 +1503,23 @@ invalid_define:
goto powerloss_next;
}
// exhaustive permutations
{
char *parsed = NULL;
size_t count = strtoumax(optarg, &parsed, 0);
if (parsed == optarg) {
goto powerloss_unknown;
}
((test_powerloss_t*)test_powerlosses)[
test_powerloss_count-1] = (test_powerloss_t){
.run = run_powerloss_exhaustive,
.cycles = NULL,
.cycle_count = count,
};
optarg = (char*)parsed;
goto powerloss_next;
}
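
In other words, in addition to the builtin 'x' scenario, a bare number in the power-loss argument is now parsed here as an exhaustive nesting depth, so a value of 1 or 2 corresponds to the 1-deep and 2-deep exhaustive runs timed in the commit message (the exact flag spelling lives in the surrounding option handling, outside this hunk).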
powerloss_unknown:
// unknown scenario?
fprintf(stderr, "error: "