Replaced tn/bn prefixes with an actual dependency system in tests/benches

The previous system of relying on test name prefixes for ordering was
simple, but organizing tests by dependencies and topologically sorting
them during compilation 1. is more flexible and 2. simplifies test
names, which get typed a lot.

Note these are not "hard" dependencies; each test suite should work fine
in isolation. These "after" dependencies just hint at an ordering when
all tests are run.

As such, it's worth noting the tests should NOT error if a dependency is
missing. This unfortunately makes it a bit harder to catch typos, but
allows faster compilation of a subset of tests.
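
Roughly, the ordering pass this adds to test.py/bench.py boils down to
repeatedly taking every suite whose "after" hints are no longer pending.
A minimal Python sketch (order_suites is a hypothetical standalone
helper; suites here only need a name and an after list), where "after"
names that aren't part of the build are simply skipped:

    import collections as co

    def order_suites(suites):
        # sort suites by:
        # 1. topologically by "after" dependencies
        # 2. lexicographically for consistency
        pending = co.OrderedDict(
            (suite.name, suite)
            for suite in sorted(suites, key=lambda s: s.name))
        ordered = []
        while pending:
            pending_ = co.OrderedDict()
            for suite in pending.values():
                # ready once none of its "after" hints are still pending,
                # "after" names that aren't being compiled at all are
                # simply ignored
                if not any(after in pending for after in suite.after):
                    ordered.append(suite)
                else:
                    pending_[suite.name] = suite
            # no suite made progress? must be a cycle
            if len(pending_) == len(pending):
                raise RuntimeError('cycle detected in suite ordering: %s'
                    % ', '.join(pending))
            pending = pending_
        return ordered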

---

To make this work, the way tests are linked has changed from using a
custom linker section (fun linker magic!) to a weakly linked array
appended to every source file (also fun linker magic!).

At least with this method, test.py has strict control over the test
ordering, and doesn't depend on 1. the order in which the linker merges
sections, or 2. the order tests are passed to test.py. I didn't realize
the previous system was so fragile.
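
Concretely, the compile step now appends something like the following to
every generated source file. This is a condensed sketch of the emission
logic, not the scripts verbatim: emit_suite_array is a hypothetical
helper, f.writeln is the scripts' own line-writing helper, and prefix
stands in for "test"/"bench":

    def emit_suite_array(f, suites, prefix='test'):
        # declare every suite, the actual definitions live in each
        # suite's own generated source
        for suite in suites:
            f.writeln('extern const struct %s_suite __%s__%s__suite;'
                % (prefix, prefix, suite.name))
        f.writeln()
        # weak, so every generated source can carry a copy and the
        # linker keeps exactly one, in the order chosen above
        f.writeln('__attribute__((weak))')
        f.writeln('const struct %s_suite *const %s_suites[] = {'
            % (prefix, prefix))
        for suite in suites:
            f.writeln(4*' ' + '&__%s__%s__suite,' % (prefix, suite.name))
        f.writeln('};')
        f.writeln('__attribute__((weak))')
        f.writeln('const size_t %s_suite_count = %d;'
            % (prefix, len(suites)))

Since every copy is weak, it doesn't matter which generated source the
linker ends up keeping, and compiling only a subset of suites just
produces a shorter array, which is also why a missing "after" dependency
can't be treated as a hard error.
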
Author: Christopher Haster
Date: 2023-08-04 13:33:00 -05:00
Parent: 2835b17d14
Commit: 5be7bae518

15 changed files with 579 additions and 463 deletions

View File

@@ -1,9 +1,11 @@
# Bench our mid-level B-trees
after = 'bench_rbyd'
# maximize lookahead buffer, we don't actually gc so we only get one pass
# of the disk for these tests
defines.LOOKAHEAD_SIZE = 'BLOCK_COUNT / 8'
[cases.b2_btree_lookup]
[cases.bench_btree_lookup]
defines.N = [8, 16, 32, 64, 128, 256, 1024]
# 0 = in-order
# 1 = reversed-order
@@ -53,7 +55,7 @@ code = '''
BENCH_STOP();
'''
[cases.b2_btree_commit]
[cases.bench_btree_commit]
defines.N = [8, 16, 32, 64, 128, 256, 1024]
# 0 = in-order
# 1 = reversed-order

View File

@@ -1,9 +1,11 @@
# Bench our high-level metadata tree in the core of littlefs
after = ['bench_rbyd', 'bench_btree']
# maximize lookahead buffer, we don't actually gc so we only get one pass
# of the disk for these tests
defines.LOOKAHEAD_SIZE = 'BLOCK_COUNT / 8'
[cases.b3_mtree_lookup]
[cases.bench_mtree_lookup]
defines.N = [8, 16, 32, 64, 128, 256, 1024]
# 0 = in-order
# 1 = reversed-order
@@ -64,7 +66,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.b3_mtree_commit]
[cases.bench_mtree_commit]
defines.N = [8, 16, 32, 64, 128, 256, 1024]
# 0 = in-order
# 1 = reversed-order
@@ -136,7 +138,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.b3_mtree_traversal]
[cases.bench_mtree_traversal]
defines.N = [8, 16, 32, 64, 128, 256, 1024]
# 0 = in-order
# 1 = reversed-order

View File

@@ -1,11 +1,11 @@
# Bench our low-level rbyd data-structure
# set block_size to the full size of disk so we can test arbitrarily
# large rbyd trees, we don't really care about block sizes at this
# abstraction level
defines.BLOCK_SIZE = 'DISK_SIZE'
[cases.b1_rbyd_attr_commit]
[cases.bench_rbyd_attr_commit]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
@@ -62,7 +62,7 @@ code = '''
lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, cfg->block_size) => 0;
'''
[cases.b1_rbyd_attr_fetch]
[cases.bench_rbyd_attr_fetch]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
@@ -119,7 +119,7 @@ code = '''
BENCH_STOP();
'''
[cases.b1_rbyd_attr_lookup]
[cases.bench_rbyd_attr_lookup]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
@@ -183,7 +183,7 @@ code = '''
BENCH_STOP();
'''
[cases.b1_rbyd_attr_append]
[cases.bench_rbyd_attr_append]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
@@ -249,7 +249,7 @@ code = '''
assert(memcmp(buffer, "\xbb\xbb\xbb\xbb", 4) == 0);
'''
[cases.b1_rbyd_attr_remove]
[cases.bench_rbyd_attr_remove]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
@@ -314,7 +314,7 @@ code = '''
=> LFS_ERR_NOENT;
'''
[cases.b1_rbyd_id_commit]
[cases.bench_rbyd_id_commit]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
@@ -367,7 +367,7 @@ code = '''
lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, cfg->block_size) => 0;
'''
[cases.b1_rbyd_id_fetch]
[cases.bench_rbyd_id_fetch]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
@@ -420,7 +420,7 @@ code = '''
BENCH_STOP();
'''
[cases.b1_rbyd_id_lookup]
[cases.bench_rbyd_id_lookup]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
@@ -478,7 +478,7 @@ code = '''
BENCH_STOP();
'''
[cases.b1_rbyd_id_create]
[cases.bench_rbyd_id_create]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
@@ -539,7 +539,7 @@ code = '''
assert(memcmp(buffer, "\xbb\xbb\xbb\xbb", 4) == 0);
'''
[cases.b1_rbyd_id_delete]
[cases.bench_rbyd_id_delete]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order

View File

@@ -122,15 +122,6 @@ typedef struct bench_id {
} bench_id_t;
// bench suites are linked into a custom ld section
extern struct bench_suite __start__bench_suites;
extern struct bench_suite __stop__bench_suites;
const struct bench_suite *bench_suites = &__start__bench_suites;
#define BENCH_SUITE_COUNT \
((size_t)(&__stop__bench_suites - &__start__bench_suites))
// bench define management
typedef struct bench_define_map {
const bench_define_t *defines;
@@ -852,23 +843,23 @@ static void summary(void) {
struct perm_count_state perms = {0, 0};
for (size_t t = 0; t < bench_id_count; t++) {
for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
bench_define_suite(&bench_suites[i]);
for (size_t i = 0; i < bench_suite_count; i++) {
bench_define_suite(bench_suites[i]);
for (size_t j = 0; j < bench_suites[i].case_count; j++) {
for (size_t j = 0; j < bench_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (bench_ids[t].name && !(
strcmp(bench_ids[t].name,
bench_suites[i].name) == 0
bench_suites[i]->name) == 0
|| strcmp(bench_ids[t].name,
bench_suites[i].cases[j].name) == 0)) {
bench_suites[i]->cases[j].name) == 0)) {
continue;
}
cases += 1;
case_forperm(
&bench_suites[i],
&bench_suites[i].cases[j],
bench_suites[i],
&bench_suites[i]->cases[j],
bench_ids[t].defines,
bench_ids[t].define_count,
perm_count,
@@ -876,7 +867,7 @@ static void summary(void) {
}
suites += 1;
flags |= bench_suites[i].flags;
flags |= bench_suites[i]->flags;
}
}
@@ -897,8 +888,8 @@ static void summary(void) {
static void list_suites(void) {
// at least size so that names fit
unsigned name_width = 23;
for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
size_t len = strlen(bench_suites[i].name);
for (size_t i = 0; i < bench_suite_count; i++) {
size_t len = strlen(bench_suites[i]->name);
if (len > name_width) {
name_width = len;
}
@@ -908,26 +899,26 @@ static void list_suites(void) {
printf("%-*s %7s %7s %11s\n",
name_width, "suite", "flags", "cases", "perms");
for (size_t t = 0; t < bench_id_count; t++) {
for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
bench_define_suite(&bench_suites[i]);
for (size_t i = 0; i < bench_suite_count; i++) {
bench_define_suite(bench_suites[i]);
size_t cases = 0;
struct perm_count_state perms = {0, 0};
for (size_t j = 0; j < bench_suites[i].case_count; j++) {
for (size_t j = 0; j < bench_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (bench_ids[t].name && !(
strcmp(bench_ids[t].name,
bench_suites[i].name) == 0
bench_suites[i]->name) == 0
|| strcmp(bench_ids[t].name,
bench_suites[i].cases[j].name) == 0)) {
bench_suites[i]->cases[j].name) == 0)) {
continue;
}
cases += 1;
case_forperm(
&bench_suites[i],
&bench_suites[i].cases[j],
bench_suites[i],
&bench_suites[i]->cases[j],
bench_ids[t].defines,
bench_ids[t].define_count,
perm_count,
@@ -943,11 +934,11 @@ static void list_suites(void) {
sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
char flag_buf[64];
sprintf(flag_buf, "%s%s",
(bench_suites[i].flags & BENCH_INTERNAL) ? "i" : "",
(!bench_suites[i].flags) ? "-" : "");
(bench_suites[i]->flags & BENCH_INTERNAL) ? "i" : "",
(!bench_suites[i]->flags) ? "-" : "");
printf("%-*s %7s %7zu %11s\n",
name_width,
bench_suites[i].name,
bench_suites[i]->name,
flag_buf,
cases,
perm_buf);
@@ -958,9 +949,9 @@ static void list_suites(void) {
static void list_cases(void) {
// at least size so that names fit
unsigned name_width = 23;
for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
for (size_t j = 0; j < bench_suites[i].case_count; j++) {
size_t len = strlen(bench_suites[i].cases[j].name);
for (size_t i = 0; i < bench_suite_count; i++) {
for (size_t j = 0; j < bench_suites[i]->case_count; j++) {
size_t len = strlen(bench_suites[i]->cases[j].name);
if (len > name_width) {
name_width = len;
}
@@ -970,23 +961,23 @@ static void list_cases(void) {
printf("%-*s %7s %11s\n", name_width, "case", "flags", "perms");
for (size_t t = 0; t < bench_id_count; t++) {
for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
bench_define_suite(&bench_suites[i]);
for (size_t i = 0; i < bench_suite_count; i++) {
bench_define_suite(bench_suites[i]);
for (size_t j = 0; j < bench_suites[i].case_count; j++) {
for (size_t j = 0; j < bench_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (bench_ids[t].name && !(
strcmp(bench_ids[t].name,
bench_suites[i].name) == 0
bench_suites[i]->name) == 0
|| strcmp(bench_ids[t].name,
bench_suites[i].cases[j].name) == 0)) {
bench_suites[i]->cases[j].name) == 0)) {
continue;
}
struct perm_count_state perms = {0, 0};
case_forperm(
&bench_suites[i],
&bench_suites[i].cases[j],
bench_suites[i],
&bench_suites[i]->cases[j],
bench_ids[t].defines,
bench_ids[t].define_count,
perm_count,
@@ -996,13 +987,13 @@ static void list_cases(void) {
sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
char flag_buf[64];
sprintf(flag_buf, "%s%s",
(bench_suites[i].cases[j].flags & BENCH_INTERNAL)
(bench_suites[i]->cases[j].flags & BENCH_INTERNAL)
? "i" : "",
(!bench_suites[i].cases[j].flags)
(!bench_suites[i]->cases[j].flags)
? "-" : "");
printf("%-*s %7s %11s\n",
name_width,
bench_suites[i].cases[j].name,
bench_suites[i]->cases[j].name,
flag_buf,
perm_buf);
}
@@ -1013,8 +1004,8 @@ static void list_cases(void) {
static void list_suite_paths(void) {
// at least size so that names fit
unsigned name_width = 23;
for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
size_t len = strlen(bench_suites[i].name);
for (size_t i = 0; i < bench_suite_count; i++) {
size_t len = strlen(bench_suites[i]->name);
if (len > name_width) {
name_width = len;
}
@@ -1023,16 +1014,16 @@ static void list_suite_paths(void) {
printf("%-*s %s\n", name_width, "suite", "path");
for (size_t t = 0; t < bench_id_count; t++) {
for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
for (size_t i = 0; i < bench_suite_count; i++) {
size_t cases = 0;
for (size_t j = 0; j < bench_suites[i].case_count; j++) {
for (size_t j = 0; j < bench_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (bench_ids[t].name && !(
strcmp(bench_ids[t].name,
bench_suites[i].name) == 0
bench_suites[i]->name) == 0
|| strcmp(bench_ids[t].name,
bench_suites[i].cases[j].name) == 0)) {
bench_suites[i]->cases[j].name) == 0)) {
continue;
cases += 1;
@@ -1046,8 +1037,8 @@ static void list_suite_paths(void) {
printf("%-*s %s\n",
name_width,
bench_suites[i].name,
bench_suites[i].path);
bench_suites[i]->name,
bench_suites[i]->path);
}
}
}
@@ -1055,9 +1046,9 @@ static void list_suite_paths(void) {
static void list_case_paths(void) {
// at least size so that names fit
unsigned name_width = 23;
for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
for (size_t j = 0; j < bench_suites[i].case_count; j++) {
size_t len = strlen(bench_suites[i].cases[j].name);
for (size_t i = 0; i < bench_suite_count; i++) {
for (size_t j = 0; j < bench_suites[i]->case_count; j++) {
size_t len = strlen(bench_suites[i]->cases[j].name);
if (len > name_width) {
name_width = len;
}
@@ -1067,21 +1058,21 @@ static void list_case_paths(void) {
printf("%-*s %s\n", name_width, "case", "path");
for (size_t t = 0; t < bench_id_count; t++) {
for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
for (size_t j = 0; j < bench_suites[i].case_count; j++) {
for (size_t i = 0; i < bench_suite_count; i++) {
for (size_t j = 0; j < bench_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (bench_ids[t].name && !(
strcmp(bench_ids[t].name,
bench_suites[i].name) == 0
bench_suites[i]->name) == 0
|| strcmp(bench_ids[t].name,
bench_suites[i].cases[j].name) == 0)) {
bench_suites[i]->cases[j].name) == 0)) {
continue;
}
printf("%-*s %s\n",
name_width,
bench_suites[i].cases[j].name,
bench_suites[i].cases[j].path);
bench_suites[i]->cases[j].name,
bench_suites[i]->cases[j].path);
}
}
}
@@ -1179,22 +1170,22 @@ static void list_defines(void) {
// add defines
for (size_t t = 0; t < bench_id_count; t++) {
for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
bench_define_suite(&bench_suites[i]);
for (size_t i = 0; i < bench_suite_count; i++) {
bench_define_suite(bench_suites[i]);
for (size_t j = 0; j < bench_suites[i].case_count; j++) {
for (size_t j = 0; j < bench_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (bench_ids[t].name && !(
strcmp(bench_ids[t].name,
bench_suites[i].name) == 0
bench_suites[i]->name) == 0
|| strcmp(bench_ids[t].name,
bench_suites[i].cases[j].name) == 0)) {
bench_suites[i]->cases[j].name) == 0)) {
continue;
}
case_forperm(
&bench_suites[i],
&bench_suites[i].cases[j],
bench_suites[i],
&bench_suites[i]->cases[j],
bench_ids[t].defines,
bench_ids[t].define_count,
perm_list_defines,
@@ -1225,22 +1216,22 @@ static void list_permutation_defines(void) {
// add permutation defines
for (size_t t = 0; t < bench_id_count; t++) {
for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
bench_define_suite(&bench_suites[i]);
for (size_t i = 0; i < bench_suite_count; i++) {
bench_define_suite(bench_suites[i]);
for (size_t j = 0; j < bench_suites[i].case_count; j++) {
for (size_t j = 0; j < bench_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (bench_ids[t].name && !(
strcmp(bench_ids[t].name,
bench_suites[i].name) == 0
bench_suites[i]->name) == 0
|| strcmp(bench_ids[t].name,
bench_suites[i].cases[j].name) == 0)) {
bench_suites[i]->cases[j].name) == 0)) {
continue;
}
case_forperm(
&bench_suites[i],
&bench_suites[i].cases[j],
bench_suites[i],
&bench_suites[i]->cases[j],
bench_ids[t].defines,
bench_ids[t].define_count,
perm_list_permutation_defines,
@@ -1445,22 +1436,22 @@ static void run(void) {
signal(SIGPIPE, SIG_IGN);
for (size_t t = 0; t < bench_id_count; t++) {
for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
bench_define_suite(&bench_suites[i]);
for (size_t i = 0; i < bench_suite_count; i++) {
bench_define_suite(bench_suites[i]);
for (size_t j = 0; j < bench_suites[i].case_count; j++) {
for (size_t j = 0; j < bench_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (bench_ids[t].name && !(
strcmp(bench_ids[t].name,
bench_suites[i].name) == 0
bench_suites[i]->name) == 0
|| strcmp(bench_ids[t].name,
bench_suites[i].cases[j].name) == 0)) {
bench_suites[i]->cases[j].name) == 0)) {
continue;
}
case_forperm(
&bench_suites[i],
&bench_suites[i].cases[j],
bench_suites[i],
&bench_suites[i]->cases[j],
bench_ids[t].defines,
bench_ids[t].define_count,
perm_run,

View File

@@ -74,6 +74,9 @@ struct bench_suite {
size_t case_count;
};
extern const struct bench_suite *const bench_suites[];
extern const size_t bench_suite_count;
// deterministic prng for pseudo-randomness in benches
uint32_t bench_prng(uint32_t *state);

View File

@@ -135,15 +135,6 @@ typedef struct test_id {
} test_id_t;
// test suites are linked into a custom ld section
extern struct test_suite __start__test_suites;
extern struct test_suite __stop__test_suites;
const struct test_suite *test_suites = &__start__test_suites;
#define TEST_SUITE_COUNT \
((size_t)(&__stop__test_suites - &__start__test_suites))
// test define management
typedef struct test_define_map {
const test_define_t *defines;
@@ -881,23 +872,23 @@ static void summary(void) {
struct perm_count_state perms = {0, 0};
for (size_t t = 0; t < test_id_count; t++) {
for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
test_define_suite(&test_suites[i]);
for (size_t i = 0; i < test_suite_count; i++) {
test_define_suite(test_suites[i]);
for (size_t j = 0; j < test_suites[i].case_count; j++) {
for (size_t j = 0; j < test_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (test_ids[t].name && !(
strcmp(test_ids[t].name,
test_suites[i].name) == 0
test_suites[i]->name) == 0
|| strcmp(test_ids[t].name,
test_suites[i].cases[j].name) == 0)) {
test_suites[i]->cases[j].name) == 0)) {
continue;
}
cases += 1;
case_forperm(
&test_suites[i],
&test_suites[i].cases[j],
test_suites[i],
&test_suites[i]->cases[j],
test_ids[t].defines,
test_ids[t].define_count,
test_ids[t].cycles,
@@ -907,7 +898,7 @@ static void summary(void) {
}
suites += 1;
flags |= test_suites[i].flags;
flags |= test_suites[i]->flags;
}
}
@@ -929,8 +920,8 @@ static void summary(void) {
static void list_suites(void) {
// at least size so that names fit
unsigned name_width = 23;
for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
size_t len = strlen(test_suites[i].name);
for (size_t i = 0; i < test_suite_count; i++) {
size_t len = strlen(test_suites[i]->name);
if (len > name_width) {
name_width = len;
}
@@ -940,26 +931,26 @@ static void list_suites(void) {
printf("%-*s %7s %7s %11s\n",
name_width, "suite", "flags", "cases", "perms");
for (size_t t = 0; t < test_id_count; t++) {
for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
test_define_suite(&test_suites[i]);
for (size_t i = 0; i < test_suite_count; i++) {
test_define_suite(test_suites[i]);
size_t cases = 0;
struct perm_count_state perms = {0, 0};
for (size_t j = 0; j < test_suites[i].case_count; j++) {
for (size_t j = 0; j < test_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (test_ids[t].name && !(
strcmp(test_ids[t].name,
test_suites[i].name) == 0
test_suites[i]->name) == 0
|| strcmp(test_ids[t].name,
test_suites[i].cases[j].name) == 0)) {
test_suites[i]->cases[j].name) == 0)) {
continue;
}
cases += 1;
case_forperm(
&test_suites[i],
&test_suites[i].cases[j],
test_suites[i],
&test_suites[i]->cases[j],
test_ids[t].defines,
test_ids[t].define_count,
test_ids[t].cycles,
@@ -977,12 +968,12 @@ static void list_suites(void) {
sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
char flag_buf[64];
sprintf(flag_buf, "%s%s%s",
(test_suites[i].flags & TEST_REENTRANT) ? "r" : "",
(test_suites[i].flags & TEST_INTERNAL) ? "i" : "",
(!test_suites[i].flags) ? "-" : "");
(test_suites[i]->flags & TEST_REENTRANT) ? "r" : "",
(test_suites[i]->flags & TEST_INTERNAL) ? "i" : "",
(!test_suites[i]->flags) ? "-" : "");
printf("%-*s %7s %7zu %11s\n",
name_width,
test_suites[i].name,
test_suites[i]->name,
flag_buf,
cases,
perm_buf);
@@ -993,9 +984,9 @@ static void list_suites(void) {
static void list_cases(void) {
// at least size so that names fit
unsigned name_width = 23;
for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
for (size_t j = 0; j < test_suites[i].case_count; j++) {
size_t len = strlen(test_suites[i].cases[j].name);
for (size_t i = 0; i < test_suite_count; i++) {
for (size_t j = 0; j < test_suites[i]->case_count; j++) {
size_t len = strlen(test_suites[i]->cases[j].name);
if (len > name_width) {
name_width = len;
}
@@ -1005,23 +996,23 @@ static void list_cases(void) {
printf("%-*s %7s %11s\n", name_width, "case", "flags", "perms");
for (size_t t = 0; t < test_id_count; t++) {
for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
test_define_suite(&test_suites[i]);
for (size_t i = 0; i < test_suite_count; i++) {
test_define_suite(test_suites[i]);
for (size_t j = 0; j < test_suites[i].case_count; j++) {
for (size_t j = 0; j < test_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (test_ids[t].name && !(
strcmp(test_ids[t].name,
test_suites[i].name) == 0
test_suites[i]->name) == 0
|| strcmp(test_ids[t].name,
test_suites[i].cases[j].name) == 0)) {
test_suites[i]->cases[j].name) == 0)) {
continue;
}
struct perm_count_state perms = {0, 0};
case_forperm(
&test_suites[i],
&test_suites[i].cases[j],
test_suites[i],
&test_suites[i]->cases[j],
test_ids[t].defines,
test_ids[t].define_count,
test_ids[t].cycles,
@@ -1033,15 +1024,15 @@ static void list_cases(void) {
sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
char flag_buf[64];
sprintf(flag_buf, "%s%s%s",
(test_suites[i].cases[j].flags & TEST_REENTRANT)
(test_suites[i]->cases[j].flags & TEST_REENTRANT)
? "r" : "",
(test_suites[i].cases[j].flags & TEST_INTERNAL)
(test_suites[i]->cases[j].flags & TEST_INTERNAL)
? "i" : "",
(!test_suites[i].cases[j].flags)
(!test_suites[i]->cases[j].flags)
? "-" : "");
printf("%-*s %7s %11s\n",
name_width,
test_suites[i].cases[j].name,
test_suites[i]->cases[j].name,
flag_buf,
perm_buf);
}
@@ -1052,8 +1043,8 @@ static void list_cases(void) {
static void list_suite_paths(void) {
// at least size so that names fit
unsigned name_width = 23;
for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
size_t len = strlen(test_suites[i].name);
for (size_t i = 0; i < test_suite_count; i++) {
size_t len = strlen(test_suites[i]->name);
if (len > name_width) {
name_width = len;
}
@@ -1062,16 +1053,16 @@ static void list_suite_paths(void) {
printf("%-*s %s\n", name_width, "suite", "path");
for (size_t t = 0; t < test_id_count; t++) {
for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
for (size_t i = 0; i < test_suite_count; i++) {
size_t cases = 0;
for (size_t j = 0; j < test_suites[i].case_count; j++) {
for (size_t j = 0; j < test_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (test_ids[t].name && !(
strcmp(test_ids[t].name,
test_suites[i].name) == 0
test_suites[i]->name) == 0
|| strcmp(test_ids[t].name,
test_suites[i].cases[j].name) == 0)) {
test_suites[i]->cases[j].name) == 0)) {
continue;
}
@@ -1085,8 +1076,8 @@ static void list_suite_paths(void) {
printf("%-*s %s\n",
name_width,
test_suites[i].name,
test_suites[i].path);
test_suites[i]->name,
test_suites[i]->path);
}
}
}
@@ -1094,9 +1085,9 @@ static void list_suite_paths(void) {
static void list_case_paths(void) {
// at least size so that names fit
unsigned name_width = 23;
for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
for (size_t j = 0; j < test_suites[i].case_count; j++) {
size_t len = strlen(test_suites[i].cases[j].name);
for (size_t i = 0; i < test_suite_count; i++) {
for (size_t j = 0; j < test_suites[i]->case_count; j++) {
size_t len = strlen(test_suites[i]->cases[j].name);
if (len > name_width) {
name_width = len;
}
@@ -1106,21 +1097,21 @@ static void list_case_paths(void) {
printf("%-*s %s\n", name_width, "case", "path");
for (size_t t = 0; t < test_id_count; t++) {
for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
for (size_t j = 0; j < test_suites[i].case_count; j++) {
for (size_t i = 0; i < test_suite_count; i++) {
for (size_t j = 0; j < test_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (test_ids[t].name && !(
strcmp(test_ids[t].name,
test_suites[i].name) == 0
test_suites[i]->name) == 0
|| strcmp(test_ids[t].name,
test_suites[i].cases[j].name) == 0)) {
test_suites[i]->cases[j].name) == 0)) {
continue;
}
printf("%-*s %s\n",
name_width,
test_suites[i].cases[j].name,
test_suites[i].cases[j].path);
test_suites[i]->cases[j].name,
test_suites[i]->cases[j].path);
}
}
}
@@ -1222,22 +1213,22 @@ static void list_defines(void) {
// add defines
for (size_t t = 0; t < test_id_count; t++) {
for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
test_define_suite(&test_suites[i]);
for (size_t i = 0; i < test_suite_count; i++) {
test_define_suite(test_suites[i]);
for (size_t j = 0; j < test_suites[i].case_count; j++) {
for (size_t j = 0; j < test_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (test_ids[t].name && !(
strcmp(test_ids[t].name,
test_suites[i].name) == 0
test_suites[i]->name) == 0
|| strcmp(test_ids[t].name,
test_suites[i].cases[j].name) == 0)) {
test_suites[i]->cases[j].name) == 0)) {
continue;
}
case_forperm(
&test_suites[i],
&test_suites[i].cases[j],
test_suites[i],
&test_suites[i]->cases[j],
test_ids[t].defines,
test_ids[t].define_count,
test_ids[t].cycles,
@@ -1270,22 +1261,22 @@ static void list_permutation_defines(void) {
// add permutation defines
for (size_t t = 0; t < test_id_count; t++) {
for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
test_define_suite(&test_suites[i]);
for (size_t i = 0; i < test_suite_count; i++) {
test_define_suite(test_suites[i]);
for (size_t j = 0; j < test_suites[i].case_count; j++) {
for (size_t j = 0; j < test_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (test_ids[t].name && !(
strcmp(test_ids[t].name,
test_suites[i].name) == 0
test_suites[i]->name) == 0
|| strcmp(test_ids[t].name,
test_suites[i].cases[j].name) == 0)) {
test_suites[i]->cases[j].name) == 0)) {
continue;
}
case_forperm(
&test_suites[i],
&test_suites[i].cases[j],
test_suites[i],
&test_suites[i]->cases[j],
test_ids[t].defines,
test_ids[t].define_count,
test_ids[t].cycles,
@@ -1977,22 +1968,22 @@ static void run(void) {
signal(SIGPIPE, SIG_IGN);
for (size_t t = 0; t < test_id_count; t++) {
for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
test_define_suite(&test_suites[i]);
for (size_t i = 0; i < test_suite_count; i++) {
test_define_suite(test_suites[i]);
for (size_t j = 0; j < test_suites[i].case_count; j++) {
for (size_t j = 0; j < test_suites[i]->case_count; j++) {
// does neither suite nor case name match?
if (test_ids[t].name && !(
strcmp(test_ids[t].name,
test_suites[i].name) == 0
test_suites[i]->name) == 0
|| strcmp(test_ids[t].name,
test_suites[i].cases[j].name) == 0)) {
test_suites[i]->cases[j].name) == 0)) {
continue;
}
case_forperm(
&test_suites[i],
&test_suites[i].cases[j],
test_suites[i],
&test_suites[i]->cases[j],
test_ids[t].defines,
test_ids[t].define_count,
test_ids[t].cycles,

View File

@@ -68,6 +68,9 @@ struct test_suite {
size_t case_count;
};
extern const struct test_suite *const test_suites[];
extern const size_t test_suite_count;
// this is defined as true when powerloss-testing
extern bool test_pl;

View File

@@ -142,6 +142,14 @@ class BenchCase:
k),
file=sys.stderr)
def __repr__(self):
return '<BenchCase %s>' % self.name
def __lt__(self, other):
# sort by suite, lineno, and name
return ((self.suite, self.lineno, self.name)
< (other.suite, other.lineno, other.name))
class BenchSuite:
# create a BenchSuite object from a toml file
@@ -193,13 +201,16 @@ class BenchSuite:
if not case_linenos or l < case_linenos[0][0]),
default=None)
self.after = config.pop('after', [])
if not isinstance(self.after, list):
self.after = [self.after]
# a couple of these we just forward to all cases
defines = config.pop('defines', {})
in_ = config.pop('in', None)
self.cases = []
for name, case in sorted(cases.items(),
key=lambda c: c[1].get('lineno')):
for name, case in cases.items():
self.cases.append(BenchCase(config={
'name': name,
'path': path + (':%d' % case['lineno']
@@ -210,6 +221,9 @@ class BenchSuite:
**case},
args=args))
# sort for consistency
self.cases.sort()
# combine per-case defines
self.defines = set.union(set(), *(
set(case.defines) for case in self.cases))
@@ -225,6 +239,14 @@ class BenchSuite:
k),
file=sys.stderr)
def __repr__(self):
return '<BenchSuite %s>' % self.name
def __lt__(self, other):
# sort by name
#
# note we override this with a topological sort during compilation
return self.name < other.name
def compile(bench_paths, **args):
@@ -246,7 +268,29 @@ def compile(bench_paths, **args):
# load the suites
suites = [BenchSuite(path, args) for path in paths]
suites.sort(key=lambda s: s.name)
# sort suites by:
# 1. topologically by "after" dependencies
# 2. lexicographically for consistency
pending = co.OrderedDict((suite.name, suite)
for suite in sorted(suites))
suites = []
while pending:
pending_ = co.OrderedDict()
for suite in pending.values():
if not any(after in pending for after in suite.after):
suites.append(suite)
else:
pending_[suite.name] = suite
if len(pending_) == len(pending):
print('%serror:%s cycle detected in suite ordering, %s' % (
'\x1b[01;31m' if args['color'] else '',
'\x1b[m' if args['color'] else '',
', '.join(suite.name for suite in pending.values())))
sys.exit(-1)
pending = pending_
# check for name conflicts, these will cause ambiguity problems later
# when running benches
@@ -415,12 +459,6 @@ def compile(bench_paths, **args):
f.writeln()
# create suite struct
#
# note we place this in the custom bench_suites section with
# minimum alignment, otherwise GCC ups the alignment to
# 32-bytes for some reason
f.writeln('__attribute__((section("_bench_suites"), '
'aligned(1)))')
f.writeln('const struct bench_suite __bench__%s__suite = {'
% suite.name)
f.writeln(4*' '+'.name = "%s",' % suite.name)
@@ -526,6 +564,26 @@ def compile(bench_paths, **args):
f.writeln('#endif')
f.writeln()
# declare our bench suites
#
# by declaring these as weak we can write these to every
# source file without issue, eventually one of these copies
# will be linked
for suite in suites:
f.writeln('extern const struct bench_suite '
'__bench__%s__suite;' % suite.name);
f.writeln()
f.writeln('__attribute__((weak))')
f.writeln('const struct bench_suite *const bench_suites[] = {');
for suite in suites:
f.writeln(4*' '+'&__bench__%s__suite,' % suite.name);
f.writeln('};')
f.writeln('__attribute__((weak))')
f.writeln('const size_t bench_suite_count = %d;' % len(suites))
f.writeln()
def find_runner(runner, id=None, **args):
cmd = runner.copy()

View File

@@ -144,6 +144,14 @@ class TestCase:
k),
file=sys.stderr)
def __repr__(self):
return '<TestCase %s>' % self.name
def __lt__(self, other):
# sort by suite, lineno, and name
return ((self.suite, self.lineno, self.name)
< (other.suite, other.lineno, other.name))
class TestSuite:
# create a TestSuite object from a toml file
@@ -195,14 +203,17 @@ class TestSuite:
if not case_linenos or l < case_linenos[0][0]),
default=None)
self.after = config.pop('after', [])
if not isinstance(self.after, list):
self.after = [self.after]
# a couple of these we just forward to all cases
defines = config.pop('defines', {})
in_ = config.pop('in', None)
reentrant = config.pop('reentrant', False)
self.cases = []
for name, case in sorted(cases.items(),
key=lambda c: c[1].get('lineno')):
for name, case in cases.items():
self.cases.append(TestCase(config={
'name': name,
'path': path + (':%d' % case['lineno']
@@ -214,6 +225,9 @@ class TestSuite:
**case},
args=args))
# sort for consistency
self.cases.sort()
# combine per-case defines
self.defines = set.union(set(), *(
set(case.defines) for case in self.cases))
@@ -230,6 +244,14 @@ class TestSuite:
k),
file=sys.stderr)
def __repr__(self):
return '<TestSuite %s>' % self.name
def __lt__(self, other):
# sort by name
#
# note we override this with a topological sort during compilation
return self.name < other.name
def compile(test_paths, **args):
@@ -251,7 +273,29 @@ def compile(test_paths, **args):
# load the suites
suites = [TestSuite(path, args) for path in paths]
suites.sort(key=lambda s: s.name)
# sort suites by:
# 1. topologically by "after" dependencies
# 2. lexicographically for consistency
pending = co.OrderedDict((suite.name, suite)
for suite in sorted(suites))
suites = []
while pending:
pending_ = co.OrderedDict()
for suite in pending.values():
if not any(after in pending for after in suite.after):
suites.append(suite)
else:
pending_[suite.name] = suite
if len(pending_) == len(pending):
print('%serror:%s cycle detected in suite ordering, %s' % (
'\x1b[01;31m' if args['color'] else '',
'\x1b[m' if args['color'] else '',
', '.join(suite.name for suite in pending.values())))
sys.exit(-1)
pending = pending_
# check for name conflicts, these will cause ambiguity problems later
# when running tests
@@ -420,12 +464,6 @@ def compile(test_paths, **args):
f.writeln()
# create suite struct
#
# note we place this in the custom test_suites section with
# minimum alignment, otherwise GCC ups the alignment to
# 32-bytes for some reason
f.writeln('__attribute__((section("_test_suites"), '
'aligned(1)))')
f.writeln('const struct test_suite __test__%s__suite = {'
% suite.name)
f.writeln(4*' '+'.name = "%s",' % suite.name)
@@ -533,6 +571,26 @@ def compile(test_paths, **args):
f.writeln('#endif')
f.writeln()
# declare our test suites
#
# by declaring these as weak we can write these to every
# source file without issue, eventually one of these copies
# will be linked
for suite in suites:
f.writeln('extern const struct test_suite '
'__test__%s__suite;' % suite.name);
f.writeln()
f.writeln('__attribute__((weak))')
f.writeln('const struct test_suite *const test_suites[] = {');
for suite in suites:
f.writeln(4*' '+'&__test__%s__suite,' % suite.name);
f.writeln('};')
f.writeln('__attribute__((weak))')
f.writeln('const size_t test_suite_count = %d;' % len(suites))
f.writeln()
def find_runner(runner, id=None, **args):
cmd = runner.copy()

View File

@@ -1,12 +1,13 @@
# Tests covering properties of the block allocator
after = 'test_mtree'
# TODO test all of these with weird block sizes? would be nice to make this
# easy via the test_runner, either by handling it there or letting a single
# config limit the block count by a couple blocks
# test that we can alloc
[cases.t4_alloc_blocks]
[cases.test_alloc_blocks]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -39,7 +40,7 @@ code = '''
'''
# test that we can realloc after an ack
[cases.t4_alloc_reuse]
[cases.test_alloc_reuse]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -95,7 +96,7 @@ code = '''
# test that we can alloc an mtree, the difference between this and mtree tests
# is we expect this to be able to handle wrap-around
[cases.t4_alloc_mtree]
[cases.test_alloc_mtree]
in = 'lfs.c'
code = '''
const char *alphas = "abcdefghijklmnopqrstuvwxyz";

View File

@@ -1,10 +1,12 @@
# Block device tests
#
# These tests don't really test littlefs at all, they are here only to make
# sure the underlying block device is working.
#
# Note we use 251, a prime, in places to avoid aliasing powers of 2.
#
[cases.t0_bd_one_block]
[cases.test_bd_one_block]
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
code = '''
@@ -29,7 +31,7 @@ code = '''
}
'''
[cases.t0_bd_two_block]
[cases.test_bd_two_block]
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
code = '''
@@ -87,7 +89,7 @@ code = '''
}
'''
[cases.t0_bd_last_block]
[cases.test_bd_last_block]
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
code = '''
@@ -145,7 +147,7 @@ code = '''
}
'''
[cases.t0_bd_powers_of_two]
[cases.test_bd_powers_of_two]
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
code = '''
@@ -191,7 +193,7 @@ code = '''
}
'''
[cases.t0_bd_fibonacci]
[cases.test_bd_fibonacci]
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
code = '''

View File

@@ -1,3 +1,5 @@
# Test the mid-level B-trees
after = 'test_rbyd'
# maximize lookahead buffer, we don't actually gc so we only get one pass
# of the disk for these tests
@@ -5,7 +7,7 @@ defines.LOOKAHEAD_SIZE = 'lfs_alignup(BLOCK_COUNT / 8, 8)'
# test an empty tree
[cases.t2_btree_zero]
[cases.test_btree_zero]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -36,7 +38,7 @@ code = '''
'''
# test an inlined tree
[cases.t2_btree_one]
[cases.test_btree_one]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -75,7 +77,7 @@ code = '''
'''
# test a single-rbyd tree
[cases.t2_btree_two]
[cases.test_btree_two]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -121,7 +123,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_two_backwards]
[cases.test_btree_two_backwards]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -168,7 +170,7 @@ code = '''
'''
# still a single-rbyd tree, just making sure it works
[cases.t2_btree_three]
[cases.test_btree_three]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -222,7 +224,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_three_backwards]
[cases.test_btree_three_backwards]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -278,7 +280,7 @@ code = '''
# try larger trees, when exactly a tree splits depends on the disk geometry, so
# we don't really have a better way of testing multi-rbyd trees
[cases.t2_btree_push]
[cases.test_btree_push]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
in = 'lfs.c'
code = '''
@@ -330,7 +332,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_push_backwards]
[cases.test_btree_push_backwards]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
in = 'lfs.c'
code = '''
@@ -382,7 +384,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_push_fuzz]
[cases.test_btree_push_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.SEED = 'range(10)'
in = 'lfs.c'
@@ -467,7 +469,7 @@ code = '''
lfs_deinit(&lfs) => 0;
'''
[cases.t2_btree_push_sparse]
[cases.test_btree_push_sparse]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
defines.W = 5
in = 'lfs.c'
@@ -536,7 +538,7 @@ code = '''
&id_, &tag_, &weight_, &data_) => LFS_ERR_NOENT;
'''
[cases.t2_btree_push_sparse_fuzz]
[cases.test_btree_push_sparse_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.W = 5
defines.SEED = 'range(10)'
@@ -679,7 +681,7 @@ code = '''
# test btree updates
# try some small trees for easy corner cases first
[cases.t2_btree_update_one]
[cases.test_btree_update_one]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -720,7 +722,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_update_two]
[cases.test_btree_update_two]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -771,7 +773,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_update_three]
[cases.test_btree_update_three]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -832,7 +834,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_update]
[cases.test_btree_update]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
in = 'lfs.c'
code = '''
@@ -901,7 +903,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_update_fuzz]
[cases.test_btree_update_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.SAMPLES = 10
defines.SEED = 'range(10)'
@@ -999,7 +1001,7 @@ code = '''
free(sim);
'''
[cases.t2_btree_update_sparse]
[cases.test_btree_update_sparse]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
defines.W = 5
in = 'lfs.c'
@@ -1085,7 +1087,7 @@ code = '''
&id_, &tag_, &weight_, &data_) => LFS_ERR_NOENT;
'''
[cases.t2_btree_update_sparse_fuzz]
[cases.test_btree_update_sparse_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.W = 5
defines.SEED = 'range(10)'
@@ -1240,7 +1242,7 @@ code = '''
# try some corner cases first, these are actually pretty tricky since we
# need to recognize when to collapse back into an inlined tree
[cases.t2_btree_pop_one]
[cases.test_btree_pop_one]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -1293,7 +1295,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_pop_two]
[cases.test_btree_pop_two]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -1360,7 +1362,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_pop_two_other]
[cases.test_btree_pop_two_other]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -1427,7 +1429,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_pop_three]
[cases.test_btree_pop_three]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -1508,7 +1510,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_pop]
[cases.test_btree_pop]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
defines.REMAINING = [64, 2, 1, 0]
if = 'N > REMAINING'
@@ -1599,7 +1601,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_pop_backwards]
[cases.test_btree_pop_backwards]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
defines.REMAINING = [64, 2, 1, 0]
if = 'N > REMAINING'
@@ -1689,7 +1691,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_pop_fuzz]
[cases.test_btree_pop_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.REMAINING = [64, 2, 1, 0]
defines.SEED = 'range(10)'
@@ -1788,7 +1790,7 @@ code = '''
free(sim);
'''
[cases.t2_btree_pop_sparse]
[cases.test_btree_pop_sparse]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
defines.W = 5
defines.REMAINING = [64, 2, 1, 0]
@@ -1905,7 +1907,7 @@ code = '''
&id_, &tag_, &weight_, &data_) => LFS_ERR_NOENT;
'''
[cases.t2_btree_pop_sparse_fuzz]
[cases.test_btree_pop_sparse_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.W = 5
defines.REMAINING = [64, 2, 1, 0]
@@ -2070,7 +2072,7 @@ code = '''
# test btree splits
[cases.t2_btree_split]
[cases.test_btree_split]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
in = 'lfs.c'
code = '''
@@ -2125,7 +2127,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_split_fuzz]
[cases.test_btree_split_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.SEED = 'range(10)'
in = 'lfs.c'
@@ -2216,7 +2218,7 @@ code = '''
lfs_deinit(&lfs) => 0;
'''
[cases.t2_btree_split_sparse]
[cases.test_btree_split_sparse]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
defines.W = 5
in = 'lfs.c'
@@ -2272,7 +2274,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_split_sparse_fuzz]
[cases.test_btree_split_sparse_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.W = 5
defines.SEED = 'range(10)'
@@ -2425,7 +2427,7 @@ code = '''
# Some more general fuzz testing
[cases.t2_btree_general_fuzz]
[cases.test_btree_general_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
defines.SEED = 'range(100)'
in = 'lfs.c'
@@ -2541,7 +2543,7 @@ code = '''
free(sim);
'''
[cases.t2_btree_general_sparse_fuzz]
[cases.test_btree_general_sparse_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
defines.W = 5
defines.SEED = 'range(100)'
@@ -2717,7 +2719,7 @@ code = '''
# test key-value btrees
[cases.t2_btree_find_zero]
[cases.test_btree_find_zero]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -2748,7 +2750,7 @@ code = '''
&id_, &tag_, &weight_, &data_) => LFS_ERR_NOENT;
'''
[cases.t2_btree_find_one]
[cases.test_btree_find_one]
# true or false for if we should use dids vs names
defines.DID = [false, true]
in = 'lfs.c'
@@ -2797,7 +2799,7 @@ code = '''
assert(memcmp(buffer, "0", 1) == 0);
'''
[cases.t2_btree_find_two]
[cases.test_btree_find_two]
# true or false for if we should use dids vs names
defines.DID = [false, true]
in = 'lfs.c'
@@ -2857,7 +2859,7 @@ code = '''
assert(memcmp(buffer, "1", 1) == 0);
'''
[cases.t2_btree_find_three]
[cases.test_btree_find_three]
in = 'lfs.c'
# true or false for if we should use dids vs names
defines.DID = [false, true]
@@ -2928,7 +2930,7 @@ code = '''
assert(memcmp(buffer, "2", 1) == 0);
'''
[cases.t2_btree_find_three_backwards]
[cases.test_btree_find_three_backwards]
# true or false for if we should use dids vs names
defines.DID = [false, true]
in = 'lfs.c'
@@ -2999,7 +3001,7 @@ code = '''
assert(memcmp(buffer, "2", 1) == 0);
'''
[cases.t2_btree_find]
[cases.test_btree_find]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
# true or false for if we should use dids vs names
defines.DID = [false, true]
@@ -3065,7 +3067,7 @@ code = '''
}
'''
[cases.t2_btree_find_fuzz]
[cases.test_btree_find_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.SEED = 'range(10)'
in = 'lfs.c'
@@ -3176,7 +3178,7 @@ code = '''
lfs_deinit(&lfs) => 0;
'''
[cases.t2_btree_find_sparse]
[cases.test_btree_find_sparse]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
defines.W = 5
# true or false for if we should use dids vs names
@@ -3243,7 +3245,7 @@ code = '''
}
'''
[cases.t2_btree_find_sparse_fuzz]
[cases.test_btree_find_sparse_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.W = 5
defines.SEED = 'range(10)'
@@ -3395,7 +3397,7 @@ code = '''
'''
# make sure we test finds with other operations
[cases.t2_btree_find_general_fuzz]
[cases.test_btree_find_general_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
defines.SEED = 'range(100)'
in = 'lfs.c'
@@ -3569,7 +3571,7 @@ code = '''
free(sim_names);
'''
[cases.t2_btree_find_general_sparse_fuzz]
[cases.test_btree_find_general_sparse_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
defines.W = 5
defines.SEED = 'range(100)'
@@ -3795,7 +3797,7 @@ code = '''
## B-tree traversal tests ##
# some simple btree traversals
[cases.t2_btree_traversal]
[cases.test_btree_traversal]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
in = 'lfs.c'
code = '''
@@ -3913,7 +3915,7 @@ code = '''
&tag_, &weight_, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t2_btree_traversal_fuzz]
[cases.test_btree_traversal_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.SEED = 'range(10)'
in = 'lfs.c'

View File

@@ -1,9 +1,9 @@
# Directory tests
# Test directory operations
after = ['test_mtree', 'test_alloc']
## mkdir tests
[cases.t5_dirs_mkdir]
[cases.test_dirs_mkdir]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -52,7 +52,7 @@ code = '''
'''
# test that noent errors work
[cases.t5_dirs_noent]
[cases.test_dirs_noent]
defines.REMOUNT = [false, true]
code = '''
lfs_t lfs;
@@ -99,7 +99,7 @@ code = '''
'''
# test that stat on root works
[cases.t5_dirs_stat_root]
[cases.test_dirs_stat_root]
defines.REMOUNT = [false, true]
code = '''
lfs_t lfs;
@@ -146,7 +146,7 @@ code = '''
'''
# test that creating the same directory twice errors
[cases.t5_dirs_mkdir_exists]
[cases.test_dirs_mkdir_exists]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -197,7 +197,7 @@ code = '''
'''
# test what happens if we try to make root
[cases.t5_dirs_mkdir_root]
[cases.test_dirs_mkdir_root]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -253,7 +253,7 @@ code = '''
'''
# test that creating a directory with an invalid path errors
[cases.t5_dirs_mkdir_noent]
[cases.test_dirs_mkdir_noent]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -306,7 +306,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mkdir_siblings]
[cases.test_dirs_mkdir_siblings]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -369,7 +369,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mkdir_children]
[cases.test_dirs_mkdir_children]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -452,7 +452,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mkdir_many]
[cases.test_dirs_mkdir_many]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.REMOUNT = [false, true]
# limit powerloss testing due to time
@@ -515,7 +515,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mkdir_many_backwards]
[cases.test_dirs_mkdir_many_backwards]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.REMOUNT = [false, true]
# limit powerloss testing due to time
@@ -578,7 +578,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mkdir_many_2layers]
[cases.test_dirs_mkdir_many_2layers]
defines.N = [1, 2, 4, 8, 16]
defines.REMOUNT = [false, true]
# limit powerloss testing due to time
@@ -679,7 +679,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mkdir_many_3layers]
[cases.test_dirs_mkdir_many_3layers]
defines.N = [1, 2, 4]
defines.REMOUNT = [false, true]
# limit powerloss testing due to time
@@ -818,7 +818,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mkdir_many_linkedlist]
[cases.test_dirs_mkdir_many_linkedlist]
defines.N = [1, 2, 4, 8, 16, 32, 64]
defines.REMOUNT = [false, true]
# limit powerloss testing due to time
@@ -888,7 +888,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mkdir_fuzz]
[cases.test_dirs_mkdir_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.PARENT = [false, true]
defines.REMOUNT = [false, true]
@@ -989,7 +989,7 @@ code = '''
# test that did collisions don't cause issues
[cases.t5_dirs_did_collisions]
[cases.test_dirs_did_collisions]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -1084,7 +1084,7 @@ code = '''
'''
# these will also collide with the root
[cases.t5_dirs_did_zero]
[cases.test_dirs_did_zero]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -1181,7 +1181,7 @@ code = '''
# these will need to rollover from 0xffffffff -> 0x00000000 correctly
#
# note this is true even if you truncate
[cases.t5_dirs_did_ones]
[cases.test_dirs_did_ones]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -1278,7 +1278,7 @@ code = '''
# these test some boundary conditions on the underlying leb128 encoding,
# if the leb128 disk-size is not calculated correctly these can cause
# issues
[cases.t5_dirs_did_leb128_boundaries]
[cases.test_dirs_did_leb128_boundaries]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -1375,7 +1375,7 @@ code = '''
## dir remove tests
[cases.t5_dirs_rm]
[cases.test_dirs_rm]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -1449,7 +1449,7 @@ code = '''
'''
# test that noent errors work
[cases.t5_dirs_rm_noent]
[cases.test_dirs_rm_noent]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -1506,7 +1506,7 @@ code = '''
'''
# test that we catch removing of a non-empty directory
[cases.t5_dirs_rm_notempty]
[cases.test_dirs_rm_notempty]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -1577,7 +1577,7 @@ code = '''
'''
# test what happens if we try to remove root
[cases.t5_dirs_rm_root]
[cases.test_dirs_rm_root]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -1633,7 +1633,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_rm_siblings]
[cases.test_dirs_rm_siblings]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -1789,7 +1789,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_rm_children]
[cases.test_dirs_rm_children]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -1995,7 +1995,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_rm_many]
[cases.test_dirs_rm_many]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.REMAINING = [64, 2, 1, 0]
defines.REMOUNT = [false, true]
@@ -2110,7 +2110,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_rm_many_backwards]
[cases.test_dirs_rm_many_backwards]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.REMAINING = [64, 2, 1, 0]
defines.REMOUNT = [false, true]
@@ -2225,7 +2225,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_rm_many_2layers]
[cases.test_dirs_rm_many_2layers]
defines.N = [1, 2, 4, 8, 16]
defines.REMAINING = [2, 1, 0]
defines.REMOUNT = [false, true]
@@ -2429,7 +2429,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_rm_many_3layers]
[cases.test_dirs_rm_many_3layers]
defines.N = [1, 2, 4]
defines.REMAINING = [2, 1, 0]
defines.REMOUNT = [false, true]
@@ -2727,7 +2727,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_rm_many_linkedlist]
[cases.test_dirs_rm_many_linkedlist]
defines.N = [1, 2, 4, 8, 16, 32, 64]
defines.REMAINING = [16, 2, 1, 0]
defines.REMOUNT = [false, true]
@@ -2850,7 +2850,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_rm_fuzz]
[cases.test_dirs_rm_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.PARENT = [false, true]
defines.REMOUNT = [false, true]
@@ -2970,7 +2970,7 @@ code = '''
## dir rename tests
[cases.t5_dirs_mv]
[cases.test_dirs_mv]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -3056,7 +3056,7 @@ code = '''
'''
# test that we can rename, and replace, other directories
[cases.t5_dirs_mv_replace]
[cases.test_dirs_mv_replace]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -3141,7 +3141,7 @@ code = '''
'''
# test that we can rename to ourselves
[cases.t5_dirs_mv_noop]
[cases.test_dirs_mv_noop]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -3220,7 +3220,7 @@ code = '''
'''
# test that we catch replacing an invalid path
[cases.t5_dirs_mv_noent]
[cases.test_dirs_mv_noent]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -3280,7 +3280,7 @@ code = '''
'''
# test that we catch replacing a non-empty directory
[cases.t5_dirs_mv_notempty]
[cases.test_dirs_mv_notempty]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -3358,7 +3358,7 @@ code = '''
'''
# test what happens if we try to rename root
[cases.t5_dirs_mv_root]
[cases.test_dirs_mv_root]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -3411,7 +3411,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mv_siblings]
[cases.test_dirs_mv_siblings]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -3607,7 +3607,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mv_children]
[cases.test_dirs_mv_children]
defines.REMOUNT = [false, true]
reentrant = true
code = '''
@@ -3889,7 +3889,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mv_many]
[cases.test_dirs_mv_many]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.BEFORE = [false, true]
defines.REMOUNT = [false, true]
@@ -4004,7 +4004,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mv_many_backwards]
[cases.test_dirs_mv_many_backwards]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.BEFORE = [false, true]
defines.REMOUNT = [false, true]
@@ -4119,7 +4119,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mv_many_2layers]
[cases.test_dirs_mv_many_2layers]
defines.N = [1, 2, 4, 8, 16]
defines.BEFORE = [false, true]
defines.REMOUNT = [false, true]
@@ -4324,7 +4324,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mv_many_3layers]
[cases.test_dirs_mv_many_3layers]
defines.N = [1, 2, 4]
defines.BEFORE = [false, true]
defines.REMOUNT = [false, true]
@@ -4626,7 +4626,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mv_many_linkedlist]
[cases.test_dirs_mv_many_linkedlist]
defines.N = [1, 2, 4, 8, 16, 32, 64]
defines.BEFORE = [false, true]
defines.REMOUNT = [false, true]
@@ -4762,7 +4762,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_mv_fuzz]
[cases.test_dirs_mv_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.PARENT = [false, true]
defines.REMOUNT = [false, true]
@@ -4903,7 +4903,7 @@ code = '''
'''
# test all of the operations together
[cases.t5_dirs_general_fuzz]
[cases.test_dirs_general_fuzz]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.PARENT = [false, true]
defines.REMOUNT = [false, true]
@@ -5060,7 +5060,7 @@ code = '''
## Test seeking and stuff
[cases.t5_dirs_tell]
[cases.test_dirs_tell]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.PARENT = [false, true]
code = '''
@@ -5111,7 +5111,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_rewind]
[cases.test_dirs_rewind]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.PARENT = [false, true]
code = '''
@@ -5182,7 +5182,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_seek]
[cases.test_dirs_seek]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.PARENT = [false, true]
code = '''
@@ -5253,7 +5253,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_read_idempotent]
[cases.test_dirs_read_idempotent]
defines.PARENT = [false, true]
# bit 0x2 = left neighbor
# bit 0x1 = right neighbor
@@ -5339,7 +5339,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_read_with_mkdirs]
[cases.test_dirs_read_with_mkdirs]
defines.N = 5
# where in the dir read do we mkdir?
defines.I = 'range(6)'
@@ -5434,7 +5434,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_read_with_rms]
[cases.test_dirs_read_with_rms]
defines.N = 5
# where in the dir read do we rm?
defines.I = 'range(5)'
@@ -5533,7 +5533,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_read_with_mvs]
[cases.test_dirs_read_with_mvs]
defines.N = 5
# where in the dir read do we rm?
defines.I = 'range(5)'
@@ -5651,7 +5651,7 @@ code = '''
# dir reads with 2x ops have better chances of catching bugs that depend on
# invalid dir states
[cases.t5_dirs_read_with_2_mkdirs]
[cases.test_dirs_read_with_2_mkdirs]
defines.N = 5
# where in the dir read do we mkdir?
defines.I = 'range(6)'
@@ -5756,7 +5756,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_read_with_2_rms]
[cases.test_dirs_read_with_2_rms]
defines.N = 5
# where in the dir read do we rm?
defines.I = 'range(5)'
@@ -5863,7 +5863,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t5_dirs_read_with_2_mvs]
[cases.test_dirs_read_with_2_mvs]
defines.N = 5
# where in the dir read do we rm?
defines.I = 'range(5)'
@@ -5993,7 +5993,7 @@ code = '''
'''
# test removing the directory we are iterating over
[cases.t5_dirs_read_rm]
[cases.test_dirs_read_rm]
defines.N = 5
# where in the dir read do we remove?
defines.I = 'range(6)'
@@ -6086,7 +6086,7 @@ code = '''
#
# This is a useful feature, but it's unintuitive if this should have
# well-defined behavior, so make sure to test for it
[cases.t5_dirs_recursive_rm]
[cases.test_dirs_recursive_rm]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.PARENT = [false, true]
# 0 => don't seek
@@ -6208,7 +6208,7 @@ code = '''
#
# This is a useful feature, but it's unintuitive whether this should have
# well-defined behavior, so make sure to test for it
[cases.t5_dirs_recursive_mv]
[cases.test_dirs_recursive_mv]
defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
defines.BEFORE = [false, true]
# 0 => no

View File

@@ -1,9 +1,12 @@
# Test the high-level metadata tree in the core of littlefs
after = ['test_rbyd', 'test_btree']
# maximize lookahead buffer, we don't actually gc so we only get one pass
# of the disk for these tests
defines.LOOKAHEAD_SIZE = 'BLOCK_COUNT / 8'
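(A quick sanity check on the LOOKAHEAD_SIZE define above, assuming the usual one-bit-per-block lookahead bitmap sized in bytes: BLOCK_COUNT / 8 bytes is exactly BLOCK_COUNT bits, e.g. 1024 blocks -> 128 bytes, so a single allocator pass can see every block on disk, which is what the "we only get one pass" comment relies on.)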
# test a single mroot
[cases.t3_mtree_mroot]
[cases.test_mtree_mroot]
code = '''
lfs_t lfs;
lfsr_format(&lfs, cfg) => 0;
@@ -12,7 +15,7 @@ code = '''
'''
# test a single mroot with attributes
[cases.t3_mtree_mroot_attrs]
[cases.test_mtree_mroot_attrs]
defines.N = [1, 3]
in = 'lfs.c'
code = '''
@@ -51,7 +54,7 @@ code = '''
'''
# test a single mroot with forced compaction
[cases.t3_mtree_mroot_compact]
[cases.test_mtree_mroot_compact]
defines.N = [1, 3]
in = 'lfs.c'
code = '''
@@ -93,7 +96,7 @@ code = '''
'''
# test a single mroot with many commits
[cases.t3_mtree_mroot_many_commits]
[cases.test_mtree_mroot_many_commits]
defines.N = [5, 5000]
in = 'lfs.c'
code = '''
@@ -135,7 +138,7 @@ code = '''
## Splitting operations ##
# specific split corner cases
[cases.t3_mtree_uninline]
[cases.test_mtree_uninline]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -210,7 +213,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_uninline_split]
[cases.test_mtree_uninline_split]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -284,7 +287,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_split]
[cases.test_mtree_split]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -386,7 +389,7 @@ code = '''
# try creating a range of entries that may or may not split our mtree
[cases.t3_mtree_split_many]
[cases.test_mtree_split_many]
defines.N = [5, 10, 20, 40, 80, 160, 320]
defines.FORCE_COMPACTION = [false, true]
in = 'lfs.c'
@@ -471,7 +474,7 @@ code = '''
'''
# create random entries
[cases.t3_mtree_split_fuzz]
[cases.test_mtree_split_fuzz]
defines.N = [5, 10, 20, 40, 80, 160]
defines.FORCE_COMPACTION = [false, true]
defines.SEED = 'range(100)'
@@ -580,7 +583,7 @@ code = '''
## Dropping operations ##
# specific drop corner cases
[cases.t3_mtree_drop]
[cases.test_mtree_drop]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -650,7 +653,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_drop_compact]
[cases.test_mtree_drop_compact]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -723,7 +726,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_drop_uninline]
[cases.test_mtree_drop_uninline]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -783,7 +786,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_drop_uninline_split_l]
[cases.test_mtree_drop_uninline_split_l]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -851,7 +854,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_drop_uninline_split_r]
[cases.test_mtree_drop_uninline_split_r]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -919,7 +922,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_drop_uninline_split_both]
[cases.test_mtree_drop_uninline_split_both]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -970,7 +973,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_drop_split_l]
[cases.test_mtree_drop_split_l]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -1065,7 +1068,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_drop_split_r]
[cases.test_mtree_drop_split_r]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -1160,7 +1163,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_drop_split_both]
[cases.test_mtree_drop_split_both]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -1240,7 +1243,7 @@ code = '''
'''
# try creating an mtree and then dropping mdirs
[cases.t3_mtree_drop_many]
[cases.test_mtree_drop_many]
defines.N = [5, 10, 20, 40, 80, 160, 320]
defines.REMAINING = [20, 5, 1, 0]
if = 'N > REMAINING'
@@ -1340,7 +1343,7 @@ code = '''
'''
# this one has some pretty nasty corner cases
[cases.t3_mtree_repeated_drop]
[cases.test_mtree_repeated_drop]
defines.N = [5, 10, 20, 40]
defines.FORCE_COMPACTION = [false, true]
defines.CYCLES = 10
@@ -1427,7 +1430,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_drop_fuzz]
[cases.test_mtree_drop_fuzz]
defines.N = [5, 10, 20, 40, 80, 160]
defines.FORCE_COMPACTION = [false, true]
defines.SEED = 'range(100)'
@@ -1559,7 +1562,7 @@ code = '''
## Relocation operations ##
# specific relocation corner cases
[cases.t3_mtree_relocate]
[cases.test_mtree_relocate]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -1655,7 +1658,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_relocate_sibling_l]
[cases.test_mtree_relocate_sibling_l]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -1750,7 +1753,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_relocate_sibling_r]
[cases.test_mtree_relocate_sibling_r]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -1845,7 +1848,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_extend]
[cases.test_mtree_extend]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -1902,7 +1905,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_extend_twice]
[cases.test_mtree_extend_twice]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -1964,7 +1967,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_relocate_mroot]
[cases.test_mtree_relocate_mroot]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -2032,7 +2035,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_relocate_extend]
[cases.test_mtree_relocate_extend]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -2139,7 +2142,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_split_extend]
[cases.test_mtree_split_extend]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -2253,7 +2256,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_drop_extend]
[cases.test_mtree_drop_extend]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -2336,7 +2339,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_uninline_extend]
[cases.test_mtree_uninline_extend]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -2425,7 +2428,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_uninline_split_extend]
[cases.test_mtree_uninline_split_extend]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -2514,7 +2517,7 @@ code = '''
'''
# this fuzz covers a lot of configurations
[cases.t3_mtree_relocating_fuzz]
[cases.test_mtree_relocating_fuzz]
defines.N = [5, 10, 20, 40]
defines.FORCE_COMPACTION = [false, true]
defines.BLOCK_CYCLES = [5, 2, 1]
@@ -2659,7 +2662,7 @@ code = '''
## Neighboring mdir updates ##
[cases.t3_mtree_neighbor]
[cases.test_mtree_neighbor]
in = 'lfs.c'
code = '''
const char *alphas = "abcdefghijklmnopqrstuvwxyz";
@@ -2714,7 +2717,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_neighbor_remove_l]
[cases.test_mtree_neighbor_remove_l]
in = 'lfs.c'
code = '''
const char *alphas = "abcdefghijklmnopqrstuvwxyz";
@@ -2760,7 +2763,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_neighbor_remove_r]
[cases.test_mtree_neighbor_remove_r]
in = 'lfs.c'
code = '''
const char *alphas = "abcdefghijklmnopqrstuvwxyz";
@@ -2806,7 +2809,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_neighbor_uninline]
[cases.test_mtree_neighbor_uninline]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -2890,7 +2893,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_neighbor_uninline_split]
[cases.test_mtree_neighbor_uninline_split]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -2969,7 +2972,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_neighbor_split]
[cases.test_mtree_neighbor_split]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
in = 'lfs.c'
@@ -3074,7 +3077,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_neighbor_extend]
[cases.test_mtree_neighbor_extend]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -3145,7 +3148,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_neighbor_relocate]
[cases.test_mtree_neighbor_relocate]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -3243,7 +3246,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_neighbor_mid_split]
[cases.test_mtree_neighbor_mid_split]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -3340,7 +3343,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_neighbor_mid_drop]
[cases.test_mtree_neighbor_mid_drop]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -3437,7 +3440,7 @@ code = '''
## mtree traversal ##
# test specific corner cases
[cases.t3_mtree_traversal]
[cases.test_mtree_traversal]
defines.VALIDATE = [false, true]
in = 'lfs.c'
code = '''
@@ -3538,7 +3541,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_traversal_uninline]
[cases.test_mtree_traversal_uninline]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
defines.VALIDATE = [false, true]
@@ -3674,7 +3677,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_traversal_split]
[cases.test_mtree_traversal_split]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
defines.VALIDATE = [false, true]
@@ -3810,7 +3813,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_traversal_extend]
[cases.test_mtree_traversal_extend]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -3929,7 +3932,7 @@ code = '''
'''
# larger traversal tests
[cases.t3_mtree_traversal_many]
[cases.test_mtree_traversal_many]
defines.N = [5, 10, 20, 40, 80, 160, 320]
defines.VALIDATE = [false, true]
defines.FORCE_COMPACTION = [false, true]
@@ -4074,7 +4077,7 @@ code = '''
lfsr_unmount(&lfs) => 0;
'''
[cases.t3_mtree_traversal_fuzz]
[cases.test_mtree_traversal_fuzz]
defines.N = [5, 10, 20, 40, 80, 160]
defines.VALIDATE = [false, true]
defines.FORCE_COMPACTION = [false, true]
@@ -4245,7 +4248,7 @@ code = '''
## Cycle detection? ##
# test that our cycle detector at least works in common cases
[cases.t3_mtree_traversal_mroot_cycle]
[cases.test_mtree_traversal_mroot_cycle]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -4309,7 +4312,7 @@ code = '''
## Magic consistency ##
# make sure our magic string ("littlefs") shows up in the same place (off=8)
[cases.t3_mtree_magic]
[cases.test_mtree_magic]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
code = '''
@@ -4328,7 +4331,7 @@ code = '''
assert(memcmp(&magic[8], "littlefs", 8) == 0);
'''
[cases.t3_mtree_magic_extend]
[cases.test_mtree_magic_extend]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts
@@ -4380,7 +4383,7 @@ code = '''
assert(memcmp(&magic[8], "littlefs", 8) == 0);
'''
[cases.t3_mtree_magic_extend_twice]
[cases.test_mtree_magic_extend_twice]
# this should be set so only one entry can fit in a metadata block
defines.SIZE = 'BLOCK_SIZE / 4'
# make it so blocks relocate every two compacts

View File

@@ -1,5 +1,5 @@
# Test this inner rbyd data-structure
# Test the low-level rbyd data-structure
after = 'test_bd'
# test with a number of different erase values
defines.ERASE_VALUE = [0xff, 0x00, 0x1b]
@@ -13,7 +13,7 @@ defines.ERASE_VALUE = [0xff, 0x00, 0x1b]
# waste time when testing
defines.BLOCK_SIZE = 32768
[cases.t1_rbyd_commit]
[cases.test_rbyd_commit]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -44,7 +44,7 @@ code = '''
lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, 0) => 0;
'''
[cases.t1_rbyd_multi_commit]
[cases.test_rbyd_multi_commit]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -76,7 +76,7 @@ code = '''
lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, 0) => 0;
'''
[cases.t1_rbyd_commit_fetch_commit]
[cases.test_rbyd_commit_fetch_commit]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -104,12 +104,12 @@ code = '''
lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, 0) => 0;
'''
# [cases.t1_rbyd_fetchmatch]
# [cases.t1_rbyd_multi_fetchmatch]
# [cases.test_rbyd_fetchmatch]
# [cases.test_rbyd_multi_fetchmatch]
# TODO we really need to test dense keys...
[cases.t1_rbyd_lookup]
[cases.test_rbyd_lookup]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -219,7 +219,7 @@ code = '''
&id_, &tag_, NULL, &data_) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_multi_lookup]
[cases.test_rbyd_multi_lookup]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -331,7 +331,7 @@ code = '''
&id_, &tag_, NULL, &data_) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_get]
[cases.test_rbyd_get]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -410,7 +410,7 @@ code = '''
=> LFS_ERR_NOENT;
'''
[cases.t1_rbyd_multi_get]
[cases.test_rbyd_multi_get]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -491,7 +491,7 @@ code = '''
=> LFS_ERR_NOENT;
'''
[cases.t1_rbyd_bifoliate]
[cases.test_rbyd_bifoliate]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -554,7 +554,7 @@ code = '''
assert(lfsr_data_size(data_) == 4);
'''
[cases.t1_rbyd_bflips]
[cases.test_rbyd_bflips]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -619,7 +619,7 @@ code = '''
assert(lfsr_data_size(data_) == 4);
'''
[cases.t1_rbyd_trifoliate]
[cases.test_rbyd_trifoliate]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -698,7 +698,7 @@ code = '''
assert(lfsr_data_size(data_) == 4);
'''
[cases.t1_rbyd_rflips]
[cases.test_rbyd_rflips]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -841,7 +841,7 @@ code = '''
assert(lfsr_data_size(data_) == 4);
'''
[cases.t1_rbyd_quadrifoliate]
[cases.test_rbyd_quadrifoliate]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -1012,7 +1012,7 @@ code = '''
assert(lfsr_data_size(data_) == 4);
'''
[cases.t1_rbyd_rotations]
[cases.test_rbyd_rotations]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -1183,7 +1183,7 @@ code = '''
assert(lfsr_data_size(data_) == 4);
'''
[cases.t1_rbyd_ysplits]
[cases.test_rbyd_ysplits]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -1362,7 +1362,7 @@ code = '''
assert(lfsr_data_size(data_) == 4);
'''
[cases.t1_rbyd_quintifoliate]
[cases.test_rbyd_quintifoliate]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -1569,7 +1569,7 @@ code = '''
assert(lfsr_data_size(data_) == 4);
'''
[cases.t1_rbyd_prunes]
[cases.test_rbyd_prunes]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -1735,7 +1735,7 @@ code = '''
assert(lfsr_data_size(data_) == 4);
'''
[cases.t1_rbyd_sextifoliate]
[cases.test_rbyd_sextifoliate]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -1918,7 +1918,7 @@ code = '''
assert(lfsr_data_size(data_) == 4);
'''
[cases.t1_rbyd_permutations]
[cases.test_rbyd_permutations]
defines.N = 'range(1, 8)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
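The "-1 => exhaust all permutations / n => reproduce a specific permutation" convention shows up on most of the permutation cases below. As a rough illustration of how a single integer can name one permutation of 0..N-1 (a factorial-number-system decode), here is a minimal standalone sketch; it only shows the idea behind the convention, not the framework's actual define handling, and the names perm_decode, pool, etc. are hypothetical.

// Hedged sketch: map an index onto one permutation of 0..n-1 using the
// factorial number system. Index 0 is the identity ordering, n!-1 is the
// reverse ordering; a negative index would instead mean "loop over all n!".
#include <assert.h>
#include <stdio.h>

static void perm_decode(unsigned index, int *out, int n) {
    // factorials up to n, n assumed small (<= 12 to avoid overflow)
    unsigned fact[13] = {1};
    for (int i = 1; i <= n; i++) {
        fact[i] = fact[i-1] * (unsigned)i;
    }

    // pool of elements not yet placed
    int pool[12];
    for (int i = 0; i < n; i++) {
        pool[i] = i;
    }

    int rem = n;
    for (int i = 0; i < n; i++) {
        unsigned d = index / fact[rem-1];   // which remaining element to pick
        index %= fact[rem-1];
        out[i] = pool[d];
        for (int j = (int)d; j < rem-1; j++) {
            pool[j] = pool[j+1];            // drop pool[d], keep order
        }
        rem -= 1;
    }
}

int main(void) {
    int n = 4;
    unsigned total = 1;
    for (int i = 2; i <= n; i++) {
        total *= (unsigned)i;               // n! = 24
    }

    // the "-1" case: exhaust every permutation
    for (unsigned p = 0; p < total; p++) {
        int order[12];
        perm_decode(p, order, n);
        printf("perm %2u: %d %d %d %d\n",
                p, order[0], order[1], order[2], order[3]);
    }

    // the "n" case: reproduce a single permutation deterministically
    int order[12];
    perm_decode(7, order, n);
    assert(order[0] == 1);                  // permutation 7 of 4 starts with 1
    return 0;
}

With a decode like this, a test can either sweep p over 0..n!-1 or rerun exactly the permutation that previously failed.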
@@ -2007,7 +2007,7 @@ code = '''
}
'''
[cases.t1_rbyd_multi_permutations]
[cases.test_rbyd_multi_permutations]
defines.N = 'range(1, 8)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -2094,7 +2094,7 @@ code = '''
}
'''
[cases.t1_rbyd_traverse]
[cases.test_rbyd_traverse]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -2183,7 +2183,7 @@ code = '''
&id_, &tag_, NULL, &data_) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_multi_traverse]
[cases.test_rbyd_multi_traverse]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -2274,7 +2274,7 @@ code = '''
&id_, &tag_, NULL, &data_) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_traverse_permutations]
[cases.test_rbyd_traverse_permutations]
defines.N = 'range(1, 8)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -2344,7 +2344,7 @@ code = '''
}
'''
[cases.t1_rbyd_multi_traverse_permutations]
[cases.test_rbyd_multi_traverse_permutations]
defines.N = 'range(1, 8)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -2415,7 +2415,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_update_permutations]
[cases.test_rbyd_update_permutations]
defines.N = 'range(1, 8)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -2526,7 +2526,7 @@ code = '''
}
'''
[cases.t1_rbyd_large]
[cases.test_rbyd_large]
in = 'lfs.c'
# ORDER:
# 0 = in-order
@@ -2593,7 +2593,7 @@ code = '''
### Removal testing ###
[cases.t1_rbyd_remove]
[cases.test_rbyd_remove]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -2699,7 +2699,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_remove_permutations]
[cases.test_rbyd_remove_permutations]
defines.N = 'range(1, 7)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -2849,7 +2849,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_remove_traverse_permutations]
[cases.test_rbyd_remove_traverse_permutations]
defines.N = 'range(1, 7)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -2949,7 +2949,7 @@ code = '''
}
'''
[cases.t1_rbyd_remove_missing]
[cases.test_rbyd_remove_missing]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -3098,7 +3098,7 @@ code = '''
&id_, &tag_, NULL, &data_) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_remove_again]
[cases.test_rbyd_remove_again]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -3333,7 +3333,7 @@ code = '''
&id_, &tag_, NULL, &data_) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_remove_all]
[cases.test_rbyd_remove_all]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -3415,7 +3415,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_remove_all_permutations]
[cases.test_rbyd_remove_all_permutations]
defines.N = 'range(1, 7)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -3540,7 +3540,7 @@ code = '''
# the main purpose of this test is to try to fuzz for failures in the
# balancing algorithm
[cases.t1_rbyd_fuzz_append_removes]
[cases.test_rbyd_fuzz_append_removes]
defines.N = 'range(1, 33)'
defines.SEED = 'range(1000)'
# large progs take too long for now
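The comment above test_rbyd_fuzz_append_removes ("fuzz for failures in the balancing algorithm") describes a pattern several of these cases share: drive the tree with seeded random operations and cross-check against a trivial reference model. The sketch below only shows that general shape; the rbyd calls are omitted, and the PRNG choice and the FUZZ_N/FUZZ_SEED stand-ins are assumptions, not the framework's actual code.

// Hedged sketch of a seeded append/remove fuzz loop checked against a
// plain-array reference model; not the actual test code.
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define FUZZ_N    32    // stand-in for the N define
#define FUZZ_SEED 42    // stand-in for the SEED define

int main(void) {
    uint32_t prng = FUZZ_SEED;
    int ref[FUZZ_N];    // reference model: what the tree should contain
    int count = 0;

    for (int step = 0; step < 1000; step++) {
        // xorshift32 keeps the run reproducible for a given SEED
        prng ^= prng << 13;
        prng ^= prng >> 17;
        prng ^= prng << 5;

        if (count < FUZZ_N && (prng & 1)) {
            // append a new entry to the reference (and to the tree under test)
            ref[count++] = (int)(prng >> 1) % 1000;
        } else if (count > 0) {
            // remove a random entry from both
            int i = (int)((prng >> 1) % (uint32_t)count);
            memmove(&ref[i], &ref[i+1], (size_t)(count-1-i)*sizeof(int));
            count--;
        }

        // after each op the real test would look up every id in the tree and
        // compare against ref[]; a balancing bug shows up as a mismatch or a
        // failed internal assert during the append/remove itself
    }

    assert(count >= 0 && count <= FUZZ_N);
    return 0;
}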
@@ -3652,7 +3652,7 @@ code = '''
### Insertion testing ###
[cases.t1_rbyd_create]
[cases.test_rbyd_create]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -3799,7 +3799,7 @@ code = '''
assert(memcmp(buffer, "\xcc\xcc\xcc\xcc", 4) == 0);
'''
[cases.t1_rbyd_multi_create]
[cases.test_rbyd_multi_create]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -3956,7 +3956,7 @@ code = '''
assert(memcmp(buffer, "\xcc\xcc\xcc\xcc", 4) == 0);
'''
[cases.t1_rbyd_create_permutations]
[cases.test_rbyd_create_permutations]
defines.N = 'range(1, 8)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -4056,7 +4056,7 @@ code = '''
}
'''
[cases.t1_rbyd_multi_create_permutations]
[cases.test_rbyd_multi_create_permutations]
defines.N = 'range(1, 8)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -4155,7 +4155,7 @@ code = '''
}
'''
[cases.t1_rbyd_create_traverse]
[cases.test_rbyd_create_traverse]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -4261,7 +4261,7 @@ code = '''
&id_, &tag_, NULL, &data_) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_multi_create_traverse]
[cases.test_rbyd_multi_create_traverse]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -4369,7 +4369,7 @@ code = '''
&id_, &tag_, NULL, &data_) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_create_traverse_permutations]
[cases.test_rbyd_create_traverse_permutations]
defines.N = 'range(1, 8)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -4459,7 +4459,7 @@ code = '''
}
'''
[cases.t1_rbyd_multi_create_traverse_permutations]
[cases.test_rbyd_multi_create_traverse_permutations]
defines.N = 'range(1, 8)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -4547,7 +4547,7 @@ code = '''
}
'''
[cases.t1_rbyd_create_large]
[cases.test_rbyd_create_large]
in = 'lfs.c'
# ORDER:
# 0 = in-order
@@ -4620,7 +4620,7 @@ code = '''
### Mixed create and attr testing ###
[cases.t1_rbyd_mixed]
[cases.test_rbyd_mixed]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -4854,7 +4854,7 @@ code = '''
assert(memcmp(buffer, "\xcc\xcc", 2) == 0);
'''
[cases.t1_rbyd_multi_mixed]
[cases.test_rbyd_multi_mixed]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -5110,7 +5110,7 @@ code = '''
assert(memcmp(buffer, "\xcc\xcc", 2) == 0);
'''
[cases.t1_rbyd_mixed_permutations]
[cases.test_rbyd_mixed_permutations]
defines.N = 'range(1, 7)'
defines.M = 'range(1, 4)'
# -1 => exhaust all permutations
@@ -5222,7 +5222,7 @@ code = '''
}
'''
[cases.t1_rbyd_multi_mixed_permutations]
[cases.test_rbyd_multi_mixed_permutations]
defines.N = 'range(1, 7)'
defines.M = 'range(1, 4)'
# -1 => exhaust all permutations
@@ -5333,7 +5333,7 @@ code = '''
}
'''
[cases.t1_rbyd_mixed_traverse]
[cases.test_rbyd_mixed_traverse]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -5499,7 +5499,7 @@ code = '''
&id_, &tag_, NULL, &data_) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_multi_mixed_traverse]
[cases.test_rbyd_multi_mixed_traverse]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -5671,7 +5671,7 @@ code = '''
&id_, &tag_, NULL, &data_) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_mixed_traverse_permutations]
[cases.test_rbyd_mixed_traverse_permutations]
defines.N = 'range(1, 7)'
defines.M = 'range(1, 4)'
# -1 => exhaust all permutations
@@ -5779,7 +5779,7 @@ code = '''
}
'''
[cases.t1_rbyd_multi_mixed_traverse_permutations]
[cases.test_rbyd_multi_mixed_traverse_permutations]
defines.N = 'range(1, 7)'
defines.M = 'range(1, 4)'
# -1 => exhaust all permutations
@@ -5888,7 +5888,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_mixed_update_permutations]
[cases.test_rbyd_mixed_update_permutations]
defines.N = 'range(1, 4)'
defines.M = 'range(1, 3)'
# -1 => exhaust all permutations
@@ -6016,7 +6016,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_mixed_remove_permutations]
[cases.test_rbyd_mixed_remove_permutations]
defines.N = 'range(1, 7)'
defines.M = 'range(1, 4)'
# -1 => exhaust all permutations
@@ -6210,7 +6210,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_mixed_remove_all_permutations]
[cases.test_rbyd_mixed_remove_all_permutations]
defines.N = 'range(1, 4)'
defines.M = 'range(1, 3)'
# -1 => exhaust all permutations
@@ -6335,7 +6335,7 @@ code = '''
}
'''
[cases.t1_rbyd_mixed_large]
[cases.test_rbyd_mixed_large]
in = 'lfs.c'
# ORDER:
# 0 = in-order
@@ -6426,7 +6426,7 @@ code = '''
### Test unrelated no-id tags ###
[cases.t1_rbyd_unrelated_create_permutations]
[cases.test_rbyd_unrelated_create_permutations]
defines.N = 'range(1, 8)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -6564,7 +6564,7 @@ code = '''
}
'''
[cases.t1_rbyd_unrelated_mixed_permutations]
[cases.test_rbyd_unrelated_mixed_permutations]
defines.N = 'range(1, 7)'
defines.M = 'range(1, 4)'
# -1 => exhaust all permutations
@@ -6727,7 +6727,7 @@ code = '''
### Deletion testing ###
[cases.t1_rbyd_delete]
[cases.test_rbyd_delete]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -6853,7 +6853,7 @@ code = '''
lfsr_rbyd_get(&lfs, &rbyd, 2, LFSR_TAG_REG, buffer, 4) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_delete_range]
[cases.test_rbyd_delete_range]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -7093,7 +7093,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_delete_permutations]
[cases.test_rbyd_delete_permutations]
defines.N = 'range(1, 7)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -7246,7 +7246,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_delete_range_permutations]
[cases.test_rbyd_delete_range_permutations]
defines.N = 'range(1, 7)'
defines.M = 'range(1, 4)'
# -1 => exhaust all permutations
@@ -7437,7 +7437,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_delete_traverse_permutations]
[cases.test_rbyd_delete_traverse_permutations]
defines.N = 'range(1, 7)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -7560,7 +7560,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_delete_traverse_range_permutations]
[cases.test_rbyd_delete_traverse_range_permutations]
defines.N = 'range(1, 7)'
defines.M = 'range(1, 4)'
# -1 => exhaust all permutations
@@ -7705,7 +7705,7 @@ code = '''
# Note, "delete_all" is a weird state for rbyd trees to be in, since they
# don't really have a trunk at this point
[cases.t1_rbyd_delete_all]
[cases.test_rbyd_delete_all]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -7819,7 +7819,7 @@ code = '''
=> LFS_ERR_NOENT;
'''
[cases.t1_rbyd_delete_all_range]
[cases.test_rbyd_delete_all_range]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -7946,7 +7946,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_delete_all_permutations]
[cases.test_rbyd_delete_all_permutations]
defines.N = 'range(1, 7)'
# -1 => exhaust all permutations
# n => reproduce a specific permutation
@@ -8087,7 +8087,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_delete_all_range_permutations]
[cases.test_rbyd_delete_all_range_permutations]
defines.N = 'range(1, 7)'
defines.M = 'range(1, 4)'
# -1 => exhaust all permutations
@@ -8247,7 +8247,7 @@ code = '''
# the main purpose of this test is to try to fuzz for failures in the
# balancing algorithm
[cases.t1_rbyd_fuzz_create_deletes]
[cases.test_rbyd_fuzz_create_deletes]
defines.N = 'range(1, 33)'
defines.SEED = 'range(1000)'
# large progs take too long for now
@@ -8357,7 +8357,7 @@ code = '''
# Test rbyd weights
[cases.t1_rbyd_sparse]
[cases.test_rbyd_sparse]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -8594,7 +8594,7 @@ code = '''
assert(lfsr_data_size(data_) == 4);
'''
[cases.t1_rbyd_sparse_traverse]
[cases.test_rbyd_sparse_traverse]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -8696,7 +8696,7 @@ code = '''
&id_, &tag_, &weight_, &data_) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_sparse_permutations]
[cases.test_rbyd_sparse_permutations]
defines.N = 'range(1, 8)'
defines.W = 5
# -1 => exhaust all permutations
@@ -8782,7 +8782,7 @@ code = '''
}
'''
[cases.t1_rbyd_sparse_traverse_permutations]
[cases.test_rbyd_sparse_traverse_permutations]
defines.N = 'range(1, 8)'
defines.W = 5
# -1 => exhaust all permutations
@@ -8874,7 +8874,7 @@ code = '''
'''
# Weights mixed with attributes
[cases.t1_rbyd_sparse_mixed]
[cases.test_rbyd_sparse_mixed]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -9546,7 +9546,7 @@ code = '''
assert(lfsr_data_size(data_) == 2);
'''
[cases.t1_rbyd_sparse_mixed_traverse]
[cases.test_rbyd_sparse_mixed_traverse]
in = 'lfs.c'
code = '''
lfs_t lfs;
@@ -9795,7 +9795,7 @@ code = '''
&id_, &tag_, &weight_, &data_) => LFS_ERR_NOENT;
'''
[cases.t1_rbyd_sparse_mixed_permutations]
[cases.test_rbyd_sparse_mixed_permutations]
defines.N = 'range(1, 8)'
defines.W = 5
# -1 => exhaust all permutations
@@ -9906,7 +9906,7 @@ code = '''
}
'''
[cases.t1_rbyd_sparse_mixed_traverse_permutations]
[cases.test_rbyd_sparse_mixed_traverse_permutations]
defines.N = 'range(1, 8)'
defines.W = 5
# -1 => exhaust all permutations
@@ -10026,7 +10026,7 @@ code = '''
# other sparse testing, various grow/shrink corner cases
[cases.t1_rbyd_sparse_grow_permutations]
[cases.test_rbyd_sparse_grow_permutations]
defines.N = 'range(1, 7)'
defines.W = 5
defines.D = [1, 2]
@@ -10152,7 +10152,7 @@ code = '''
}
'''
[cases.t1_rbyd_sparse_grupdate_permutations]
[cases.test_rbyd_sparse_grupdate_permutations]
defines.N = 'range(1, 7)'
defines.W = 5
defines.D = [1, 2]
@@ -10287,7 +10287,7 @@ code = '''
# I don't know if this actually happens in littlefs, but this tests a specific
# code path in lfsr_rbyd_append (split altgt + shrinking)
[cases.t1_rbyd_sparse_grappend_permutations]
[cases.test_rbyd_sparse_grappend_permutations]
defines.N = 'range(1, 7)'
defines.W = 5
defines.D = [1, 2]
@@ -10435,7 +10435,7 @@ code = '''
}
'''
[cases.t1_rbyd_sparse_shrink_permutations]
[cases.test_rbyd_sparse_shrink_permutations]
defines.N = 'range(1, 7)'
defines.W = 5
defines.D = [1, 2]
@@ -10561,7 +10561,7 @@ code = '''
}
'''
[cases.t1_rbyd_sparse_shrupdate_permutations]
[cases.test_rbyd_sparse_shrupdate_permutations]
defines.N = 'range(1, 7)'
defines.W = 5
defines.D = [1, 2]
@@ -10696,7 +10696,7 @@ code = '''
# I don't know if this actually happens in littlefs, but this tests a specific
# code path in lfsr_rbyd_append (split altgt + shrinking)
[cases.t1_rbyd_sparse_shrappend_permutations]
[cases.test_rbyd_sparse_shrappend_permutations]
defines.N = 'range(1, 7)'
defines.W = 5
defines.D = [1, 2]
@@ -10844,7 +10844,7 @@ code = '''
}
'''
[cases.t1_rbyd_sparse_delete_permutations]
[cases.test_rbyd_sparse_delete_permutations]
defines.N = 'range(1, 7)'
defines.W = 5
# -1 => exhaust all permutations
@@ -10997,7 +10997,7 @@ code = '''
}
'''
[cases.t1_rbyd_sparse_attr_permutations]
[cases.test_rbyd_sparse_attr_permutations]
defines.N = 'range(1, 7)'
defines.W = 5
# -1 => exhaust all permutations
@@ -11175,7 +11175,7 @@ code = '''
# Some more fuzzish testing
[cases.t1_rbyd_fuzz_mixed]
[cases.test_rbyd_fuzz_mixed]
defines.N = 'range(1, 33)'
defines.M = 3
defines.SEED = 'range(1000)'
@@ -11326,7 +11326,7 @@ code = '''
free(sim);
'''
[cases.t1_rbyd_fuzz_sparse]
[cases.test_rbyd_fuzz_sparse]
defines.N = 'range(1, 33)'
defines.W = 5
defines.SEED = 'range(1000)'
@@ -11521,7 +11521,7 @@ code = '''
### Wide-tag things ###
[cases.t1_rbyd_wide_lookup_permutations]
[cases.test_rbyd_wide_lookup_permutations]
defines.N = 'range(1, 7)'
defines.SHIFT = [0, 3, -3]
# -1 => exhaust all permutations
@@ -11606,7 +11606,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_wide_remove_permutations]
[cases.test_rbyd_wide_remove_permutations]
defines.N = 'range(1, 7)'
defines.SHIFT = [0, 3, -3]
# -1 => exhaust all permutations
@@ -11743,7 +11743,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_wide_replace_permutations]
[cases.test_rbyd_wide_replace_permutations]
defines.N = 'range(1, 7)'
defines.SHIFT = [0, 3, -3]
# -1 => exhaust all permutations
@@ -11884,7 +11884,7 @@ code = '''
}
'''
[cases.t1_rbyd_wide_mixed_lookup_permutations]
[cases.test_rbyd_wide_mixed_lookup_permutations]
defines.N = 'range(1, 7)'
defines.SHIFT = [0, 3, -3]
# -1 => exhaust all permutations
@@ -11970,7 +11970,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_wide_mixed_remove_permutations]
[cases.test_rbyd_wide_mixed_remove_permutations]
defines.N = 'range(1, 7)'
defines.SHIFT = [0, 3, -3]
# -1 => exhaust all permutations
@@ -12115,7 +12115,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_wide_mixed_replace_permutations]
[cases.test_rbyd_wide_mixed_replace_permutations]
defines.N = 'range(1, 7)'
defines.SHIFT = [0, 3, -3]
# -1 => exhaust all permutations
@@ -12264,7 +12264,7 @@ code = '''
}
'''
[cases.t1_rbyd_wide_weighted_lookup_permutations]
[cases.test_rbyd_wide_weighted_lookup_permutations]
defines.N = 'range(1, 7)'
defines.SHIFT = [0, 3, -3]
# -1 => exhaust all permutations
@@ -12348,7 +12348,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_wide_weighted_remove_permutations]
[cases.test_rbyd_wide_weighted_remove_permutations]
defines.N = 'range(1, 7)'
defines.SHIFT = [0, 3, -3]
# -1 => exhaust all permutations
@@ -12487,7 +12487,7 @@ code = '''
# NOTE if we separate physical/logical block sizes we may be able to
# use emubd's copy-on-write copy to speed this up significantly
[cases.t1_rbyd_wide_weighted_replace_permutations]
[cases.test_rbyd_wide_weighted_replace_permutations]
defines.N = 'range(1, 7)'
defines.SHIFT = [0, 3, -3]
# -1 => exhaust all permutations