From 39f417db45deadfa1d4722610a4b5a9a042fb1c5 Mon Sep 17 00:00:00 2001 From: Christopher Haster Date: Fri, 6 Oct 2023 23:21:26 -0500 Subject: [PATCH] Implemented a filesystem traversal that understands file bptrs/btrees Ended up changing the name of lfsr_mtree_traversal_t -> lfsr_traversal_t, since this behaves more like a filesystem-wide traversal than an mtree traversal (it returns several typed objects, not mdirs like the other mtree functions for one). As a part of this changeset, lfsr_btraversal_t (was lfsr_btree_traversal_t) and lfsr_traversal_t no longer return untyped lfsr_data_ts, but instead return specialized lfsr_{b,t}info_t structs. We weren't even using lfsr_data_t for its original purpose in lfsr_traversal_t. Also changed lfsr_traversal_next -> lfsr_traversal_read, you may notice at this point the changes are intended to make lfsr_traversal_t look more like lfsr_dir_t for consistency. --- Internally lfsr_traversal_t now uses a full state machine with its own enum due to the complexity of traversing the filesystem incrementally. Because creating diagrams is fun, here's the current full state machine, though note it will need to be extended for any parity-trees/free-trees/etc: mrootanchor | v mrootchain .-' | | v | mtree ---> openedblock '-. | ^ | ^ v v | v | mdirblock openedbtree | ^ v | mdirbtree I'm not sure I'm happy with the current implementation, and eventually it will need to be able to handle in-place repairs to the blocks it sees, so this whole thing may need a rewrite. But in the meantime, this passes the new clobber tests in test_alloc, so it should be enough to prove the file implementation works. (which definitely is not fully tested yet, and some bugs had to be fixed for the new tests in test_alloc to pass). --- Speaking of test_alloc. The inherent cyclic dependency between files/dirs/alloc makes it a bit hard to know what order to test these bits of functionality in. 
Originally I was testing alloc first, because it seems you need to be confident in your block allocator before you can start testing higher-level data structures. But I've gone ahead and reversed this order, testing alloc after files/dirs. This is because of an interesting observation that if alloc is broken, you can always increase the test device's size to some absurd number (-DDISK_SIZE=16777216, for example) to kick the can down the road. Testing in this order allows alloc to use more high-level APIs and focus on corner cases where the allocator's behavior requires subtlety to be correct (e.g. ENOSPC). --- lfs.c | 843 ++++++++++++++++++++++++------------------ scripts/bench.py | 2 +- scripts/test.py | 2 +- tests/test_alloc.toml | 644 +++++++++++++++++++++++++++++--- tests/test_btree.toml | 98 ++--- tests/test_dtree.toml | 2 +- tests/test_files.toml | 2 +- tests/test_mtree.toml | 391 ++++++++------------ 8 files changed, 1289 insertions(+), 695 deletions(-) diff --git a/lfs.c b/lfs.c index f0c3edb0..a8edd526 100644 --- a/lfs.c +++ b/lfs.c @@ -4798,24 +4798,34 @@ static int lfsr_btree_namelookup(lfs_t *lfs, const lfsr_btree_t *btree, // incremental btree traversal // // note this is different from iteration, iteration should use -// lfsr_btree_lookupnext, traversal includes inner entries -typedef struct lfsr_btree_traversal { +// lfsr_btree_lookupnext, traversal includes inner btree nodes +typedef struct lfsr_btraversal { lfsr_bid_t bid; lfsr_srid_t rid; lfsr_rbyd_t branch; -} lfsr_btree_traversal_t; +} lfsr_btraversal_t; -#define LFSR_BTREE_TRAVERSAL() \ - ((lfsr_btree_traversal_t){ \ +#define LFSR_BTRAVERSAL() \ + ((lfsr_btraversal_t){ \ .bid=0, \ .rid=0, \ .branch.trunk=0, \ .branch.weight=0}) -static int lfsr_btree_traversal_next(lfs_t *lfs, const lfsr_btree_t *btree, - lfsr_btree_traversal_t *traversal, - lfsr_bid_t *bid_, - lfsr_tag_t *tag_, lfsr_bid_t *weight_, lfsr_data_t *data_) { +// into returned from btree traversal +typedef struct lfsr_binfo { + 
lfsr_bid_t bid; + lfsr_tag_t tag; + lfsr_bid_t weight; + union { + lfsr_rbyd_t rbyd; + lfsr_data_t data; + } u; +} lfsr_binfo_t; + +static int lfsr_btraversal_read(lfs_t *lfs, const lfsr_btree_t *btree, + lfsr_btraversal_t *traversal, + lfsr_binfo_t *binfo) { while (true) { // in range? if (traversal->bid >= lfsr_btree_weight(btree)) { @@ -4827,21 +4837,12 @@ static int lfsr_btree_traversal_next(lfs_t *lfs, const lfsr_btree_t *btree, // setup traversal to terminate next call traversal->bid = lfsr_btree_weight(btree); - // TODO how many of these should be conditional? - if (bid_) { - *bid_ = lfsr_btree_weight(btree)-1; - } - if (tag_) { - *tag_ = btree->u.inlined.tag; - } - if (weight_) { - *weight_ = lfsr_btree_weight(btree); - } - if (data_) { - *data_ = LFSR_DATA_BUF( - btree->u.inlined.buf, - btree->u.inlined.size); - } + binfo->bid = lfsr_btree_weight(btree)-1; + binfo->tag = btree->u.inlined.tag; + binfo->weight = lfsr_btree_weight(btree); + binfo->u.data = LFSR_DATA_BUF( + btree->u.inlined.buf, + btree->u.inlined.size); return 0; } @@ -4852,22 +4853,10 @@ static int lfsr_btree_traversal_next(lfs_t *lfs, const lfsr_btree_t *btree, traversal->branch = btree->u.rbyd; if (traversal->rid == 0) { - // TODO how many of these should be conditional? - if (bid_) { - *bid_ = lfsr_btree_weight(btree)-1; - } - if (tag_) { - *tag_ = LFSR_TAG_BTREE; - } - if (weight_) { - *weight_ = lfsr_btree_weight(btree); - } - if (data_) { - // note btrees are returned decoded - *data_ = LFSR_DATA_BUF( - &traversal->branch, - sizeof(lfsr_rbyd_t)); - } + binfo->bid = lfsr_btree_weight(btree)-1; + binfo->tag = LFSR_TAG_BTREE; + binfo->weight = traversal->branch.weight; + binfo->u.rbyd = traversal->branch; return 0; } @@ -4913,22 +4902,10 @@ static int lfsr_btree_traversal_next(lfs_t *lfs, const lfsr_btree_t *btree, // return inner btree nodes if this is the first time we've // seen them if (traversal->rid == 0) { - // TODO how many of these should be conditional? 
- if (bid_) { - *bid_ = traversal->bid + (rid__ - traversal->rid); - } - if (tag_) { - *tag_ = LFSR_TAG_BTREE; - } - if (weight_) { - *weight_ = traversal->branch.weight; - } - if (data_) { - // note btrees are returned decoded - *data_ = LFSR_DATA_BUF( - &traversal->branch, - sizeof(lfsr_rbyd_t)); - } + binfo->bid = traversal->bid + (rid__ - traversal->rid);; + binfo->tag = LFSR_TAG_BTREE; + binfo->weight = traversal->branch.weight; + binfo->u.rbyd = traversal->branch; return 0; } @@ -4941,19 +4918,10 @@ static int lfsr_btree_traversal_next(lfs_t *lfs, const lfsr_btree_t *btree, lfsr_bid_t bid__ = traversal->bid + (rid__ - traversal->rid); traversal->rid = rid__ + 1; - // TODO how many of these should be conditional? - if (bid_) { - *bid_ = bid__; - } - if (tag_) { - *tag_ = tag__; - } - if (weight_) { - *weight_ = weight__; - } - if (data_) { - *data_ = data__; - } + binfo->bid = bid__; + binfo->tag = tag__; + binfo->weight = weight__; + binfo->u.data = data__; return 0; } } @@ -6853,313 +6821,463 @@ next:; } -// incremental mtree traversal -typedef struct lfsr_mtree_traversal { +// incremental filesystem traversal +typedef struct lfsr_traversal { // core traversal state uint8_t flags; uint8_t state; - lfsr_mdir_t mdir; union { - // cycle detection state, only valid when mdir.mid.bid == -1 + // cycle detection state, only valid when traversing mroot anchors struct { lfs_block_t blocks[2]; lfs_block_t step; uint8_t power; - } tortoise; - // btree traversal state, only valid when mdir.mid.bid != -1 - lfsr_btree_traversal_t traversal; + } mtortoise; + // btree traversal state, only valid when traversing the mtree + lfsr_btraversal_t mtraversal; + // opened file state, only valid when traversing opened files + const lfsr_openedmdir_t *opened; } u; -} lfsr_mtree_traversal_t; + lfsr_mdir_t mdir; + lfsr_btree_t btree; + lfsr_btraversal_t btraversal; +} lfsr_traversal_t; enum { // traverse all blocks in the filesystem - LFSR_MTREE_TRAVERSAL_ALL = 0x1, + 
LFSR_TRAVERSAL_ALL = 0x1, // validate checksums while traversing - LFSR_MTREE_TRAVERSAL_VALIDATE = 0x2, + LFSR_TRAVERSAL_VALIDATE = 0x2, }; // traversing littlefs is a bit complex, so we use a state machine to keep // track of where we are enum { - LFSR_MTREE_TRAVERSAL_MROOTANCHOR = 0, - LFSR_MTREE_TRAVERSAL_MROOTCHAIN = 1, - LFSR_MTREE_TRAVERSAL_MTREE = 2, - LFSR_MTREE_TRAVERSAL_BTREE = 3, + LFSR_TRAVERSAL_MROOTANCHOR = 0, + LFSR_TRAVERSAL_MROOTCHAIN = 1, + LFSR_TRAVERSAL_MTREE = 2, + LFSR_TRAVERSAL_MDIRBLOCK = 3, + LFSR_TRAVERSAL_MDIRBTREE = 4, + LFSR_TRAVERSAL_OPENEDBLOCK = 5, + LFSR_TRAVERSAL_OPENEDBTREE = 6, }; -#define LFSR_MTREE_TRAVERSAL(_flags) \ - ((lfsr_mtree_traversal_t){ \ +#define LFSR_TRAVERSAL(_flags) \ + ((lfsr_traversal_t){ \ .flags=_flags, \ - .state=LFSR_MTREE_TRAVERSAL_MROOTANCHOR, \ - .u.tortoise.blocks={0, 0}, \ - .u.tortoise.step=0, \ - .u.tortoise.power=0}) + .state=LFSR_TRAVERSAL_MROOTANCHOR, \ + .u.mtortoise.blocks={0, 0}, \ + .u.mtortoise.step=0, \ + .u.mtortoise.power=0}) -static int lfsr_mtree_traversal_next(lfs_t *lfs, - lfsr_mtree_traversal_t *traversal, - lfsr_smid_t *mid_, lfsr_tag_t *tag_, lfsr_data_t *data_) { - switch (traversal->state) { - // start with the mrootanchor 0x{0,1} - // - // note we make sure to include all mroots in our mroot chain! 
- // - case LFSR_MTREE_TRAVERSAL_MROOTANCHOR:; - // fetch the first mroot 0x{0,1} - int err = lfsr_mdir_fetch(lfs, &traversal->mdir, - -1, LFSR_MBLOCKS_MROOTANCHOR()); - if (err) { - return err; - } +static inline bool lfsr_traversal_isall(const lfsr_traversal_t *traversal) { + return traversal->flags & LFSR_TRAVERSAL_ALL; +} - // transition to traversing the mroot chain - traversal->state = LFSR_MTREE_TRAVERSAL_MROOTCHAIN; +static inline bool lfsr_traversal_isvalidate( + const lfsr_traversal_t *traversal) { + return traversal->flags & LFSR_TRAVERSAL_VALIDATE; +} - if (mid_) { - *mid_ = -1; - } - if (tag_) { - *tag_ = LFSR_TAG_MDIR; - } - if (data_) { - *data_ = LFSR_DATA_BUF(&traversal->mdir, sizeof(lfsr_mdir_t)); - } - return 0; +// needed for lfsr_traversal_read +static inline bool lfsr_file_hasbnull(const lfsr_file_t *file); +static inline bool lfsr_file_hasbptr(const lfsr_file_t *file); +static inline bool lfsr_file_hasbtree(const lfsr_file_t *file); - // traverse the mroot chain, checking for mroot/mtree/mdir - case LFSR_MTREE_TRAVERSAL_MROOTCHAIN:; - // lookup mroot, if we find one this is a fake mroot - lfsr_tag_t tag; - lfsr_data_t data; - err = lfsr_mdir_lookup(lfs, &traversal->mdir, - -1, LFSR_TAG_WIDE(STRUCT), - &tag, &data); - if (err) { - return err; - } +// info returned by mtree traveral +typedef struct lfsr_tinfo { + lfsr_tag_t tag; + union { + lfsr_mdir_t mdir; + lfsr_rbyd_t rbyd; + lfsr_bptr_t bptr; + } u; +} lfsr_tinfo_t; - // found a new mroot - if (tag == LFSR_TAG_MROOT) { - err = lfsr_data_readmblocks(lfs, &data, - traversal->mdir.u.m.blocks); +static int lfsr_traversal_read(lfs_t *lfs, lfsr_traversal_t *traversal, + lfsr_tinfo_t *tinfo) { + while (true) { + switch (traversal->state) { + // start with the mrootanchor 0x{0,1} + // + // note we make sure to include all mroots in our mroot chain! 
+ // + case LFSR_TRAVERSAL_MROOTANCHOR:; + // fetch the first mroot 0x{0,1} + int err = lfsr_mdir_fetch(lfs, &traversal->mdir, + -1, LFSR_MBLOCKS_MROOTANCHOR()); if (err) { return err; } - // detect cycles with Brent's algorithm - // - // note we only check for cycles in the mroot chain, the btree - // inner nodes require checksums of their pointers, so creating - // a valid cycle is actually quite difficult - // - if (lfsr_mblocks_cmp( - traversal->mdir.u.m.blocks, - traversal->u.tortoise.blocks) == 0) { - LFS_ERROR("Cycle detected during mtree traversal " - "(0x{%"PRIx32",%"PRIx32"})", - traversal->mdir.u.m.blocks[0], - traversal->mdir.u.m.blocks[1]); + // transition to traversing the mroot chain + traversal->state = LFSR_TRAVERSAL_MROOTCHAIN; + + tinfo->tag = LFSR_TAG_MDIR; + tinfo->u.mdir = traversal->mdir; + return 0; + + // traverse the mroot chain, checking for mroot/mtree/mdir + case LFSR_TRAVERSAL_MROOTCHAIN:; + // lookup mroot, if we find one this is a fake mroot + lfsr_tag_t tag; + lfsr_data_t data; + err = lfsr_mdir_lookup(lfs, &traversal->mdir, + -1, LFSR_TAG_WIDE(STRUCT), + &tag, &data); + if (err) { + // if we have no mtree/mdir (inlined mdir) and we're + // traversing all blocks, we need to traverse any files in + // our mroot next + if (err == LFS_ERR_NOENT && lfsr_traversal_isall(traversal)) { + traversal->mdir.mid = 0; + traversal->state = LFSR_TRAVERSAL_MDIRBLOCK; + continue; + } + return err; + } + + // found a new mroot + if (tag == LFSR_TAG_MROOT) { + err = lfsr_data_readmblocks(lfs, &data, + traversal->mdir.u.m.blocks); + if (err) { + return err; + } + + // detect cycles with Brent's algorithm + // + // note we only check for cycles in the mroot chain, the btree + // inner nodes require checksums of their pointers, so creating + // a valid cycle is actually quite difficult + // + if (lfsr_mblocks_cmp( + traversal->mdir.u.m.blocks, + traversal->u.mtortoise.blocks) == 0) { + LFS_ERROR("Cycle detected during mtree traversal " + 
"(0x{%"PRIx32",%"PRIx32"})", + traversal->mdir.u.m.blocks[0], + traversal->mdir.u.m.blocks[1]); + return LFS_ERR_CORRUPT; + } + if (traversal->u.mtortoise.step + // TODO why cast? + == ((lfs_block_t)1 << traversal->u.mtortoise.power)) { + traversal->u.mtortoise.blocks[0] + = traversal->mdir.u.m.blocks[0]; + traversal->u.mtortoise.blocks[1] + = traversal->mdir.u.m.blocks[1]; + traversal->u.mtortoise.step = 0; + traversal->u.mtortoise.power += 1; + } + traversal->u.mtortoise.step += 1; + + // fetch this mroot + err = lfsr_mdir_fetch(lfs, &traversal->mdir, + -1, traversal->mdir.u.m.blocks); + if (err) { + return err; + } + + tinfo->tag = LFSR_TAG_MDIR; + tinfo->u.mdir = traversal->mdir; + return 0; + + // found an mdir? + } else if (tag == LFSR_TAG_MDIR) { + // fetch this mdir + err = lfsr_data_readmblocks(lfs, &data, + tinfo->u.mdir.u.m.blocks); + if (err) { + return err; + } + + err = lfsr_mdir_fetch(lfs, &tinfo->u.mdir, + 0, tinfo->u.mdir.u.m.blocks); + if (err) { + return err; + } + + // transition to traversing the mtree + traversal->state = LFSR_TRAVERSAL_MTREE; + traversal->u.mtraversal = LFSR_BTRAVERSAL(); + + // if we're traversing all blocks, we transition to + // block/btree traversal next + if (lfsr_traversal_isall(traversal)) { + traversal->state = LFSR_TRAVERSAL_MDIRBLOCK; + } + + tinfo->tag = LFSR_TAG_MDIR; + return 0; + + // found an mtree? 
+ } else if (tag == LFSR_TAG_MTREE) { + // read the root of the mtree and return it, lfs->mtree may not + // be initialized yet + err = lfsr_data_readbtree(lfs, &data, &tinfo->u.rbyd); + if (err) { + return err; + } + + // validate our btree nodes if requested, this just means we + // need to do a full rbyd fetch and make sure the checksums + // match + if (lfsr_traversal_isvalidate(traversal)) { + err = lfsr_rbyd_fetchvalidate(lfs, &tinfo->u.rbyd, + tinfo->u.rbyd.block, tinfo->u.rbyd.trunk, + tinfo->u.rbyd.weight, + tinfo->u.rbyd.cksum); + if (err) { + return err; + } + } + + // transition to traversing the mtree + traversal->state = LFSR_TRAVERSAL_MTREE; + traversal->u.mtraversal = LFSR_BTRAVERSAL(); + + tinfo->tag = LFSR_TAG_BTREE; + return 0; + + } else { + LFS_ERROR("Weird mtree entry? (0x%"PRIx32")", tag); return LFS_ERR_CORRUPT; } - if (traversal->u.tortoise.step - // TODO why cast? - == ((lfs_block_t)1 << traversal->u.tortoise.power)) { - traversal->u.tortoise.blocks[0] - = traversal->mdir.u.m.blocks[0]; - traversal->u.tortoise.blocks[1] - = traversal->mdir.u.m.blocks[1]; - traversal->u.tortoise.step = 0; - traversal->u.tortoise.power += 1; - } - traversal->u.tortoise.step += 1; - // fetch this mroot - err = lfsr_mdir_fetch(lfs, &traversal->mdir, - -1, traversal->mdir.u.m.blocks); + // traverse the mtree, including both inner btree nodes and mdirs + case LFSR_TRAVERSAL_MTREE:; + // traverse through the mtree + lfsr_binfo_t binfo; + err = lfsr_btraversal_read(lfs, &lfs->mtree, + &traversal->u.mtraversal, + &binfo); if (err) { + // if we're done with our mtree, and we're traversing all + // blocks, move on to any open files + if (err == LFS_ERR_NOENT && lfsr_traversal_isall(traversal)) { + traversal->u.opened + = lfs->opened[LFS_TYPE_REG-LFS_TYPE_REG]; + traversal->state = LFSR_TRAVERSAL_OPENEDBLOCK; + continue; + } return err; } - if (mid_) { - *mid_ = -1; - } - if (tag_) { - *tag_ = LFSR_TAG_MDIR; - } - if (data_) { - *data_ = 
LFSR_DATA_BUF(&traversal->mdir, sizeof(lfsr_mdir_t)); - } - return 0; - - // found an mdir? - } else if (tag == LFSR_TAG_MDIR) { - // fetch this mdir - err = lfsr_data_readmblocks(lfs, &data, - traversal->mdir.u.m.blocks); - if (err) { - return err; + // wait is this the mtree's root? skip this, we assume we've already + // seen it above (this gets a bit weird because 1. mtree may be + // uninitialized in mountinited and 2. stack really matters since + // we're at the bottom of lfs_alloc) + if (lfsr_btree_isinlined(&lfs->mtree) + || (binfo.tag == LFSR_TAG_BTREE + && binfo.u.rbyd.block == lfs->mtree.u.rbyd.block)) { + continue; } - err = lfsr_mdir_fetch(lfs, &traversal->mdir, - 0, traversal->mdir.u.m.blocks); - if (err) { - return err; - } + // inner btree nodes already decoded + if (binfo.tag == LFSR_TAG_BTREE) { + // validate our btree nodes if requested, this just means we + // need to do a full rbyd fetch and make sure the checksums + // match + if (lfsr_traversal_isvalidate(traversal)) { + err = lfsr_rbyd_fetchvalidate(lfs, &binfo.u.rbyd, + binfo.u.rbyd.block, binfo.u.rbyd.trunk, + binfo.u.rbyd.weight, + binfo.u.rbyd.cksum); + if (err) { + return err; + } + } - // TODO this is ugly - // transition to traversing the mtree, but skip our mdir - traversal->state = LFSR_MTREE_TRAVERSAL_MTREE; - lfsr_btree_t mtree = LFSR_BTREE_NULL; - err = lfsr_data_readbtreeinlined(lfs, &data, - LFSR_TAG_MDIR, lfsr_mleafweight(lfs), - &mtree); - if (err) { - return err; - } - traversal->u.traversal = LFSR_BTREE_TRAVERSAL(); - err = lfsr_btree_traversal_next( - lfs, &mtree, - &traversal->u.traversal, - NULL, NULL, NULL, NULL); - if (err) { - return err; - } + tinfo->tag = LFSR_TAG_BTREE; + tinfo->u.rbyd = binfo.u.rbyd; + return 0; - if (mid_) { - *mid_ = 0; - } - if (tag_) { - *tag_ = LFSR_TAG_MDIR; - } - if (data_) { - *data_ = LFSR_DATA_BUF(&traversal->mdir, sizeof(lfsr_mdir_t)); - } - return 0; - - // found an mtree? 
- } else if (tag == LFSR_TAG_MTREE) { - // read the root of the mtree and return it, lfs->mtree may not - // be initialized yet - // TODO uh, should we make traversal->mdir a different type? - err = lfsr_data_readbtree(lfs, &data, &traversal->mdir.u.rbyd); - if (err) { - return err; - } - - // validate our btree nodes if requested, this just means we need - // to do a full rbyd fetch and make sure the checksums match - if (traversal->flags & LFSR_MTREE_TRAVERSAL_VALIDATE) { - err = lfsr_rbyd_fetchvalidate(lfs, &traversal->mdir.u.rbyd, - traversal->mdir.u.rbyd.block, - traversal->mdir.u.rbyd.trunk, - traversal->mdir.u.rbyd.weight, - traversal->mdir.u.rbyd.cksum); + // fetch mdir if we're on a leaf + } else if (binfo.tag == LFSR_TAG_MDIR) { + err = lfsr_data_readmblocks(lfs, &binfo.u.data, + traversal->mdir.u.m.blocks); if (err) { return err; } - } - // transition to traversing the mtree, but skip the root - traversal->state = LFSR_MTREE_TRAVERSAL_MTREE; - traversal->u.traversal = LFSR_BTREE_TRAVERSAL(); - err = lfsr_btree_traversal_next( - lfs, (lfsr_btree_t*)&traversal->mdir.u.rbyd, - &traversal->u.traversal, - NULL, NULL, NULL, NULL); - if (err) { - return err; - } - - if (mid_) { - *mid_ = 0; - } - if (tag_) { - *tag_ = LFSR_TAG_BTREE; - } - if (data_) { - *data_ = LFSR_DATA_BUF(&traversal->mdir.u.rbyd, - sizeof(lfsr_rbyd_t)); - } - return 0; - - } else { - LFS_ERROR("Weird mtree entry? 
(0x%"PRIx32")", tag); - return LFS_ERR_CORRUPT; - } - - // traverse the mtree, including both inner btree nodes and mdirs - case LFSR_MTREE_TRAVERSAL_MTREE:; - // traverse through the mtree - lfsr_bid_t bid; - err = lfsr_btree_traversal_next( - lfs, &lfs->mtree, &traversal->u.traversal, - &bid, &tag, NULL, &data); - if (err) { - return err; - } - - // inner btree nodes already decoded - if (tag == LFSR_TAG_BTREE) { - // validate our btree nodes if requested, this just means we need - // to do a full rbyd fetch and make sure the checksums match - if (traversal->flags & LFSR_MTREE_TRAVERSAL_VALIDATE) { - lfsr_rbyd_t *branch = (lfsr_rbyd_t*)data.u.direct.buffer; - err = lfsr_rbyd_fetchvalidate(lfs, branch, - branch->block, branch->trunk, branch->weight, - branch->cksum); + err = lfsr_mdir_fetch(lfs, &traversal->mdir, + binfo.bid & lfsr_midbmask(lfs), + traversal->mdir.u.m.blocks); if (err) { return err; } + + // if we're traversing all blocks, we transition to mdir + // traversal next + if (lfsr_traversal_isall(traversal)) { + traversal->state = LFSR_TRAVERSAL_MDIRBLOCK; + } + + tinfo->tag = LFSR_TAG_MDIR; + tinfo->u.mdir = traversal->mdir; + return 0; + + } else { + LFS_ERROR("Weird mtree entry? (0x%"PRIx32")", binfo.tag); + return LFS_ERR_CORRUPT; } - if (mid_) { - *mid_ = bid; + // scan for blocks/btrees in the current mdir + case LFSR_TRAVERSAL_MDIRBLOCK:; + // have we exceeded our mdir's weight? got back to mtree + // traversal + if ((traversal->mdir.mid & lfsr_midrmask(lfs)) + >= traversal->mdir.u.m.weight) { + traversal->state = LFSR_TRAVERSAL_MTREE; + continue; } - if (tag_) { - *tag_ = LFSR_TAG_BTREE; - } - if (data_) { - *data_ = data; - } - return 0; - // fetch mdir if we're on a leaf - } else if (tag == LFSR_TAG_MDIR) { - err = lfsr_data_readmblocks(lfs, &data, - traversal->mdir.u.m.blocks); - if (err) { + // do we have a block/btree? 
+ err = lfsr_mdir_lookupnext(lfs, &traversal->mdir, + traversal->mdir.mid, LFSR_TAG_BLOCK, + &tag, &data); + if (err && err != LFS_ERR_NOENT) { return err; } - err = lfsr_mdir_fetch(lfs, &traversal->mdir, - bid, traversal->mdir.u.m.blocks); + // found a direct block? + if (err != LFS_ERR_NOENT && tag == LFSR_TAG_BLOCK) { + err = lfsr_data_readbptr(lfs, &data, &tinfo->u.bptr); + if (err) { + return err; + } + + // TODO validate? + + // transition to next file + traversal->mdir.mid += 1; + + tinfo->tag = LFSR_TAG_BLOCK; + return 0; + + // found a btree? + } else if (err != LFS_ERR_NOENT && tag == LFSR_TAG_BTREE) { + err = lfsr_data_readbtree(lfs, &data, + &traversal->btree.u.rbyd); + if (err) { + return err; + } + + // start traversing + traversal->btraversal = LFSR_BTRAVERSAL(); + traversal->state = LFSR_TRAVERSAL_MDIRBTREE; + continue; + + // no? continue to next file + } else { + traversal->mdir.mid += 1; + continue; + } + + // scan for blocks/btrees in our opened file list + case LFSR_TRAVERSAL_OPENEDBLOCK:; + // reached end of opened file list? + if (!traversal->u.opened) { + return LFS_ERR_NOENT; + } + + const lfsr_file_t *file = (const lfsr_file_t*)traversal->u.opened; + // found a direct block? + if (lfsr_file_hasbptr(file)) { + tinfo->u.bptr = file->u.bptr; + + // TODO validate? + + // transition to next file + traversal->u.opened = file->m.next; + + tinfo->tag = LFSR_TAG_BLOCK; + return 0; + + // found a btree? + } else if (lfsr_file_hasbtree(file)) { + // start traversing + traversal->btree = file->u.btree; + traversal->btraversal = LFSR_BTRAVERSAL(); + traversal->state = LFSR_TRAVERSAL_OPENEDBTREE; + continue; + + // no? 
continue to next file + } else { + traversal->u.opened = file->m.next; + continue; + } + + // traverse any file btrees, including both inner btree nodes and + // block pointers + case LFSR_TRAVERSAL_MDIRBTREE:; + case LFSR_TRAVERSAL_OPENEDBTREE:; + // traverse through our btree + err = lfsr_btraversal_read(lfs, &traversal->btree, + &traversal->btraversal, + &binfo); if (err) { + if (err == LFS_ERR_NOENT) { + // end of btree? go to next file + if (traversal->state == LFSR_TRAVERSAL_MDIRBTREE) { + traversal->mdir.mid += 1; + traversal->state = LFSR_TRAVERSAL_MDIRBLOCK; + continue; + } else if (traversal->state == LFSR_TRAVERSAL_OPENEDBTREE) { + traversal->u.opened = traversal->u.opened->next; + traversal->state = LFSR_TRAVERSAL_OPENEDBLOCK; + continue; + } else { + LFS_UNREACHABLE(); + } + } return err; } - if (mid_) { - *mid_ = bid; - } - if (tag_) { - *tag_ = LFSR_TAG_MDIR; - } - if (data_) { - *data_ = LFSR_DATA_BUF(&traversal->mdir, sizeof(lfsr_mdir_t)); - } - return 0; + // found an inner btree node? + if (binfo.tag == LFSR_TAG_BTREE) { + // validate our btree nodes if requested, this just means we + // need to do a full rbyd fetch and make sure the checksums + // match + if (lfsr_traversal_isvalidate(traversal)) { + err = lfsr_rbyd_fetchvalidate(lfs, &binfo.u.rbyd, + binfo.u.rbyd.block, binfo.u.rbyd.trunk, + binfo.u.rbyd.weight, + binfo.u.rbyd.cksum); + if (err) { + return err; + } + } - } else { - LFS_ERROR("Weird mtree entry? (0x%"PRIx32")", tag); - return LFS_ERR_CORRUPT; + tinfo->tag = LFSR_TAG_BTREE; + tinfo->u.rbyd = binfo.u.rbyd; + return 0; + + // found inlined data? ignore this + } else if (binfo.tag == LFSR_TAG_INLINED) { + continue; + + // found an indirect block? + } else if (binfo.tag == LFSR_TAG_BLOCK) { + err = lfsr_data_readbptr(lfs, &binfo.u.data, + &tinfo->u.bptr); + if (err) { + return err; + } + + // TODO validate? 
+ + tinfo->tag = LFSR_TAG_BLOCK; + return 0; + + } else { + LFS_UNREACHABLE(); + } + + default:; + LFS_UNREACHABLE(); } - - // traverse any file btree, including both inner btree nodes and - // block pointers - case LFSR_MTREE_TRAVERSAL_BTREE:; - // TODO - return 0; - - default:; - LFS_UNREACHABLE(); } } @@ -7258,19 +7376,16 @@ static int lfsr_mountinited(lfs_t *lfs) { // traverse the mtree rooted at mroot 0x{1,0} // - // note that lfsr_mtree_traversal_next will update our mroot/mtree + // note that lfsr_traversal_next will update our mroot/mtree // based on what mroots it finds // // we do validate btree inner nodes here, how can we trust our // mdirs are valid if we haven't checked the btree inner nodes at // least once? - lfsr_mtree_traversal_t traversal = LFSR_MTREE_TRAVERSAL( - LFSR_MTREE_TRAVERSAL_VALIDATE); + lfsr_traversal_t traversal = LFSR_TRAVERSAL(LFSR_TRAVERSAL_VALIDATE); while (true) { - lfsr_tag_t tag; - lfsr_data_t data; - int err = lfsr_mtree_traversal_next(lfs, &traversal, - NULL, &tag, &data); + lfsr_tinfo_t tinfo; + int err = lfsr_traversal_read(lfs, &traversal, &tinfo); if (err) { if (err == LFS_ERR_NOENT) { break; @@ -7279,14 +7394,12 @@ static int lfsr_mountinited(lfs_t *lfs) { } // found an mdir? - if (tag == LFSR_TAG_MDIR) { - lfsr_mdir_t *mdir = (lfsr_mdir_t*)data.u.direct.buffer; - + if (tinfo.tag == LFSR_TAG_MDIR) { // found an mroot? - if (mdir->mid == -1) { + if (tinfo.u.mdir.mid == -1) { // has magic string? 
lfsr_data_t data; - err = lfsr_mdir_lookup(lfs, mdir, -1, LFSR_TAG_MAGIC, + err = lfsr_mdir_lookup(lfs, &tinfo.u.mdir, -1, LFSR_TAG_MAGIC, NULL, &data); if (err) { if (err == LFS_ERR_NOENT) { @@ -7308,7 +7421,8 @@ static int lfsr_mountinited(lfs_t *lfs) { } // check the disk version - err = lfsr_mdir_lookup(lfs, mdir, -1, LFSR_TAG_VERSION, + err = lfsr_mdir_lookup(lfs, &tinfo.u.mdir, + -1, LFSR_TAG_VERSION, NULL, &data); if (err) { if (err == LFS_ERR_NOENT) { @@ -7350,7 +7464,7 @@ static int lfsr_mountinited(lfs_t *lfs) { } // check for any flags - err = lfsr_mdir_lookup(lfs, mdir, -1, LFSR_TAG_FLAGS, + err = lfsr_mdir_lookup(lfs, &tinfo.u.mdir, -1, LFSR_TAG_FLAGS, NULL, &data); if (err && err != LFS_ERR_NOENT) { return err; @@ -7373,7 +7487,8 @@ static int lfsr_mountinited(lfs_t *lfs) { } // check checksum type - err = lfsr_mdir_lookup(lfs, mdir, -1, LFSR_TAG_CKSUMTYPE, + err = lfsr_mdir_lookup(lfs, &tinfo.u.mdir, + -1, LFSR_TAG_CKSUMTYPE, NULL, &data); if (err && err != LFS_ERR_NOENT) { return err; @@ -7397,7 +7512,8 @@ static int lfsr_mountinited(lfs_t *lfs) { } // check redundancy type - err = lfsr_mdir_lookup(lfs, mdir, -1, LFSR_TAG_REDUNDTYPE, + err = lfsr_mdir_lookup(lfs, &tinfo.u.mdir, + -1, LFSR_TAG_REDUNDTYPE, NULL, &data); if (err && err != LFS_ERR_NOENT) { return err; @@ -7422,7 +7538,8 @@ static int lfsr_mountinited(lfs_t *lfs) { } // check block limit / block size - err = lfsr_mdir_lookup(lfs, mdir, -1, LFSR_TAG_BLOCKLIMIT, + err = lfsr_mdir_lookup(lfs, &tinfo.u.mdir, + -1, LFSR_TAG_BLOCKLIMIT, NULL, &data); if (err && err != LFS_ERR_NOENT) { return err; @@ -7449,7 +7566,8 @@ static int lfsr_mountinited(lfs_t *lfs) { } // check disk limit / block count - err = lfsr_mdir_lookup(lfs, mdir, -1, LFSR_TAG_DISKLIMIT, + err = lfsr_mdir_lookup(lfs, &tinfo.u.mdir, + -1, LFSR_TAG_DISKLIMIT, NULL, &data); if (err && err != LFS_ERR_NOENT) { return err; @@ -7476,7 +7594,8 @@ static int lfsr_mountinited(lfs_t *lfs) { } // read the mleaf limit - err = 
lfsr_mdir_lookup(lfs, mdir, -1, LFSR_TAG_MLEAFLIMIT, + err = lfsr_mdir_lookup(lfs, &tinfo.u.mdir, + -1, LFSR_TAG_MLEAFLIMIT, NULL, &data); if (err) { if (err == LFS_ERR_NOENT) { @@ -7506,7 +7625,8 @@ static int lfsr_mountinited(lfs_t *lfs) { lfs->mleaf_bits = lfs_nlog2(mleaf_limit); // read the size limit - err = lfsr_mdir_lookup(lfs, mdir, -1, LFSR_TAG_SIZELIMIT, + err = lfsr_mdir_lookup(lfs, &tinfo.u.mdir, + -1, LFSR_TAG_SIZELIMIT, NULL, &data); if (err) { if (err == LFS_ERR_NOENT) { @@ -7535,7 +7655,8 @@ static int lfsr_mountinited(lfs_t *lfs) { lfs->size_limit = size_limit; // read the name limit - err = lfsr_mdir_lookup(lfs, mdir, -1, LFSR_TAG_NAMELIMIT, + err = lfsr_mdir_lookup(lfs, &tinfo.u.mdir, + -1, LFSR_TAG_NAMELIMIT, NULL, &data); if (err) { if (err == LFS_ERR_NOENT) { @@ -7564,7 +7685,8 @@ static int lfsr_mountinited(lfs_t *lfs) { lfs->name_limit = name_limit; // check the utag limit - err = lfsr_mdir_lookup(lfs, mdir, -1, LFSR_TAG_UTAGLIMIT, + err = lfsr_mdir_lookup(lfs, &tinfo.u.mdir, + -1, LFSR_TAG_UTAGLIMIT, NULL, &data); if (err && err != LFS_ERR_NOENT) { return err; @@ -7592,7 +7714,8 @@ static int lfsr_mountinited(lfs_t *lfs) { } // check the uattr limit - err = lfsr_mdir_lookup(lfs, mdir, -1, LFSR_TAG_UATTRLIMIT, + err = lfsr_mdir_lookup(lfs, &tinfo.u.mdir, + -1, LFSR_TAG_UATTRLIMIT, NULL, &data); if (err && err != LFS_ERR_NOENT) { return err; @@ -7620,7 +7743,7 @@ static int lfsr_mountinited(lfs_t *lfs) { } // keep track of the last mroot we see, this is the "real" mroot - lfs->mroot = *mdir; + lfs->mroot = tinfo.u.mdir; } else { // found a direct mdir? 
keep track of this as our "mtree" @@ -7631,24 +7754,23 @@ static int lfsr_mountinited(lfs_t *lfs) { err = lfsr_btree_commit(lfs, &lfs->mtree, LFSR_ATTRS( LFSR_ATTR(0, MDIR, +lfsr_mleafweight(lfs), - FROMMBLOCKS(mdir->u.m.blocks, mdir_buf)))); + FROMMBLOCKS(tinfo.u.mdir.u.m.blocks, + mdir_buf)))); LFS_ASSERT(!err); } } // collect any gdeltas from this mdir - err = lfsr_fs_consumegdelta(lfs, mdir); + err = lfsr_fs_consumegdelta(lfs, &tinfo.u.mdir); if (err) { return err; } // found an mtree inner-node? - } else if (tag == LFSR_TAG_BTREE) { - lfsr_rbyd_t *branch = (lfsr_rbyd_t*)data.u.direct.buffer; - + } else if (tinfo.tag == LFSR_TAG_BTREE) { // found the root of the mtree? if (lfsr_btree_isnull(&lfs->mtree)) { - lfs->mtree.u.rbyd = *branch; + lfs->mtree.u.rbyd = tinfo.u.rbyd; } } else { @@ -7870,12 +7992,10 @@ static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) { // traverse the filesystem, building up knowledge of what blocks are // in use in our lookahead window - lfsr_mtree_traversal_t traversal = LFSR_MTREE_TRAVERSAL(0); + lfsr_traversal_t traversal = LFSR_TRAVERSAL(LFSR_TRAVERSAL_ALL); while (true) { - lfsr_tag_t tag; - lfsr_data_t data; - int err = lfsr_mtree_traversal_next(lfs, &traversal, - NULL, &tag, &data); + lfsr_tinfo_t tinfo; + int err = lfsr_traversal_read(lfs, &traversal, &tinfo); if (err) { if (err == LFS_ERR_NOENT) { break; @@ -7886,14 +8006,15 @@ static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) { // TODO add block pointers here? 
// mark any blocks we see at in-use, including any btree/mdir blocks - if (tag == LFSR_TAG_MDIR) { - lfsr_mdir_t *mdir = (lfsr_mdir_t*)data.u.direct.buffer; - lfs_alloc_setinuse(lfs, mdir->u.m.blocks[1]); - lfs_alloc_setinuse(lfs, mdir->u.m.blocks[0]); + if (tinfo.tag == LFSR_TAG_MDIR) { + lfs_alloc_setinuse(lfs, tinfo.u.mdir.u.m.blocks[1]); + lfs_alloc_setinuse(lfs, tinfo.u.mdir.u.m.blocks[0]); - } else if (tag == LFSR_TAG_BTREE) { - lfsr_rbyd_t *branch = (lfsr_rbyd_t*)data.u.direct.buffer; - lfs_alloc_setinuse(lfs, branch->block); + } else if (tinfo.tag == LFSR_TAG_BTREE) { + lfs_alloc_setinuse(lfs, tinfo.u.rbyd.block); + + } else if (tinfo.tag == LFSR_TAG_BLOCK) { + lfs_alloc_setinuse(lfs, tinfo.u.bptr.block); } else { LFS_UNREACHABLE(); @@ -9727,7 +9848,7 @@ static int lfsr_file_flushinlined(lfs_t *lfs, lfsr_file_t *file, pos = block_pos; while (pos < lfsr_btree_weight(&file->u.btree) && pos < block_pos + lfs->cfg->block_size - && crystallized < lfs->cfg->crystallize_size) { + && crystallized <= lfs->cfg->crystallize_size) { lfs_off_t d = lfs->cfg->block_size - (pos - block_pos); // prioritize our inlined data @@ -9798,9 +9919,9 @@ static int lfsr_file_flushinlined(lfs_t *lfs, lfsr_file_t *file, pos = block_pos; size = lfs_min32( lfs->cfg->block_size, - file->size - block_pos); - while (pos < size) { - lfs_off_t d = size - pos; + lfs_max32(pos_+size_, file->size) - block_pos); + while (pos < block_pos + size) { + lfs_off_t d = block_pos + size - pos; // prioritize our inlined data if (pos < pos_ + size_) { diff --git a/scripts/bench.py b/scripts/bench.py index 1c32a9b0..08141fb7 100755 --- a/scripts/bench.py +++ b/scripts/bench.py @@ -294,7 +294,7 @@ def compile(bench_paths, **args): pending_[suite.name] = suite if len(pending_) == len(pending): - print('%serror:%s cycle detected in suite ordering, %s' % ( + print('%serror:%s cycle detected in suite ordering: {%s}' % ( '\x1b[01;31m' if args['color'] else '', '\x1b[m' if args['color'] else '', ', 
'.join(suite.name for suite in pending.values()))) diff --git a/scripts/test.py b/scripts/test.py index 8e8c4d38..df7a0a2d 100755 --- a/scripts/test.py +++ b/scripts/test.py @@ -299,7 +299,7 @@ def compile(test_paths, **args): pending_[suite.name] = suite if len(pending_) == len(pending): - print('%serror:%s cycle detected in suite ordering, %s' % ( + print('%serror:%s cycle detected in suite ordering: {%s}' % ( '\x1b[01;31m' if args['color'] else '', '\x1b[m' if args['color'] else '', ', '.join(suite.name for suite in pending.values()))) diff --git a/tests/test_alloc.toml b/tests/test_alloc.toml index 37f60f51..0c7d4220 100644 --- a/tests/test_alloc.toml +++ b/tests/test_alloc.toml @@ -1,5 +1,13 @@ # Tests covering properties of the block allocator -after = 'test_mtree' + +# The ordering of these tests vs higher-level tests (files/dirs/etc) gets +# a bit weird because there is an inherent cyclic dependency +# +# It's counter-intuitive, but we run the alloc tests _after_ file/dir tests, +# since you can usually ignore allocator issues temporarily by making the test +# device really big (-DDISK_SIZE=16777216, etc) +# +after = ['test_mtree', 'test_dtree', 'test_files'] # TODO test all of these with weird block sizes? 
would be nice to make this @@ -7,7 +15,7 @@ after = 'test_mtree' # config limit the block count by a couple blocks # test that we can alloc -[cases.test_alloc_blocks] +[cases.test_alloc_alloc] in = 'lfs.c' code = ''' lfs_t lfs; @@ -94,89 +102,621 @@ code = ''' lfsr_unmount(&lfs) => 0; ''' -# test that we can alloc an mtree, the difference between this and mtree tests -# is we expect this to be able to handle wrap-around -[cases.test_alloc_mtree] + +# clobber tests test that our traversal algorithm works +[cases.test_alloc_clobber_dirs] +defines.N = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] +defines.VALIDATE = [false, true] +defines.REMOUNT = [false, true] in = 'lfs.c' code = ''' - const char *alphas = "abcdefghijklmnopqrstuvwxyz"; lfs_t lfs; lfsr_format(&lfs, CFG) => 0; lfsr_mount(&lfs, CFG) => 0; - lfs_alloc_ack(&lfs); - // remove root dstart for now - lfsr_mdir_commit(&lfs, &lfs.mroot, LFSR_ATTRS( - LFSR_ATTR(0, RM, -1, NULL))) => 0; - lfsr_mdir_t mdir; - lfsr_mtree_lookup(&lfs, 0*lfsr_mleafweight(&lfs)+0, - &mdir) => 0; + // create this many directories + for (lfs_size_t i = 0; i < N; i++) { + char name[256]; + sprintf(name, "dir%04d", i); + lfsr_mkdir(&lfs, name) => 0; + } - lfs_size_t count = 0; - while (true) { - // at least try to catch infinite loops - assert(count < BLOCK_SIZE * BLOCK_COUNT/2); + // check that our mkdir worked + for (lfs_size_t i = 0; i < N; i++) { + char name[256]; + sprintf(name, "dir%04d", i); + struct lfs_info info; + lfsr_stat(&lfs, name, &info) => 0; + assert(strcmp(info.name, name) == 0); + assert(info.type == LFS_TYPE_DIR); + } - // ack before each commit to reset the allocator - lfs_alloc_ack(&lfs); + lfsr_dir_t dir; + lfsr_dir_open(&lfs, &dir, "/") => 0; + struct lfs_info info; + lfsr_dir_read(&lfs, &dir, &info) => 0; + assert(strcmp(info.name, ".") == 0); + assert(info.type == LFS_TYPE_DIR); + lfsr_dir_read(&lfs, &dir, &info) => 0; + assert(strcmp(info.name, "..") == 0); + assert(info.type == LFS_TYPE_DIR); + for (lfs_size_t i = 
0; i < N; i++) { + char name[256]; + sprintf(name, "dir%04d", i); + lfsr_dir_read(&lfs, &dir, &info) => 0; + assert(strcmp(info.name, name) == 0); + assert(info.type == LFS_TYPE_DIR); + } + lfsr_dir_read(&lfs, &dir, &info) => LFS_ERR_NOENT; + lfsr_dir_close(&lfs, &dir) => 0; - // keep creating new metadata entries until we run out of space - int err = lfsr_mdir_commit(&lfs, &mdir, LFSR_ATTRS( - LFSR_ATTR(mdir.mid, REG, +1, - BUF(&alphas[count % 26], 1)))); - assert(!err || err == LFS_ERR_NOSPC); - if (err == LFS_ERR_NOSPC) { + // remount? + if (REMOUNT) { + lfsr_unmount(&lfs) => 0; + lfsr_mount(&lfs, CFG) => 0; + } + + // first traverse the tree to find all blocks in use + uint8_t *seen = malloc((BLOCK_COUNT+7)/8); + memset(seen, 0, (BLOCK_COUNT+7)/8); + + lfsr_traversal_t traversal = LFSR_TRAVERSAL( + VALIDATE ? LFSR_TRAVERSAL_VALIDATE : 0); + for (lfs_block_t i = 0;; i++) { + // a bit hacky, but this catches infinite loops + assert(i < 2*BLOCK_COUNT); + + lfsr_tinfo_t tinfo; + int err = lfsr_traversal_read(&lfs, &traversal, &tinfo); + assert(!err || err == LFS_ERR_NOENT); + if (err == LFS_ERR_NOENT) { break; } - uint8_t buffer[4]; - lfsr_mdir_get(&lfs, &mdir, mdir.mid, LFSR_TAG_REG, - buffer, 4) => 1; - assert(memcmp(buffer, &alphas[count % 26], 1) == 0); + if (tinfo.tag == LFSR_TAG_MDIR) { + printf("traversal: 0x%x mdir 0x{%x,%x}\n", + tinfo.tag, + tinfo.u.mdir.u.m.blocks[0], tinfo.u.mdir.u.m.blocks[1]); - mdir.mid += 1; - count += 1; - } + // keep track of seen blocks + seen[tinfo.u.mdir.u.m.blocks[1] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[1] % 8); + seen[tinfo.u.mdir.u.m.blocks[0] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[0] % 8); - printf("alloced %d metadata entries in %d blocks\n", - count, (lfs_block_t)BLOCK_COUNT); + } else if (tinfo.tag == LFSR_TAG_BTREE) { + printf("traversal: 0x%x btree 0x%x.%x\n", + tinfo.tag, + tinfo.u.rbyd.block, tinfo.u.rbyd.trunk); - // test that all of our metadata entries are still there - lfs_size_t i = 0; - for (lfs_ssize_t mid 
= 0; - mid < lfs_smax32( - lfsr_mtree_weight(&lfs), - lfsr_mleafweight(&lfs)); - mid += lfsr_mleafweight(&lfs)) { - lfsr_mdir_t mdir; - lfsr_mtree_lookup(&lfs, mid, &mdir) => 0; - for (; (mdir.mid & lfsr_midrmask(&lfs)) - < (lfs_ssize_t)mdir.u.m.weight; - mdir.mid += 1) { - uint8_t buffer[4]; - lfsr_mdir_get(&lfs, &mdir, mdir.mid, LFSR_TAG_REG, - buffer, 4) => 1; - assert(memcmp(buffer, &alphas[i % 26], 1) == 0); - i += 1; + // keep track of seen blocks + seen[tinfo.u.rbyd.block / 8] |= 1 << (tinfo.u.rbyd.block % 8); + + } else { + // this shouldn't happen + printf("traversal: 0x%x\n", tinfo.tag); + assert(false); + } } - assert(i == count); + + // then clobber all other blocks + uint8_t clobber_buf[BLOCK_SIZE]; + memset(clobber_buf, 0xcc, BLOCK_SIZE); + for (lfs_block_t block = 0; block < BLOCK_COUNT; block++) { + if (!(seen[block / 8] & (1 << (block % 8)))) { + CFG->erase(CFG, block) => 0; + CFG->prog(CFG, block, 0, clobber_buf, BLOCK_SIZE) => 0; + } + } + free(seen); + + // then check that we can read our directories after clobbering + for (lfs_size_t i = 0; i < N; i++) { + char name[256]; + sprintf(name, "dir%04d", i); + struct lfs_info info; + lfsr_stat(&lfs, name, &info) => 0; + assert(strcmp(info.name, name) == 0); + assert(info.type == LFS_TYPE_DIR); + } + + lfsr_dir_open(&lfs, &dir, "/") => 0; + lfsr_dir_read(&lfs, &dir, &info) => 0; + assert(strcmp(info.name, ".") == 0); + assert(info.type == LFS_TYPE_DIR); + lfsr_dir_read(&lfs, &dir, &info) => 0; + assert(strcmp(info.name, "..") == 0); + assert(info.type == LFS_TYPE_DIR); + for (lfs_size_t i = 0; i < N; i++) { + char name[256]; + sprintf(name, "dir%04d", i); + lfsr_dir_read(&lfs, &dir, &info) => 0; + assert(strcmp(info.name, name) == 0); + assert(info.type == LFS_TYPE_DIR); + } + lfsr_dir_read(&lfs, &dir, &info) => LFS_ERR_NOENT; + lfsr_dir_close(&lfs, &dir) => 0; + + lfsr_unmount(&lfs) => 0; +''' + +[cases.test_alloc_clobber_files] +defines.N = [1, 2, 4, 8, 16, 32, 64] +defines.SIZE = [ + '0', + 
'CACHE_SIZE/2', + '2*CACHE_SIZE', + 'BLOCK_SIZE/2', + 'BLOCK_SIZE', + '2*BLOCK_SIZE', + '8*BLOCK_SIZE', +] +defines.VALIDATE = [false, true] +defines.REMOUNT = [false, true] +in = 'lfs.c' +if = '(SIZE*N)/BLOCK_SIZE <= 32' +code = ''' + lfs_t lfs; + lfsr_format(&lfs, CFG) => 0; + lfsr_mount(&lfs, CFG) => 0; + + // create this many files + uint32_t prng = 42; + for (lfs_size_t i = 0; i < N; i++) { + char name[256]; + sprintf(name, "file%04d", i); + + uint8_t wbuf[SIZE]; + for (lfs_size_t j = 0; j < SIZE; j++) { + wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26); + } + + lfsr_file_t file; + lfsr_file_open(&lfs, &file, name, + LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0; + lfsr_file_write(&lfs, &file, wbuf, SIZE) => SIZE; + lfsr_file_close(&lfs, &file) => 0; + } + + // check that our writes worked + prng = 42; + for (lfs_size_t i = 0; i < N; i++) { + // check with stat + char name[256]; + sprintf(name, "file%04d", i); + struct lfs_info info; + lfsr_stat(&lfs, name, &info) => 0; + assert(strcmp(info.name, name) == 0); + assert(info.type == LFS_TYPE_REG); + assert(info.size == SIZE); + + // try reading the file, note we reset prng above + uint8_t wbuf[SIZE]; + for (lfs_size_t j = 0; j < SIZE; j++) { + wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26); + } + + lfsr_file_t file; + uint8_t rbuf[SIZE]; + lfsr_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0; + lfsr_file_read(&lfs, &file, rbuf, SIZE) => SIZE; + assert(memcmp(rbuf, wbuf, SIZE) == 0); + lfsr_file_close(&lfs, &file) => 0; + } + + // remount? + if (REMOUNT) { + lfsr_unmount(&lfs) => 0; + lfsr_mount(&lfs, CFG) => 0; + } + + // first traverse the tree to find all blocks in use + uint8_t *seen = malloc((BLOCK_COUNT+7)/8); + memset(seen, 0, (BLOCK_COUNT+7)/8); + + lfsr_traversal_t traversal = LFSR_TRAVERSAL( + (VALIDATE ? 
LFSR_TRAVERSAL_VALIDATE : 0) + | LFSR_TRAVERSAL_ALL); + for (lfs_block_t i = 0;; i++) { + // a bit hacky, but this catches infinite loops + assert(i < 2*BLOCK_COUNT); + + lfsr_tinfo_t tinfo; + int err = lfsr_traversal_read(&lfs, &traversal, &tinfo); + assert(!err || err == LFS_ERR_NOENT); + if (err == LFS_ERR_NOENT) { + break; + } + + if (tinfo.tag == LFSR_TAG_MDIR) { + printf("traversal: 0x%x mdir 0x{%x,%x}\n", + tinfo.tag, + tinfo.u.mdir.u.m.blocks[0], tinfo.u.mdir.u.m.blocks[1]); + + // keep track of seen blocks + seen[tinfo.u.mdir.u.m.blocks[1] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[1] % 8); + seen[tinfo.u.mdir.u.m.blocks[0] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[0] % 8); + + } else if (tinfo.tag == LFSR_TAG_BTREE) { + printf("traversal: 0x%x btree 0x%x.%x\n", + tinfo.tag, + tinfo.u.rbyd.block, tinfo.u.rbyd.trunk); + + // keep track of seen blocks + seen[tinfo.u.rbyd.block / 8] |= 1 << (tinfo.u.rbyd.block % 8); + + } else if (tinfo.tag == LFSR_TAG_BLOCK) { + printf("traversal: 0x%x block 0x%x\n", + tinfo.tag, + tinfo.u.bptr.block); + + // keep track of seen blocks + seen[tinfo.u.bptr.block / 8] |= 1 << (tinfo.u.bptr.block % 8); + + } else { + // this shouldn't happen + printf("traversal: 0x%x\n", tinfo.tag); + assert(false); + } + } + + // then clobber every other block + uint8_t clobber_buf[BLOCK_SIZE]; + memset(clobber_buf, 0xcc, BLOCK_SIZE); + for (lfs_block_t block = 0; block < BLOCK_COUNT; block++) { + if (!(seen[block / 8] & (1 << (block % 8)))) { + CFG->erase(CFG, block) => 0; + CFG->prog(CFG, block, 0, clobber_buf, BLOCK_SIZE) => 0; + } + } + free(seen); + + // then check that reading our files still works after clobbering + prng = 42; + for (lfs_size_t i = 0; i < N; i++) { + // check with stat + char name[256]; + sprintf(name, "file%04d", i); + struct lfs_info info; + lfsr_stat(&lfs, name, &info) => 0; + assert(strcmp(info.name, name) == 0); + assert(info.type == LFS_TYPE_REG); + assert(info.size == SIZE); + + // try reading the file, note we reset 
prng above + uint8_t wbuf[SIZE]; + for (lfs_size_t j = 0; j < SIZE; j++) { + wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26); + } + + lfsr_file_t file; + uint8_t rbuf[SIZE]; + lfsr_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0; + lfsr_file_read(&lfs, &file, rbuf, SIZE) => SIZE; + assert(memcmp(rbuf, wbuf, SIZE) == 0); + lfsr_file_close(&lfs, &file) => 0; + } + + lfsr_unmount(&lfs) => 0; +''' + +# open files need to be tracked internally to make sure this doesn't break +[cases.test_alloc_clobber_open_files] +defines.N = [1, 2, 4, 8, 16, 32, 64] +defines.SIZE = [ + '0', + 'CACHE_SIZE/2', + '2*CACHE_SIZE', + 'BLOCK_SIZE/2', + 'BLOCK_SIZE', + '2*BLOCK_SIZE', + '8*BLOCK_SIZE', +] +defines.VALIDATE = [false, true] +defines.REMOUNT = [false, true] +in = 'lfs.c' +if = '(SIZE*N)/BLOCK_SIZE <= 32' +code = ''' + lfs_t lfs; + lfsr_format(&lfs, CFG) => 0; + lfsr_mount(&lfs, CFG) => 0; + + // create this many files + lfsr_file_t files[N]; + uint32_t prng = 42; + for (lfs_size_t i = 0; i < N; i++) { + char name[256]; + sprintf(name, "file%04d", i); + + uint8_t wbuf[SIZE]; + for (lfs_size_t j = 0; j < SIZE; j++) { + wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26); + } + + lfsr_file_open(&lfs, &files[i], name, + LFS_O_RDWR | LFS_O_CREAT | LFS_O_EXCL) => 0; + lfsr_file_write(&lfs, &files[i], wbuf, SIZE) => SIZE; + } + + // check that our writes worked + prng = 42; + for (lfs_size_t i = 0; i < N; i++) { + // try reading the file, note we reset prng above + uint8_t wbuf[SIZE]; + for (lfs_size_t j = 0; j < SIZE; j++) { + wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26); + } + + uint8_t rbuf[SIZE]; + lfsr_file_rewind(&lfs, &files[i]) => 0; + lfsr_file_read(&lfs, &files[i], rbuf, SIZE) => SIZE; + assert(memcmp(rbuf, wbuf, SIZE) == 0); + } + + // first traverse the tree to find all blocks in use + uint8_t *seen = malloc((BLOCK_COUNT+7)/8); + memset(seen, 0, (BLOCK_COUNT+7)/8); + + lfsr_traversal_t traversal = LFSR_TRAVERSAL( + (VALIDATE ? 
LFSR_TRAVERSAL_VALIDATE : 0) + | LFSR_TRAVERSAL_ALL); + for (lfs_block_t i = 0;; i++) { + // a bit hacky, but this catches infinite loops + assert(i < 2*BLOCK_COUNT); + + lfsr_tinfo_t tinfo; + int err = lfsr_traversal_read(&lfs, &traversal, &tinfo); + assert(!err || err == LFS_ERR_NOENT); + if (err == LFS_ERR_NOENT) { + break; + } + + if (tinfo.tag == LFSR_TAG_MDIR) { + printf("traversal: 0x%x mdir 0x{%x,%x}\n", + tinfo.tag, + tinfo.u.mdir.u.m.blocks[0], tinfo.u.mdir.u.m.blocks[1]); + + // keep track of seen blocks + seen[tinfo.u.mdir.u.m.blocks[1] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[1] % 8); + seen[tinfo.u.mdir.u.m.blocks[0] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[0] % 8); + + } else if (tinfo.tag == LFSR_TAG_BTREE) { + printf("traversal: 0x%x btree 0x%x.%x\n", + tinfo.tag, + tinfo.u.rbyd.block, tinfo.u.rbyd.trunk); + + // keep track of seen blocks + seen[tinfo.u.rbyd.block / 8] |= 1 << (tinfo.u.rbyd.block % 8); + + } else if (tinfo.tag == LFSR_TAG_BLOCK) { + printf("traversal: 0x%x block 0x%x\n", + tinfo.tag, + tinfo.u.bptr.block); + + // keep track of seen blocks + seen[tinfo.u.bptr.block / 8] |= 1 << (tinfo.u.bptr.block % 8); + + } else { + // this shouldn't happen + printf("traversal: 0x%x\n", tinfo.tag); + assert(false); + } + } + + // then clobber every other block + uint8_t clobber_buf[BLOCK_SIZE]; + memset(clobber_buf, 0xcc, BLOCK_SIZE); + for (lfs_block_t block = 0; block < BLOCK_COUNT; block++) { + if (!(seen[block / 8] & (1 << (block % 8)))) { + CFG->erase(CFG, block) => 0; + CFG->prog(CFG, block, 0, clobber_buf, BLOCK_SIZE) => 0; + } + } + free(seen); + + // then check that reading our files still works after clobbering + prng = 42; + for (lfs_size_t i = 0; i < N; i++) { + // try reading the file, note we reset prng above + uint8_t wbuf[SIZE]; + for (lfs_size_t j = 0; j < SIZE; j++) { + wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26); + } + + uint8_t rbuf[SIZE]; + lfsr_file_rewind(&lfs, &files[i]) => 0; + lfsr_file_read(&lfs, &files[i], rbuf, SIZE) => 
SIZE; + assert(memcmp(rbuf, wbuf, SIZE) == 0); + } + + // and everything is fine after saving the files + for (lfs_size_t i = 0; i < N; i++) { + lfsr_file_close(&lfs, &files[i]) => 0; + } + + if (REMOUNT) { + lfsr_unmount(&lfs) => 0; + lfsr_mount(&lfs, CFG) => 0; + } + + prng = 42; + for (lfs_size_t i = 0; i < N; i++) { + // check with stat + char name[256]; + sprintf(name, "file%04d", i); + struct lfs_info info; + lfsr_stat(&lfs, name, &info) => 0; + assert(strcmp(info.name, name) == 0); + assert(info.type == LFS_TYPE_REG); + assert(info.size == SIZE); + + // try reading the file, note we reset prng above + uint8_t wbuf[SIZE]; + for (lfs_size_t j = 0; j < SIZE; j++) { + wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26); + } + + lfsr_file_t file; + uint8_t rbuf[SIZE]; + lfsr_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0; + lfsr_file_read(&lfs, &file, rbuf, SIZE) => SIZE; + assert(memcmp(rbuf, wbuf, SIZE) == 0); + lfsr_file_close(&lfs, &file) => 0; + } lfsr_unmount(&lfs) => 0; ''' +# TODO more nospc tests (opened files? other?) +# nospc tests mostly test that things still work when block allocation +# wraparound occurs +[cases.test_alloc_nospc_dirs] +defines.REMOUNT = [false, true] +code = ''' + lfs_t lfs; + lfsr_format(&lfs, CFG) => 0; + lfsr_mount(&lfs, CFG) => 0; + // create directories until we run out of space + lfs_size_t n = 0; + for (;; n++) { + char name[256]; + sprintf(name, "dir%08d", n); + int err = lfsr_mkdir(&lfs, name); + assert(!err || err == LFS_ERR_NOSPC); + if (err == LFS_ERR_NOSPC) { + break; + } + } + // remount? 
+ if (REMOUNT) { + lfsr_unmount(&lfs) => 0; + lfsr_mount(&lfs, CFG) => 0; + } + // check that our mkdir worked until we ran out of space + for (lfs_size_t i = 0; i < n; i++) { + char name[256]; + sprintf(name, "dir%08d", i); + struct lfs_info info; + lfsr_stat(&lfs, name, &info) => 0; + assert(strcmp(info.name, name) == 0); + assert(info.type == LFS_TYPE_DIR); + } + lfsr_dir_t dir; + lfsr_dir_open(&lfs, &dir, "/") => 0; + struct lfs_info info; + lfsr_dir_read(&lfs, &dir, &info) => 0; + assert(strcmp(info.name, ".") == 0); + assert(info.type == LFS_TYPE_DIR); + lfsr_dir_read(&lfs, &dir, &info) => 0; + assert(strcmp(info.name, "..") == 0); + assert(info.type == LFS_TYPE_DIR); + for (lfs_size_t i = 0; i < n; i++) { + char name[256]; + sprintf(name, "dir%08d", i); + lfsr_dir_read(&lfs, &dir, &info) => 0; + assert(strcmp(info.name, name) == 0); + assert(info.type == LFS_TYPE_DIR); + } + lfsr_dir_read(&lfs, &dir, &info) => LFS_ERR_NOENT; + lfsr_dir_close(&lfs, &dir) => 0; + lfsr_unmount(&lfs) => 0; +''' +[cases.test_alloc_nospc_files] +defines.SIZE = [ + '0', + 'CACHE_SIZE/2', + '2*CACHE_SIZE', + 'BLOCK_SIZE/2', + 'BLOCK_SIZE', + '2*BLOCK_SIZE', + '8*BLOCK_SIZE', +] +defines.REMOUNT = [false, true] +code = ''' + lfs_t lfs; + lfsr_format(&lfs, CFG) => 0; + lfsr_mount(&lfs, CFG) => 0; + // create files until we run out of space + uint32_t prng = 42; + lfs_size_t n = 0; + for (;; n++) { + char name[256]; + sprintf(name, "file%08d", n); + uint8_t wbuf[SIZE]; + for (lfs_size_t j = 0; j < SIZE; j++) { + wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26); + } + lfsr_file_t file; + int err = lfsr_file_open(&lfs, &file, name, + LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL); + assert(!err || err == LFS_ERR_NOSPC); + if (err == LFS_ERR_NOSPC) { + break; + } + lfs_ssize_t size = lfsr_file_write(&lfs, &file, wbuf, SIZE); + assert(size == SIZE || size == LFS_ERR_NOSPC); + if (size == LFS_ERR_NOSPC) { + lfsr_file_close(&lfs, &file) => 0; + break; + } + + err = lfsr_file_close(&lfs, &file); + 
assert(!err || err == LFS_ERR_NOSPC); + if (err == LFS_ERR_NOSPC) { + break; + } + } + + // remount? + if (REMOUNT) { + lfsr_unmount(&lfs) => 0; + lfsr_mount(&lfs, CFG) => 0; + } + + // check that our file writes worked until we ran out of space + prng = 42; + for (lfs_size_t i = 0; i < n; i++) { + // check with stat + char name[256]; + sprintf(name, "file%08d", i); + struct lfs_info info; + lfsr_stat(&lfs, name, &info) => 0; + assert(strcmp(info.name, name) == 0); + assert(info.type == LFS_TYPE_REG); + assert(info.size == SIZE); + + // try reading the file, note we reset prng above + uint8_t wbuf[SIZE]; + for (lfs_size_t j = 0; j < SIZE; j++) { + wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26); + } + + lfsr_file_t file; + uint8_t rbuf[SIZE]; + lfsr_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0; + lfsr_file_read(&lfs, &file, rbuf, SIZE) => SIZE; + assert(memcmp(rbuf, wbuf, SIZE) == 0); + lfsr_file_close(&lfs, &file) => 0; + } + + lfsr_unmount(&lfs) => 0; +''' diff --git a/tests/test_btree.toml b/tests/test_btree.toml index fba7c733..365b266c 100644 --- a/tests/test_btree.toml +++ b/tests/test_btree.toml @@ -4235,49 +4235,52 @@ code = ''' uint8_t *seen = malloc((BLOCK_COUNT+7)/8); memset(seen, 0, (BLOCK_COUNT+7)/8); - lfsr_btree_traversal_t traversal = LFSR_BTREE_TRAVERSAL(); - + lfsr_btraversal_t traversal = LFSR_BTRAVERSAL(); for (lfs_block_t i = 0;; i++) { // a bit hacky, but this catches infinite loops assert(i < 2*N); - lfs_size_t bid_; - lfsr_tag_t tag_; - lfs_size_t weight_; - lfsr_data_t data_; - int err = lfsr_btree_traversal_next(&lfs, &btree, &traversal, - &bid_, &tag_, &weight_, &data_); + lfsr_binfo_t binfo; + int err = lfsr_btraversal_read(&lfs, &btree, &traversal, &binfo); assert(!err || err == LFS_ERR_NOENT); if (err == LFS_ERR_NOENT) { break; } - if (tag_ == LFSR_TAG_BTREE) { - lfsr_rbyd_t *branch = (lfsr_rbyd_t *)data_.u.direct.buffer; + if (binfo.tag == LFSR_TAG_BTREE) { printf("traversal: %d 0x%x w%d btree 0x%x.%x\n", - bid_, - tag_, - weight_, - 
branch->block, branch->trunk); + binfo.bid, + binfo.tag, + binfo.weight, + binfo.u.rbyd.block, binfo.u.rbyd.trunk); // keep track of seen blocks - seen[branch->block / 8] |= 1 << (branch->block % 8); + seen[binfo.u.rbyd.block / 8] |= 1 << (binfo.u.rbyd.block % 8); + + } else if (binfo.tag == LFSR_TAG_INLINED) { + printf("traversal: %d 0x%x w%d data %d\n", + binfo.bid, + binfo.tag, + binfo.weight, + lfsr_data_size(&binfo.u.data)); + } else { - printf("traversal: %d 0x%x w%d %d\n", - bid_, - tag_, - weight_, - lfsr_data_size(&data_)); + // well this shouldn't happen + printf("traversal: %d 0x%x w%d\n", + binfo.bid, + binfo.tag, + binfo.weight); + assert(false); } } // if traversal worked, we should be able to clobber all other blocks - uint8_t buffer_[BLOCK_SIZE]; - memset(buffer_, 0xcc, BLOCK_SIZE); + uint8_t clobber_buf[BLOCK_SIZE]; + memset(clobber_buf, 0xcc, BLOCK_SIZE); for (lfs_block_t block = 0; block < BLOCK_COUNT; block++) { if (!(seen[block / 8] & (1 << (block % 8)))) { CFG->erase(CFG, block) => 0; - CFG->prog(CFG, block, 0, buffer_, BLOCK_SIZE) => 0; + CFG->prog(CFG, block, 0, clobber_buf, BLOCK_SIZE) => 0; } } free(seen); @@ -4383,49 +4386,52 @@ code = ''' uint8_t *seen = malloc((BLOCK_COUNT+7)/8); memset(seen, 0, (BLOCK_COUNT+7)/8); - lfsr_btree_traversal_t traversal = LFSR_BTREE_TRAVERSAL(); - + lfsr_btraversal_t traversal = LFSR_BTRAVERSAL(); for (lfs_block_t i = 0;; i++) { // a bit hacky, but this catches infinite loops assert(i < 2*N); - lfs_size_t bid_; - lfsr_tag_t tag_; - lfs_size_t weight_; - lfsr_data_t data_; - int err = lfsr_btree_traversal_next(&lfs, &btree, &traversal, - &bid_, &tag_, &weight_, &data_); + lfsr_binfo_t binfo; + int err = lfsr_btraversal_read(&lfs, &btree, &traversal, &binfo); assert(!err || err == LFS_ERR_NOENT); if (err == LFS_ERR_NOENT) { break; } - if (tag_ == LFSR_TAG_BTREE) { - lfsr_rbyd_t *branch = (lfsr_rbyd_t *)data_.u.direct.buffer; + if (binfo.tag == LFSR_TAG_BTREE) { printf("traversal: %d 0x%x w%d btree 0x%x.%x\n", 
- bid_, - tag_, - weight_, - branch->block, branch->trunk); + binfo.bid, + binfo.tag, + binfo.weight, + binfo.u.rbyd.block, binfo.u.rbyd.trunk); // keep track of seen blocks - seen[branch->block / 8] |= 1 << (branch->block % 8); + seen[binfo.u.rbyd.block / 8] |= 1 << (binfo.u.rbyd.block % 8); + + } else if (binfo.tag == LFSR_TAG_INLINED) { + printf("traversal: %d 0x%x w%d data %d\n", + binfo.bid, + binfo.tag, + binfo.weight, + lfsr_data_size(&binfo.u.data)); + } else { - printf("traversal: %d 0x%x w%d %d\n", - bid_, - tag_, - weight_, - lfsr_data_size(&data_)); + // well this shouldn't happen + printf("traversal: %d 0x%x w%d\n", + binfo.bid, + binfo.tag, + binfo.weight); + assert(false); } } // if traversal worked, we should be able to clobber all other blocks - uint8_t buffer_[BLOCK_SIZE]; - memset(buffer_, 0xcc, BLOCK_SIZE); + uint8_t clobber_buf[BLOCK_SIZE]; + memset(clobber_buf, 0xcc, BLOCK_SIZE); for (lfs_block_t block = 0; block < BLOCK_COUNT; block++) { if (!(seen[block / 8] & (1 << (block % 8)))) { CFG->erase(CFG, block) => 0; - CFG->prog(CFG, block, 0, buffer_, BLOCK_SIZE) => 0; + CFG->prog(CFG, block, 0, clobber_buf, BLOCK_SIZE) => 0; } } free(seen); diff --git a/tests/test_dtree.toml b/tests/test_dtree.toml index 12982a88..563467d6 100644 --- a/tests/test_dtree.toml +++ b/tests/test_dtree.toml @@ -1,5 +1,5 @@ # Test directory operations -after = ['test_mtree', 'test_alloc'] +after = 'test_mtree' ## mkdir tests diff --git a/tests/test_files.toml b/tests/test_files.toml index 2f6970df..7437fb3e 100644 --- a/tests/test_files.toml +++ b/tests/test_files.toml @@ -1,5 +1,5 @@ # Test basic file operations -after = ['test_dtree'] +after = ['test_dtree', 'test_btree'] # test creation/deletion diff --git a/tests/test_mtree.toml b/tests/test_mtree.toml index 77fdfa31..afc43022 100644 --- a/tests/test_mtree.toml +++ b/tests/test_mtree.toml @@ -3447,62 +3447,52 @@ code = ''' uint8_t *seen = malloc((BLOCK_COUNT+7)/8); memset(seen, 0, (BLOCK_COUNT+7)/8); - 
lfsr_mtree_traversal_t traversal = LFSR_MTREE_TRAVERSAL( - VALIDATE ? LFSR_MTREE_TRAVERSAL_VALIDATE : 0); - + lfsr_traversal_t traversal = LFSR_TRAVERSAL( + VALIDATE ? LFSR_TRAVERSAL_VALIDATE : 0); for (lfs_block_t i = 0;; i++) { // a bit hacky, but this catches infinite loops - assert(i < 2*1); + assert(i < 2*BLOCK_COUNT); - lfs_ssize_t mid_; - lfsr_tag_t tag_; - lfsr_data_t data_; - int err = lfsr_mtree_traversal_next(&lfs, &traversal, - &mid_, &tag_, &data_); + lfsr_tinfo_t tinfo; + int err = lfsr_traversal_read(&lfs, &traversal, &tinfo); assert(!err || err == LFS_ERR_NOENT); if (err == LFS_ERR_NOENT) { break; } - if (tag_ == LFSR_TAG_BTREE) { - lfsr_rbyd_t *branch = (lfsr_rbyd_t *)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x btree 0x%x.%x\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - branch->block, branch->trunk); + if (tinfo.tag == LFSR_TAG_MDIR) { + printf("traversal: 0x%x mdir 0x{%x,%x}\n", + tinfo.tag, + tinfo.u.mdir.u.m.blocks[0], tinfo.u.mdir.u.m.blocks[1]); // keep track of seen blocks - seen[branch->block / 8] |= 1 << (branch->block % 8); - } else if (tag_ == LFSR_TAG_MDIR) { - lfsr_mdir_t *mdir = (lfsr_mdir_t*)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x mdir 0x{%x,%x}\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - mdir->u.m.blocks[0], mdir->u.m.blocks[1]); + seen[tinfo.u.mdir.u.m.blocks[1] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[1] % 8); + seen[tinfo.u.mdir.u.m.blocks[0] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[0] % 8); + + } else if (tinfo.tag == LFSR_TAG_BTREE) { + printf("traversal: 0x%x btree 0x%x.%x\n", + tinfo.tag, + tinfo.u.rbyd.block, tinfo.u.rbyd.trunk); // keep track of seen blocks - seen[mdir->u.m.blocks[1] / 8] |= 1 << (mdir->u.m.blocks[1] % 8); - seen[mdir->u.m.blocks[0] / 8] |= 1 << (mdir->u.m.blocks[0] % 8); + seen[tinfo.u.rbyd.block / 8] |= 1 << (tinfo.u.rbyd.block % 8); + } else { // this shouldn't happen - printf("traversal: %d.%d 0x%x %d\n", - mid_ >> lfs.mleaf_bits, 
- mid_ & lfsr_midrmask(&lfs), - tag_, - lfsr_data_size(&data_)); + printf("traversal: 0x%x\n", tinfo.tag); assert(false); } } // if traversal worked, we should be able to clobber all other blocks - uint8_t buffer_[BLOCK_SIZE]; - memset(buffer_, 0xcc, BLOCK_SIZE); + uint8_t clobber_buf[BLOCK_SIZE]; + memset(clobber_buf, 0xcc, BLOCK_SIZE); for (lfs_block_t block = 0; block < BLOCK_COUNT; block++) { if (!(seen[block / 8] & (1 << (block % 8)))) { CFG->erase(CFG, block) => 0; - CFG->prog(CFG, block, 0, buffer_, BLOCK_SIZE) => 0; + CFG->prog(CFG, block, 0, clobber_buf, BLOCK_SIZE) => 0; } } free(seen); @@ -3571,62 +3561,52 @@ code = ''' uint8_t *seen = malloc((BLOCK_COUNT+7)/8); memset(seen, 0, (BLOCK_COUNT+7)/8); - lfsr_mtree_traversal_t traversal = LFSR_MTREE_TRAVERSAL( - VALIDATE ? LFSR_MTREE_TRAVERSAL_VALIDATE : 0); - + lfsr_traversal_t traversal = LFSR_TRAVERSAL( + VALIDATE ? LFSR_TRAVERSAL_VALIDATE : 0); for (lfs_block_t i = 0;; i++) { // a bit hacky, but this catches infinite loops - assert(i < 2*2); + assert(i < 2*BLOCK_COUNT); - lfs_ssize_t mid_; - lfsr_tag_t tag_; - lfsr_data_t data_; - int err = lfsr_mtree_traversal_next(&lfs, &traversal, - &mid_, &tag_, &data_); + lfsr_tinfo_t tinfo; + int err = lfsr_traversal_read(&lfs, &traversal, &tinfo); assert(!err || err == LFS_ERR_NOENT); if (err == LFS_ERR_NOENT) { break; } - if (tag_ == LFSR_TAG_BTREE) { - lfsr_rbyd_t *branch = (lfsr_rbyd_t *)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x btree 0x%x.%x\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - branch->block, branch->trunk); + if (tinfo.tag == LFSR_TAG_MDIR) { + printf("traversal: 0x%x mdir 0x{%x,%x}\n", + tinfo.tag, + tinfo.u.mdir.u.m.blocks[0], tinfo.u.mdir.u.m.blocks[1]); // keep track of seen blocks - seen[branch->block / 8] |= 1 << (branch->block % 8); - } else if (tag_ == LFSR_TAG_MDIR) { - lfsr_mdir_t *mdir = (lfsr_mdir_t*)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x mdir 0x{%x,%x}\n", - mid_ >> lfs.mleaf_bits, - 
mid_ & lfsr_midrmask(&lfs), - tag_, - mdir->u.m.blocks[0], mdir->u.m.blocks[1]); + seen[tinfo.u.mdir.u.m.blocks[1] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[1] % 8); + seen[tinfo.u.mdir.u.m.blocks[0] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[0] % 8); + + } else if (tinfo.tag == LFSR_TAG_BTREE) { + printf("traversal: 0x%x btree 0x%x.%x\n", + tinfo.tag, + tinfo.u.rbyd.block, tinfo.u.rbyd.trunk); // keep track of seen blocks - seen[mdir->u.m.blocks[1] / 8] |= 1 << (mdir->u.m.blocks[1] % 8); - seen[mdir->u.m.blocks[0] / 8] |= 1 << (mdir->u.m.blocks[0] % 8); + seen[tinfo.u.rbyd.block / 8] |= 1 << (tinfo.u.rbyd.block % 8); + } else { // this shouldn't happen - printf("traversal: %d.%d 0x%x %d\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - lfsr_data_size(&data_)); + printf("traversal: 0x%x\n", tinfo.tag); assert(false); } } // if traversal worked, we should be able to clobber all other blocks - uint8_t buffer_[BLOCK_SIZE]; - memset(buffer_, 0xcc, BLOCK_SIZE); + uint8_t clobber_buf[BLOCK_SIZE]; + memset(clobber_buf, 0xcc, BLOCK_SIZE); for (lfs_block_t block = 0; block < BLOCK_COUNT; block++) { if (!(seen[block / 8] & (1 << (block % 8)))) { CFG->erase(CFG, block) => 0; - CFG->prog(CFG, block, 0, buffer_, BLOCK_SIZE) => 0; + CFG->prog(CFG, block, 0, clobber_buf, BLOCK_SIZE) => 0; } } free(seen); @@ -3706,62 +3686,52 @@ code = ''' uint8_t *seen = malloc((BLOCK_COUNT+7)/8); memset(seen, 0, (BLOCK_COUNT+7)/8); - lfsr_mtree_traversal_t traversal = LFSR_MTREE_TRAVERSAL( - VALIDATE ? LFSR_MTREE_TRAVERSAL_VALIDATE : 0); - + lfsr_traversal_t traversal = LFSR_TRAVERSAL( + VALIDATE ? 
LFSR_TRAVERSAL_VALIDATE : 0); for (lfs_block_t i = 0;; i++) { // a bit hacky, but this catches infinite loops - assert(i < 2*3); + assert(i < 2*BLOCK_COUNT); - lfs_ssize_t mid_; - lfsr_tag_t tag_; - lfsr_data_t data_; - int err = lfsr_mtree_traversal_next(&lfs, &traversal, - &mid_, &tag_, &data_); + lfsr_tinfo_t tinfo; + int err = lfsr_traversal_read(&lfs, &traversal, &tinfo); assert(!err || err == LFS_ERR_NOENT); if (err == LFS_ERR_NOENT) { break; } - if (tag_ == LFSR_TAG_BTREE) { - lfsr_rbyd_t *branch = (lfsr_rbyd_t *)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x btree 0x%x.%x\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - branch->block, branch->trunk); + if (tinfo.tag == LFSR_TAG_MDIR) { + printf("traversal: 0x%x mdir 0x{%x,%x}\n", + tinfo.tag, + tinfo.u.mdir.u.m.blocks[0], tinfo.u.mdir.u.m.blocks[1]); // keep track of seen blocks - seen[branch->block / 8] |= 1 << (branch->block % 8); - } else if (tag_ == LFSR_TAG_MDIR) { - lfsr_mdir_t *mdir = (lfsr_mdir_t*)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x mdir 0x{%x,%x}\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - mdir->u.m.blocks[0], mdir->u.m.blocks[1]); + seen[tinfo.u.mdir.u.m.blocks[1] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[1] % 8); + seen[tinfo.u.mdir.u.m.blocks[0] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[0] % 8); + + } else if (tinfo.tag == LFSR_TAG_BTREE) { + printf("traversal: 0x%x btree 0x%x.%x\n", + tinfo.tag, + tinfo.u.rbyd.block, tinfo.u.rbyd.trunk); // keep track of seen blocks - seen[mdir->u.m.blocks[1] / 8] |= 1 << (mdir->u.m.blocks[1] % 8); - seen[mdir->u.m.blocks[0] / 8] |= 1 << (mdir->u.m.blocks[0] % 8); + seen[tinfo.u.rbyd.block / 8] |= 1 << (tinfo.u.rbyd.block % 8); + } else { // this shouldn't happen - printf("traversal: %d.%d 0x%x %d\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - lfsr_data_size(&data_)); + printf("traversal: 0x%x\n", tinfo.tag); assert(false); } } // if traversal worked, we should be able 
to clobber all other blocks - uint8_t buffer_[BLOCK_SIZE]; - memset(buffer_, 0xcc, BLOCK_SIZE); + uint8_t clobber_buf[BLOCK_SIZE]; + memset(clobber_buf, 0xcc, BLOCK_SIZE); for (lfs_block_t block = 0; block < BLOCK_COUNT; block++) { if (!(seen[block / 8] & (1 << (block % 8)))) { CFG->erase(CFG, block) => 0; - CFG->prog(CFG, block, 0, buffer_, BLOCK_SIZE) => 0; + CFG->prog(CFG, block, 0, clobber_buf, BLOCK_SIZE) => 0; } } free(seen); @@ -3833,62 +3803,52 @@ code = ''' uint8_t *seen = malloc((BLOCK_COUNT+7)/8); memset(seen, 0, (BLOCK_COUNT+7)/8); - lfsr_mtree_traversal_t traversal = LFSR_MTREE_TRAVERSAL( - VALIDATE ? LFSR_MTREE_TRAVERSAL_VALIDATE : 0); - + lfsr_traversal_t traversal = LFSR_TRAVERSAL( + VALIDATE ? LFSR_TRAVERSAL_VALIDATE : 0); for (lfs_block_t i = 0;; i++) { // a bit hacky, but this catches infinite loops - assert(i < 2*3); + assert(i < 2*BLOCK_COUNT); - lfs_ssize_t mid_; - lfsr_tag_t tag_; - lfsr_data_t data_; - int err = lfsr_mtree_traversal_next(&lfs, &traversal, - &mid_, &tag_, &data_); + lfsr_tinfo_t tinfo; + int err = lfsr_traversal_read(&lfs, &traversal, &tinfo); assert(!err || err == LFS_ERR_NOENT); if (err == LFS_ERR_NOENT) { break; } - if (tag_ == LFSR_TAG_BTREE) { - lfsr_rbyd_t *branch = (lfsr_rbyd_t *)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x btree 0x%x.%x\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - branch->block, branch->trunk); + if (tinfo.tag == LFSR_TAG_MDIR) { + printf("traversal: 0x%x mdir 0x{%x,%x}\n", + tinfo.tag, + tinfo.u.mdir.u.m.blocks[0], tinfo.u.mdir.u.m.blocks[1]); // keep track of seen blocks - seen[branch->block / 8] |= 1 << (branch->block % 8); - } else if (tag_ == LFSR_TAG_MDIR) { - lfsr_mdir_t *mdir = (lfsr_mdir_t*)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x mdir 0x{%x,%x}\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - mdir->u.m.blocks[0], mdir->u.m.blocks[1]); + seen[tinfo.u.mdir.u.m.blocks[1] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[1] % 8); + 
seen[tinfo.u.mdir.u.m.blocks[0] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[0] % 8); + + } else if (tinfo.tag == LFSR_TAG_BTREE) { + printf("traversal: 0x%x btree 0x%x.%x\n", + tinfo.tag, + tinfo.u.rbyd.block, tinfo.u.rbyd.trunk); // keep track of seen blocks - seen[mdir->u.m.blocks[1] / 8] |= 1 << (mdir->u.m.blocks[1] % 8); - seen[mdir->u.m.blocks[0] / 8] |= 1 << (mdir->u.m.blocks[0] % 8); + seen[tinfo.u.rbyd.block / 8] |= 1 << (tinfo.u.rbyd.block % 8); + } else { // this shouldn't happen - printf("traversal: %d.%d 0x%x %d\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - lfsr_data_size(&data_)); + printf("traversal: 0x%x\n", tinfo.tag); assert(false); } } // if traversal worked, we should be able to clobber all other blocks - uint8_t buffer_[BLOCK_SIZE]; - memset(buffer_, 0xcc, BLOCK_SIZE); + uint8_t clobber_buf[BLOCK_SIZE]; + memset(clobber_buf, 0xcc, BLOCK_SIZE); for (lfs_block_t block = 0; block < BLOCK_COUNT; block++) { if (!(seen[block / 8] & (1 << (block % 8)))) { CFG->erase(CFG, block) => 0; - CFG->prog(CFG, block, 0, buffer_, BLOCK_SIZE) => 0; + CFG->prog(CFG, block, 0, clobber_buf, BLOCK_SIZE) => 0; } } free(seen); @@ -3971,62 +3931,52 @@ code = ''' uint8_t *seen = malloc((BLOCK_COUNT+7)/8); memset(seen, 0, (BLOCK_COUNT+7)/8); - lfsr_mtree_traversal_t traversal = LFSR_MTREE_TRAVERSAL( - VALIDATE ? LFSR_MTREE_TRAVERSAL_VALIDATE : 0); - + lfsr_traversal_t traversal = LFSR_TRAVERSAL( + VALIDATE ? 
LFSR_TRAVERSAL_VALIDATE : 0); for (lfs_block_t i = 0;; i++) { // a bit hacky, but this catches infinite loops - assert(i < 2*(1+N)); + assert(i < 2*BLOCK_COUNT); - lfs_ssize_t mid_; - lfsr_tag_t tag_; - lfsr_data_t data_; - int err = lfsr_mtree_traversal_next(&lfs, &traversal, - &mid_, &tag_, &data_); + lfsr_tinfo_t tinfo; + int err = lfsr_traversal_read(&lfs, &traversal, &tinfo); assert(!err || err == LFS_ERR_NOENT); if (err == LFS_ERR_NOENT) { break; } - if (tag_ == LFSR_TAG_BTREE) { - lfsr_rbyd_t *branch = (lfsr_rbyd_t *)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x btree 0x%x.%x\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - branch->block, branch->trunk); + if (tinfo.tag == LFSR_TAG_MDIR) { + printf("traversal: 0x%x mdir 0x{%x,%x}\n", + tinfo.tag, + tinfo.u.mdir.u.m.blocks[0], tinfo.u.mdir.u.m.blocks[1]); // keep track of seen blocks - seen[branch->block / 8] |= 1 << (branch->block % 8); - } else if (tag_ == LFSR_TAG_MDIR) { - lfsr_mdir_t *mdir = (lfsr_mdir_t*)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x mdir 0x{%x,%x}\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - mdir->u.m.blocks[0], mdir->u.m.blocks[1]); + seen[tinfo.u.mdir.u.m.blocks[1] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[1] % 8); + seen[tinfo.u.mdir.u.m.blocks[0] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[0] % 8); + + } else if (tinfo.tag == LFSR_TAG_BTREE) { + printf("traversal: 0x%x btree 0x%x.%x\n", + tinfo.tag, + tinfo.u.rbyd.block, tinfo.u.rbyd.trunk); // keep track of seen blocks - seen[mdir->u.m.blocks[1] / 8] |= 1 << (mdir->u.m.blocks[1] % 8); - seen[mdir->u.m.blocks[0] / 8] |= 1 << (mdir->u.m.blocks[0] % 8); + seen[tinfo.u.rbyd.block / 8] |= 1 << (tinfo.u.rbyd.block % 8); + } else { // this shouldn't happen - printf("traversal: %d.%d 0x%x %d\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - lfsr_data_size(&data_)); + printf("traversal: 0x%x\n", tinfo.tag); assert(false); } } // if traversal worked, we should be 
able to clobber all other blocks - uint8_t buffer_[BLOCK_SIZE]; - memset(buffer_, 0xcc, BLOCK_SIZE); + uint8_t clobber_buf[BLOCK_SIZE]; + memset(clobber_buf, 0xcc, BLOCK_SIZE); for (lfs_block_t block = 0; block < BLOCK_COUNT; block++) { if (!(seen[block / 8] & (1 << (block % 8)))) { CFG->erase(CFG, block) => 0; - CFG->prog(CFG, block, 0, buffer_, BLOCK_SIZE) => 0; + CFG->prog(CFG, block, 0, clobber_buf, BLOCK_SIZE) => 0; } } free(seen); @@ -4141,62 +4091,52 @@ code = ''' uint8_t *seen = malloc((BLOCK_COUNT+7)/8); memset(seen, 0, (BLOCK_COUNT+7)/8); - lfsr_mtree_traversal_t traversal = LFSR_MTREE_TRAVERSAL( - VALIDATE ? LFSR_MTREE_TRAVERSAL_VALIDATE : 0); - + lfsr_traversal_t traversal = LFSR_TRAVERSAL( + VALIDATE ? LFSR_TRAVERSAL_VALIDATE : 0); for (lfs_block_t i = 0;; i++) { // a bit hacky, but this catches infinite loops - assert(i < 2*(1+N)); + assert(i < 2*BLOCK_COUNT); - lfs_ssize_t mid_; - lfsr_tag_t tag_; - lfsr_data_t data_; - int err = lfsr_mtree_traversal_next(&lfs, &traversal, - &mid_, &tag_, &data_); + lfsr_tinfo_t tinfo; + int err = lfsr_traversal_read(&lfs, &traversal, &tinfo); assert(!err || err == LFS_ERR_NOENT); if (err == LFS_ERR_NOENT) { break; } - if (tag_ == LFSR_TAG_BTREE) { - lfsr_rbyd_t *branch = (lfsr_rbyd_t *)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x btree 0x%x.%x\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - branch->block, branch->trunk); + if (tinfo.tag == LFSR_TAG_MDIR) { + printf("traversal: 0x%x mdir 0x{%x,%x}\n", + tinfo.tag, + tinfo.u.mdir.u.m.blocks[0], tinfo.u.mdir.u.m.blocks[1]); // keep track of seen blocks - seen[branch->block / 8] |= 1 << (branch->block % 8); - } else if (tag_ == LFSR_TAG_MDIR) { - lfsr_mdir_t *mdir = (lfsr_mdir_t*)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x mdir 0x{%x,%x}\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - mdir->u.m.blocks[0], mdir->u.m.blocks[1]); + seen[tinfo.u.mdir.u.m.blocks[1] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[1] % 
8); + seen[tinfo.u.mdir.u.m.blocks[0] / 8] + |= 1 << (tinfo.u.mdir.u.m.blocks[0] % 8); + + } else if (tinfo.tag == LFSR_TAG_BTREE) { + printf("traversal: 0x%x btree 0x%x.%x\n", + tinfo.tag, + tinfo.u.rbyd.block, tinfo.u.rbyd.trunk); // keep track of seen blocks - seen[mdir->u.m.blocks[1] / 8] |= 1 << (mdir->u.m.blocks[1] % 8); - seen[mdir->u.m.blocks[0] / 8] |= 1 << (mdir->u.m.blocks[0] % 8); + seen[tinfo.u.rbyd.block / 8] |= 1 << (tinfo.u.rbyd.block % 8); + } else { // this shouldn't happen - printf("traversal: %d.%d 0x%x %d\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - lfsr_data_size(&data_)); + printf("traversal: 0x%x\n", tinfo.tag); assert(false); } } // if traversal worked, we should be able to clobber all other blocks - uint8_t buffer_[BLOCK_SIZE]; - memset(buffer_, 0xcc, BLOCK_SIZE); + uint8_t clobber_buf[BLOCK_SIZE]; + memset(clobber_buf, 0xcc, BLOCK_SIZE); for (lfs_block_t block = 0; block < BLOCK_COUNT; block++) { if (!(seen[block / 8] & (1 << (block % 8)))) { CFG->erase(CFG, block) => 0; - CFG->prog(CFG, block, 0, buffer_, BLOCK_SIZE) => 0; + CFG->prog(CFG, block, 0, clobber_buf, BLOCK_SIZE) => 0; } } free(seen); @@ -4249,44 +4189,31 @@ code = ''' MROOT, 0, FROMMBLOCKS(LFSR_MBLOCKS_MROOTANCHOR(), buf)))) => 0; // technically, cycle detection only needs to work when we're validating - lfsr_mtree_traversal_t traversal = LFSR_MTREE_TRAVERSAL( - LFSR_MTREE_TRAVERSAL_VALIDATE); - + lfsr_traversal_t traversal = LFSR_TRAVERSAL(LFSR_TRAVERSAL_VALIDATE); for (lfs_block_t i = 0;; i++) { // assert that we detect the cycle in a reasonable number of iterations - assert(i < 1024); + assert(i < 2*BLOCK_COUNT); - lfs_ssize_t mid_; - lfsr_tag_t tag_; - lfsr_data_t data_; - int err = lfsr_mtree_traversal_next(&lfs, &traversal, - &mid_, &tag_, &data_); + lfsr_tinfo_t tinfo; + int err = lfsr_traversal_read(&lfs, &traversal, &tinfo); assert(!err || err == LFS_ERR_CORRUPT); if (err == LFS_ERR_CORRUPT) { break; } - if (tag_ == LFSR_TAG_BTREE) { - 
lfsr_rbyd_t *branch = (lfsr_rbyd_t *)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x btree 0x%x.%x\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - branch->block, branch->trunk); - } else if (tag_ == LFSR_TAG_MDIR) { - lfsr_mdir_t *mdir = (lfsr_mdir_t*)data_.u.direct.buffer; - printf("traversal: %d.%d 0x%x mdir 0x{%x,%x}\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - mdir->u.m.blocks[0], mdir->u.m.blocks[1]); + if (tinfo.tag == LFSR_TAG_MDIR) { + printf("traversal: 0x%x mdir 0x{%x,%x}\n", + tinfo.tag, + tinfo.u.mdir.u.m.blocks[0], tinfo.u.mdir.u.m.blocks[1]); + + } else if (tinfo.tag == LFSR_TAG_BTREE) { + printf("traversal: 0x%x btree 0x%x.%x\n", + tinfo.tag, + tinfo.u.rbyd.block, tinfo.u.rbyd.trunk); + } else { // this shouldn't happen - printf("traversal: %d.%d 0x%x %d\n", - mid_ >> lfs.mleaf_bits, - mid_ & lfsr_midrmask(&lfs), - tag_, - lfsr_data_size(&data_)); + printf("traversal: 0x%x\n", tinfo.tag); assert(false); } }