forked from Imagelibrary/littlefs
Added some rbyd benchmarks, fixed/tweaked some related scripts

- Added both uattr (limited to 256) and id (limited to 65535) benchmarks
  covering the main rbyd operations
- Fixed an issue where --defines gets passed to the test/bench runners when
  querying id-specific information. After changing the test/bench runners to
  prioritize explicit defines, this caused problems for recorded benchmark
  results and debug-related things.
- In plot.py/plotmpl.py, made --by/-x/-y in subplots behave somewhat
  reasonably, contributing to a global dataset and the figure's legend,
  colors, etc, but only shown in the specified subplot. This is useful mainly
  for showing different -y values on different subplots.
- In plot.py/plotmpl.py, added --labels to allow explicit configuration of
  legend labels, much like --colors/--formats/--chars/etc. This removes one
  of the main annoying needs for modifying benchmark results.
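As a sketch of the --labels addition (the invocation is illustrative; the CSV
field names and output flag are placeholders, only --labels/-x/-y come from
this change):

    ./scripts/plotmpl.py bench.csv -obench.svg \
        -xN -ybench_readed -ybench_proged -ybench_erased \
        --labels=read,prog,erase
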
benches/bench_rbyd.toml (new file, 700 lines)
@@ -0,0 +1,700 @@
[cases.bench_rbyd_attr_commit]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
# 0 = 1 commit
# 1 = N commits
defines.COMMIT = [0, 1]
defines.N = [8, 16, 32, 64, 128, 256]
in = 'lfs.c'
if = 'COMMIT == 0 || PROG_SIZE*N <= BLOCK_SIZE'
code = '''
    // set block_size to the full size of disk so we can test arbitrarily
    // large rbyd trees, we don't really care about block sizes at this
    // abstraction level
    struct lfs_config cfg_ = *cfg;
    cfg_.block_size = cfg->block_size*cfg->block_count;
    cfg_.block_count = 1;

    lfs_t lfs;
    lfs_init(&lfs, &cfg_) => 0;

    lfsr_rbyd_t rbyd = {
        .block = 0,
        .trunk = 0,
        .off = 0,
        .rev = 1,
        .crc = 0,
        .count = 0,
        .erased = true,
    };
    lfs_bd_erase(&lfs, rbyd.block) => 0;

    // build the attribute list for the current permutations
    //
    // NOTE we only have 256 user attributes, so this benchmark is
    // a bit limited
    uint32_t prng = 42;
    BENCH_START();
    if (COMMIT == 0) {
        struct lfsr_attr attrs[N];
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? (N-1-i)
                : BENCH_PRNG(&prng) % N;
            attrs[i] = *LFSR_ATTR2(
                    UATTR, i_, -1, "\xaa\xaa\xaa\xaa", 4,
                    (i+1 < N) ? &attrs[i+1] : NULL);
        }
        lfsr_rbyd_commit(&lfs, &rbyd, attrs) => 0;
    } else {
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? (N-1-i)
                : BENCH_PRNG(&prng) % N;
            lfsr_rbyd_commit(&lfs, &rbyd,
                    LFSR_ATTR2(UATTR, i_ & 0xff, -1, "\xaa\xaa\xaa\xaa", 4,
                    NULL)) => 0;
        }
    }
    BENCH_STOP();

    lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, NULL) => 0;
'''

[cases.bench_rbyd_attr_fetch]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
# 0 = 1 commit
# 1 = N commits
defines.COMMIT = [0, 1]
defines.N = [8, 16, 32, 64, 128, 256]
in = 'lfs.c'
if = 'COMMIT == 0 || PROG_SIZE*N <= BLOCK_SIZE'
code = '''
    // set block_size to the full size of disk so we can test arbitrarily
    // large rbyd trees, we don't really care about block sizes at this
    // abstraction level
    struct lfs_config cfg_ = *cfg;
    cfg_.block_size = cfg->block_size*cfg->block_count;
    cfg_.block_count = 1;

    lfs_t lfs;
    lfs_init(&lfs, &cfg_) => 0;

    lfsr_rbyd_t rbyd = {
        .block = 0,
        .trunk = 0,
        .off = 0,
        .rev = 1,
        .crc = 0,
        .count = 0,
        .erased = true,
    };
    lfs_bd_erase(&lfs, rbyd.block) => 0;

    // build the attribute list for the current permutations
    //
    // NOTE we only have 256 user attributes, so this benchmark is
    // a bit limited
    uint32_t prng = 42;
    if (COMMIT == 0) {
        struct lfsr_attr attrs[N];
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? (N-1-i)
                : BENCH_PRNG(&prng) % N;
            attrs[i] = *LFSR_ATTR2(
                    UATTR, i_, -1, "\xaa\xaa\xaa\xaa", 4,
                    (i+1 < N) ? &attrs[i+1] : NULL);
        }
        lfsr_rbyd_commit(&lfs, &rbyd, attrs) => 0;
    } else {
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? (N-1-i)
                : BENCH_PRNG(&prng) % N;
            lfsr_rbyd_commit(&lfs, &rbyd,
                    LFSR_ATTR2(UATTR, i_ & 0xff, -1, "\xaa\xaa\xaa\xaa", 4,
                    NULL)) => 0;
        }
    }

    BENCH_START();
    lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, NULL) => 0;
    BENCH_STOP();
'''

[cases.bench_rbyd_attr_lookup]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
# 0 = 1 commit
# 1 = N commits
defines.COMMIT = [0, 1]
defines.N = [8, 16, 32, 64, 128, 256]
in = 'lfs.c'
if = 'COMMIT == 0 || PROG_SIZE*N <= BLOCK_SIZE'
code = '''
    // set block_size to the full size of disk so we can test arbitrarily
    // large rbyd trees, we don't really care about block sizes at this
    // abstraction level
    struct lfs_config cfg_ = *cfg;
    cfg_.block_size = cfg->block_size*cfg->block_count;
    cfg_.block_count = 1;

    lfs_t lfs;
    lfs_init(&lfs, &cfg_) => 0;

    lfsr_rbyd_t rbyd = {
        .block = 0,
        .trunk = 0,
        .off = 0,
        .rev = 1,
        .crc = 0,
        .count = 0,
        .erased = true,
    };
    lfs_bd_erase(&lfs, rbyd.block) => 0;

    // build the attribute list for the current permutations
    //
    // NOTE we only have 256 user attributes, so this benchmark is
    // a bit limited
    uint32_t prng = 42;
    if (COMMIT == 0) {
        struct lfsr_attr attrs[N];
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? (N-1-i)
                : BENCH_PRNG(&prng) % N;
            attrs[i] = *LFSR_ATTR2(
                    UATTR, i_, -1, "\xaa\xaa\xaa\xaa", 4,
                    (i+1 < N) ? &attrs[i+1] : NULL);
        }
        lfsr_rbyd_commit(&lfs, &rbyd, attrs) => 0;
    } else {
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? (N-1-i)
                : BENCH_PRNG(&prng) % N;
            lfsr_rbyd_commit(&lfs, &rbyd,
                    LFSR_ATTR2(UATTR, i_ & 0xff, -1, "\xaa\xaa\xaa\xaa", 4,
                    NULL)) => 0;
        }
    }

    lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, NULL) => 0;

    BENCH_START();
    lfs_off_t i_ = BENCH_PRNG(&prng) % N;
    lfs_off_t off;
    lfs_size_t size;
    lfsr_rbyd_lookup(&lfs, &rbyd, LFSR_TAG2(UATTR, i_ & 0xff, -1), &off, &size);
    BENCH_STOP();
'''

[cases.bench_rbyd_attr_append]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
# 0 = 1 commit
# 1 = N commits
defines.COMMIT = [0, 1]
defines.N = [8, 16, 32, 64, 128, 256]
in = 'lfs.c'
if = 'COMMIT == 0 || PROG_SIZE*(N+1) <= BLOCK_SIZE'
code = '''
    // set block_size to the full size of disk so we can test arbitrarily
    // large rbyd trees, we don't really care about block sizes at this
    // abstraction level
    struct lfs_config cfg_ = *cfg;
    cfg_.block_size = cfg->block_size*cfg->block_count;
    cfg_.block_count = 1;

    lfs_t lfs;
    lfs_init(&lfs, &cfg_) => 0;

    lfsr_rbyd_t rbyd = {
        .block = 0,
        .trunk = 0,
        .off = 0,
        .rev = 1,
        .crc = 0,
        .count = 0,
        .erased = true,
    };
    lfs_bd_erase(&lfs, rbyd.block) => 0;

    // build the attribute list for the current permutations
    //
    // NOTE we only have 256 user attributes, so this benchmark is
    // a bit limited
    uint32_t prng = 42;
    if (COMMIT == 0) {
        struct lfsr_attr attrs[N];
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? (N-1-i)
                : BENCH_PRNG(&prng) % N;
            attrs[i] = *LFSR_ATTR2(
                    UATTR, i_, -1, "\xaa\xaa\xaa\xaa", 4,
                    (i+1 < N) ? &attrs[i+1] : NULL);
        }
        lfsr_rbyd_commit(&lfs, &rbyd, attrs) => 0;
    } else {
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? (N-1-i)
                : BENCH_PRNG(&prng) % N;
            lfsr_rbyd_commit(&lfs, &rbyd,
                    LFSR_ATTR2(UATTR, i_ & 0xff, -1, "\xaa\xaa\xaa\xaa", 4,
                    NULL)) => 0;
        }
    }

    lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, NULL) => 0;

    BENCH_START();
    lfs_off_t i_ = BENCH_PRNG(&prng) % N;
    lfsr_rbyd_commit(&lfs, &rbyd,
            LFSR_ATTR2(UATTR, i_, -1, "\xbb\xbb\xbb\xbb", 4, NULL)) => 0;
    BENCH_STOP();

    uint8_t buffer[4];
    lfsr_rbyd_get(&lfs, &rbyd, LFSR_TAG2(UATTR, i_, -1), buffer, 4) => 4;
    assert(memcmp(buffer, "\xbb\xbb\xbb\xbb", 4) == 0);
'''

[cases.bench_rbyd_attr_remove]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
# 0 = 1 commit
# 1 = N commits
defines.COMMIT = [0, 1]
defines.N = [8, 16, 32, 64, 128, 256]
in = 'lfs.c'
if = 'COMMIT == 0 || PROG_SIZE*(N+1) <= BLOCK_SIZE'
code = '''
    // set block_size to the full size of disk so we can test arbitrarily
    // large rbyd trees, we don't really care about block sizes at this
    // abstraction level
    struct lfs_config cfg_ = *cfg;
    cfg_.block_size = cfg->block_size*cfg->block_count;
    cfg_.block_count = 1;

    lfs_t lfs;
    lfs_init(&lfs, &cfg_) => 0;

    lfsr_rbyd_t rbyd = {
        .block = 0,
        .trunk = 0,
        .off = 0,
        .rev = 1,
        .crc = 0,
        .count = 0,
        .erased = true,
    };
    lfs_bd_erase(&lfs, rbyd.block) => 0;

    // build the attribute list for the current permutations
    //
    // NOTE we only have 256 user attributes, so this benchmark is
    // a bit limited
    uint32_t prng = 42;
    if (COMMIT == 0) {
        struct lfsr_attr attrs[N];
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? (N-1-i)
                : BENCH_PRNG(&prng) % N;
            attrs[i] = *LFSR_ATTR2(
                    UATTR, i_, -1, "\xaa\xaa\xaa\xaa", 4,
                    (i+1 < N) ? &attrs[i+1] : NULL);
        }
        lfsr_rbyd_commit(&lfs, &rbyd, attrs) => 0;
    } else {
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? (N-1-i)
                : BENCH_PRNG(&prng) % N;
            lfsr_rbyd_commit(&lfs, &rbyd,
                    LFSR_ATTR2(UATTR, i_ & 0xff, -1, "\xaa\xaa\xaa\xaa", 4,
                    NULL)) => 0;
        }
    }

    lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, NULL) => 0;

    BENCH_START();
    lfs_off_t i_ = BENCH_PRNG(&prng) % N;
    lfsr_rbyd_commit(&lfs, &rbyd,
            LFSR_ATTR2(RMUATTR, i_, -1, NULL, 0, NULL)) => 0;
    BENCH_STOP();

    uint8_t buffer[4];
    lfsr_rbyd_get(&lfs, &rbyd, LFSR_TAG2(UATTR, i_, -1), buffer, 4)
            => LFS_ERR_NOENT;
'''

[cases.bench_rbyd_id_commit]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
# 0 = 1 commit
# 1 = N commits
defines.COMMIT = [0, 1]
defines.N = [8, 16, 32, 64, 128, 256, 1024, 2048, 4096]
in = 'lfs.c'
if = 'COMMIT == 0 || PROG_SIZE*N <= BLOCK_SIZE'
code = '''
    // set block_size to the full size of disk so we can test arbitrarily
    // large rbyd trees, we don't really care about block sizes at this
    // abstraction level
    struct lfs_config cfg_ = *cfg;
    cfg_.block_size = cfg->block_size*cfg->block_count;
    cfg_.block_count = 1;

    lfs_t lfs;
    lfs_init(&lfs, &cfg_) => 0;

    lfsr_rbyd_t rbyd = {
        .block = 0,
        .trunk = 0,
        .off = 0,
        .rev = 1,
        .crc = 0,
        .count = 0,
        .erased = true,
    };
    lfs_bd_erase(&lfs, rbyd.block) => 0;

    // create commits, note we need to take care to generate
    // indexes within a valid range as the rbyd grows
    uint32_t prng = 42;
    BENCH_START();
    if (COMMIT == 0) {
        struct lfsr_attr attrs[N];
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? 0
                : BENCH_PRNG(&prng) % (rbyd.count+1);
            attrs[i] = *LFSR_ATTR(
                    MKREG, i_, "\xaa\xaa\xaa\xaa", 4,
                    (i+1 < N) ? &attrs[i+1] : NULL);
        }
        lfsr_rbyd_commit(&lfs, &rbyd, attrs) => 0;
    } else {
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? 0
                : BENCH_PRNG(&prng) % (rbyd.count+1);
            lfsr_rbyd_commit(&lfs, &rbyd,
                    LFSR_ATTR(MKREG, i_, "\xaa\xaa\xaa\xaa", 4,
                    NULL)) => 0;
        }
    }
    BENCH_STOP();

    lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, NULL) => 0;
'''

[cases.bench_rbyd_id_fetch]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
# 0 = 1 commit
# 1 = N commits
defines.COMMIT = [0, 1]
defines.N = [8, 16, 32, 64, 128, 256, 1024, 2048, 4096]
in = 'lfs.c'
if = 'COMMIT == 0 || PROG_SIZE*N <= BLOCK_SIZE'
code = '''
    // set block_size to the full size of disk so we can test arbitrarily
    // large rbyd trees, we don't really care about block sizes at this
    // abstraction level
    struct lfs_config cfg_ = *cfg;
    cfg_.block_size = cfg->block_size*cfg->block_count;
    cfg_.block_count = 1;

    lfs_t lfs;
    lfs_init(&lfs, &cfg_) => 0;

    lfsr_rbyd_t rbyd = {
        .block = 0,
        .trunk = 0,
        .off = 0,
        .rev = 1,
        .crc = 0,
        .count = 0,
        .erased = true,
    };
    lfs_bd_erase(&lfs, rbyd.block) => 0;

    // create commits, note we need to take care to generate
    // indexes within a valid range as the rbyd grows
    uint32_t prng = 42;
    if (COMMIT == 0) {
        struct lfsr_attr attrs[N];
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? 0
                : BENCH_PRNG(&prng) % (rbyd.count+1);
            attrs[i] = *LFSR_ATTR(
                    MKREG, i_, "\xaa\xaa\xaa\xaa", 4,
                    (i+1 < N) ? &attrs[i+1] : NULL);
        }
        lfsr_rbyd_commit(&lfs, &rbyd, attrs) => 0;
    } else {
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? 0
                : BENCH_PRNG(&prng) % (rbyd.count+1);
            lfsr_rbyd_commit(&lfs, &rbyd,
                    LFSR_ATTR(MKREG, i_, "\xaa\xaa\xaa\xaa", 4,
                    NULL)) => 0;
        }
    }

    BENCH_START();
    lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, NULL) => 0;
    BENCH_STOP();
'''

[cases.bench_rbyd_id_lookup]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
# 0 = 1 commit
# 1 = N commits
defines.COMMIT = [0, 1]
defines.N = [8, 16, 32, 64, 128, 256, 1024, 2048, 4096]
in = 'lfs.c'
if = 'COMMIT == 0 || PROG_SIZE*N <= BLOCK_SIZE'
code = '''
    // set block_size to the full size of disk so we can test arbitrarily
    // large rbyd trees, we don't really care about block sizes at this
    // abstraction level
    struct lfs_config cfg_ = *cfg;
    cfg_.block_size = cfg->block_size*cfg->block_count;
    cfg_.block_count = 1;

    lfs_t lfs;
    lfs_init(&lfs, &cfg_) => 0;

    lfsr_rbyd_t rbyd = {
        .block = 0,
        .trunk = 0,
        .off = 0,
        .rev = 1,
        .crc = 0,
        .count = 0,
        .erased = true,
    };
    lfs_bd_erase(&lfs, rbyd.block) => 0;

    // create commits, note we need to take care to generate
    // indexes within a valid range as the rbyd grows
    uint32_t prng = 42;
    if (COMMIT == 0) {
        struct lfsr_attr attrs[N];
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? 0
                : BENCH_PRNG(&prng) % (rbyd.count+1);
            attrs[i] = *LFSR_ATTR(
                    MKREG, i_, "\xaa\xaa\xaa\xaa", 4,
                    (i+1 < N) ? &attrs[i+1] : NULL);
        }
        lfsr_rbyd_commit(&lfs, &rbyd, attrs) => 0;
    } else {
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? 0
                : BENCH_PRNG(&prng) % (rbyd.count+1);
            lfsr_rbyd_commit(&lfs, &rbyd,
                    LFSR_ATTR(MKREG, i_, "\xaa\xaa\xaa\xaa", 4,
                    NULL)) => 0;
        }
    }

    lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, NULL) => 0;

    BENCH_START();
    lfs_off_t i_ = BENCH_PRNG(&prng) % N;
    lfs_off_t off;
    lfs_size_t size;
    lfsr_rbyd_lookup(&lfs, &rbyd, LFSR_TAG(MKREG, i_), &off, &size)
            => LFSR_TAG(MKREG, i_);
    BENCH_STOP();
'''

[cases.bench_rbyd_id_create]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
# 0 = 1 commit
# 1 = N commits
defines.COMMIT = [0, 1]
defines.N = [8, 16, 32, 64, 128, 256, 1024, 2048, 4096]
in = 'lfs.c'
if = 'COMMIT == 0 || PROG_SIZE*(N+1) <= BLOCK_SIZE'
code = '''
    // set block_size to the full size of disk so we can test arbitrarily
    // large rbyd trees, we don't really care about block sizes at this
    // abstraction level
    struct lfs_config cfg_ = *cfg;
    cfg_.block_size = cfg->block_size*cfg->block_count;
    cfg_.block_count = 1;

    lfs_t lfs;
    lfs_init(&lfs, &cfg_) => 0;

    lfsr_rbyd_t rbyd = {
        .block = 0,
        .trunk = 0,
        .off = 0,
        .rev = 1,
        .crc = 0,
        .count = 0,
        .erased = true,
    };
    lfs_bd_erase(&lfs, rbyd.block) => 0;

    // create commits, note we need to take care to generate
    // indexes within a valid range as the rbyd grows
    uint32_t prng = 42;
    if (COMMIT == 0) {
        struct lfsr_attr attrs[N];
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? 0
                : BENCH_PRNG(&prng) % (rbyd.count+1);
            attrs[i] = *LFSR_ATTR(
                    MKREG, i_, "\xaa\xaa\xaa\xaa", 4,
                    (i+1 < N) ? &attrs[i+1] : NULL);
        }
        lfsr_rbyd_commit(&lfs, &rbyd, attrs) => 0;
    } else {
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? 0
                : BENCH_PRNG(&prng) % (rbyd.count+1);
            lfsr_rbyd_commit(&lfs, &rbyd,
                    LFSR_ATTR(MKREG, i_, "\xaa\xaa\xaa\xaa", 4,
                    NULL)) => 0;
        }
    }

    lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, NULL) => 0;

    BENCH_START();
    lfs_off_t i_ = BENCH_PRNG(&prng) % (N+1);
    lfsr_rbyd_commit(&lfs, &rbyd,
            LFSR_ATTR(MKREG, i_, "\xbb\xbb\xbb\xbb", 4,
            NULL)) => 0;
    BENCH_STOP();

    uint8_t buffer[4];
    lfsr_rbyd_get(&lfs, &rbyd, LFSR_TAG(MKREG, i_), buffer, 4) => 4;
    assert(memcmp(buffer, "\xbb\xbb\xbb\xbb", 4) == 0);
'''

[cases.bench_rbyd_id_delete]
# 0 = in-order
# 1 = reversed-order
# 2 = random-order
defines.ORDER = [0, 1, 2]
# 0 = 1 commit
# 1 = N commits
defines.COMMIT = [0, 1]
defines.N = [8, 16, 32, 64, 128, 256, 1024, 2048, 4096]
in = 'lfs.c'
if = 'COMMIT == 0 || PROG_SIZE*(N+1) <= BLOCK_SIZE'
code = '''
    // set block_size to the full size of disk so we can test arbitrarily
    // large rbyd trees, we don't really care about block sizes at this
    // abstraction level
    struct lfs_config cfg_ = *cfg;
    cfg_.block_size = cfg->block_size*cfg->block_count;
    cfg_.block_count = 1;

    lfs_t lfs;
    lfs_init(&lfs, &cfg_) => 0;

    lfsr_rbyd_t rbyd = {
        .block = 0,
        .trunk = 0,
        .off = 0,
        .rev = 1,
        .crc = 0,
        .count = 0,
        .erased = true,
    };
    lfs_bd_erase(&lfs, rbyd.block) => 0;

    // create commits, note we need to take care to generate
    // indexes within a valid range as the rbyd grows
    uint32_t prng = 42;
    if (COMMIT == 0) {
        struct lfsr_attr attrs[N];
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? 0
                : BENCH_PRNG(&prng) % (rbyd.count+1);
            attrs[i] = *LFSR_ATTR(
                    MKREG, i_, "\xaa\xaa\xaa\xaa", 4,
                    (i+1 < N) ? &attrs[i+1] : NULL);
        }
        lfsr_rbyd_commit(&lfs, &rbyd, attrs) => 0;
    } else {
        for (lfs_size_t i = 0; i < N; i++) {
            lfs_off_t i_
                = (ORDER == 0) ? i
                : (ORDER == 1) ? 0
                : BENCH_PRNG(&prng) % (rbyd.count+1);
            lfsr_rbyd_commit(&lfs, &rbyd,
                    LFSR_ATTR(MKREG, i_, "\xaa\xaa\xaa\xaa", 4,
                    NULL)) => 0;
        }
    }

    lfsr_rbyd_fetch(&lfs, &rbyd, rbyd.block, NULL) => 0;

    BENCH_START();
    lfs_off_t i_ = BENCH_PRNG(&prng) % N;
    lfsr_rbyd_commit(&lfs, &rbyd,
            LFSR_ATTR(RM, i_, NULL, 0,
            NULL)) => 0;
    BENCH_STOP();
'''
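
These cases might be run and recorded along the lines of the following sketch
(the runner path and flags are illustrative, not part of this commit):

    ./scripts/bench.py runners/bench_runner bench_rbyd -obench.csv
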
scripts/bench.py

@@ -493,7 +493,7 @@ def compile(bench_paths, **args):
     f.writeln('#endif')
     f.writeln()

-def find_runner(runner, **args):
+def find_runner(runner, id=None, **args):
     cmd = runner.copy()

     # run under some external command?
@@ -543,10 +543,18 @@ def find_runner(runner, **args):
         cmd.append('--erase-sleep=%s' % args['erase_sleep'])

     # defines?
-    if args.get('define'):
+    if args.get('define') and id is None:
         for define in args.get('define'):
             cmd.append('-D%s' % define)

+    # test id?
+    #
+    # note we disable defines above when id is explicit, defines override id
+    # in the test runner, which is not what we want when querying an explicit
+    # test id
+    if id is not None:
+        cmd.append(id)
+
     return cmd

 def list_(runner, bench_ids=[], **args):
@@ -568,7 +576,8 @@ def list_(runner, bench_ids=[], **args):
     return sp.call(cmd)


-def find_perms(runner_, ids=[], **args):
+def find_perms(runner, ids=[], **args):
+    runner_ = find_runner(runner, **args)
     case_suites = {}
     expected_case_perms = co.defaultdict(lambda: 0)
     expected_perms = 0
@@ -646,7 +655,8 @@ def find_perms(runner_, ids=[], **args):
         expected_perms,
         total_perms)

-def find_path(runner_, id, **args):
+def find_path(runner, id, **args):
+    runner_ = find_runner(runner, id, **args)
     path = None
     # query from runner
     cmd = runner_ + ['--list-case-paths', id]
@@ -677,7 +687,8 @@ def find_path(runner_, id, **args):

     return path

-def find_defines(runner_, id, **args):
+def find_defines(runner, id, **args):
+    runner_ = find_runner(runner, id, **args)
     # query permutation defines from runner
     cmd = runner_ + ['--list-permutation-defines', id]
     if args.get('verbose'):
@@ -749,13 +760,13 @@ class BenchFailure(Exception):
         self.stdout = stdout
         self.assert_ = assert_

-def run_stage(name, runner_, ids, stdout_, trace_, output_, **args):
+def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
     # get expected suite/case/perm counts
     (case_suites,
      expected_suite_perms,
      expected_case_perms,
      expected_perms,
-     total_perms) = find_perms(runner_, ids, **args)
+     total_perms) = find_perms(runner, ids, **args)

     passed_suite_perms = co.defaultdict(lambda: 0)
     passed_case_perms = co.defaultdict(lambda: 0)
@@ -778,7 +789,7 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
     locals = th.local()
     children = set()

-    def run_runner(runner_, ids=[]):
+    def run_runner(runner_):
         nonlocal passed_suite_perms
         nonlocal passed_case_perms
         nonlocal passed_perms
@@ -788,7 +799,7 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
         nonlocal locals

         # run the benches!
-        cmd = runner_ + ids
+        cmd = runner_
         if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))

@@ -843,7 +854,7 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
                     if output_:
                         # get defines and write to csv
                         defines = find_defines(
-                            runner_, m.group('id'), **args)
+                            runner, m.group('id'), **args)
                         output_.writerow({
                             'suite': suite,
                             'case': case,
@@ -875,7 +886,7 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
                 list(last_stdout),
                 last_assert)

-    def run_job(runner_, ids=[], start=None, step=None):
+    def run_job(start=None, step=None):
         nonlocal failures
         nonlocal killed
         nonlocal locals
@@ -883,16 +894,18 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
         start = start or 0
         step = step or 1
         while start < total_perms:
-            job_runner = runner_.copy()
+            runner_ = find_runner(runner, **args)
             if args.get('isolate') or args.get('valgrind'):
-                job_runner.append('-s%s,%s,%s' % (start, start+step, step))
+                runner_.append('-s%s,%s,%s' % (start, start+step, step))
             else:
-                job_runner.append('-s%s,,%s' % (start, step))
+                runner_.append('-s%s,,%s' % (start, step))

+            runner_.extend(ids)
+
             try:
                 # run the benches
                 locals.seen_perms = 0
-                run_runner(job_runner, ids)
+                run_runner(runner_)
                 assert locals.seen_perms > 0
                 start += locals.seen_perms*step

@@ -902,7 +915,7 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
                 case, _ = failure.id.split(':', 1)
                 suite = case_suites[case]
                 # get defines and write to csv
-                defines = find_defines(runner_, failure.id, **args)
+                defines = find_defines(runner, failure.id, **args)
                 output_.writerow({
                     'suite': suite,
                     'case': case,
@@ -932,11 +945,11 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
     if 'jobs' in args:
         for job in range(args['jobs']):
             runners.append(th.Thread(
-                target=run_job, args=(runner_, ids, job, args['jobs']),
+                target=run_job, args=(job, args['jobs']),
                 daemon=True))
     else:
         runners.append(th.Thread(
-            target=run_job, args=(runner_, ids, None, None),
+            target=run_job, args=(None, None),
             daemon=True))

     def print_update(done):
@@ -999,13 +1012,13 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):

 def run(runner, bench_ids=[], **args):
     # query runner for benches
-    runner_ = find_runner(runner, **args)
-    print('using runner: %s' % ' '.join(shlex.quote(c) for c in runner_))
+    print('using runner: %s' % ' '.join(
+        shlex.quote(c) for c in find_runner(runner, **args)))
     (_,
      expected_suite_perms,
      expected_case_perms,
      expected_perms,
-     total_perms) = find_perms(runner_, bench_ids, **args)
+     total_perms) = find_perms(runner, bench_ids, **args)
     print('found %d suites, %d cases, %d/%d permutations' % (
         len(expected_suite_perms),
         len(expected_case_perms),
@@ -1053,7 +1066,7 @@ def run(runner, bench_ids=[], **args):
          failures_,
          killed) = run_stage(
             by or 'benches',
-            runner_,
+            runner,
             [by] if by is not None else [],
             stdout,
             trace,
@@ -1100,12 +1113,12 @@ def run(runner, bench_ids=[], **args):
     # print each failure
     for failure in failures:
         assert failure.id is not None, '%s broken? %r' % (
-            ' '.join(shlex.quote(c) for c in runner_),
+            ' '.join(shlex.quote(c) for c in find_runner(runner, **args)),
             failure)

         # get some extra info from runner
-        path, lineno = find_path(runner_, failure.id, **args)
-        defines = find_defines(runner_, failure.id, **args)
+        path, lineno = find_path(runner, failure.id, **args)
+        defines = find_defines(runner, failure.id, **args)

         # show summary of failure
         print('%s%s:%d:%sfailure:%s %s%s failed' % (
@@ -1142,19 +1155,19 @@ def run(runner, bench_ids=[], **args):
             or args.get('gdb_case')
             or args.get('gdb_main')):
         failure = failures[0]
-        cmd = runner_ + [failure.id]
+        cmd = find_runner(runner, failure.id, **args)

         if args.get('gdb_main'):
             # we don't really need the case breakpoint here, but it
             # can be helpful
-            path, lineno = find_path(runner_, failure.id, **args)
+            path, lineno = find_path(runner, failure.id, **args)
             cmd[:0] = args['gdb_path'] + [
                 '-ex', 'break main',
                 '-ex', 'break %s:%d' % (path, lineno),
                 '-ex', 'run',
                 '--args']
         elif args.get('gdb_case'):
-            path, lineno = find_path(runner_, failure.id, **args)
+            path, lineno = find_path(runner, failure.id, **args)
             cmd[:0] = args['gdb_path'] + [
                 '-ex', 'break %s:%d' % (path, lineno),
                 '-ex', 'run',
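
The find_runner change above is what keeps explicit -D defines from riding
along when bench.py re-queries the runner about a specific id. Roughly, the
query it builds looks like the following (the runner path and BENCH_ID are
placeholders; --list-permutation-defines and -D are from the diff):

    # defines override an explicit id in the runner, so queries like this
    # must drop any -D flags for the recorded permutation to be reported
    ./runners/bench_runner --list-permutation-defines BENCH_ID
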
scripts/plot.py

@@ -514,11 +514,11 @@ def datasets(results, by=None, x=None, y=None, define=[]):
     results = results_

     # if y not specified, try to guess from data
-    if y is None:
+    if not y:
         y = co.OrderedDict()
         for r in results:
             for k, v in r.items():
-                if (by is None or k not in by) and v.strip():
+                if (not by or k not in by) and v.strip():
                     try:
                         dat(v)
                         y[k] = True
@@ -526,7 +526,7 @@ def datasets(results, by=None, x=None, y=None, define=[]):
                         y[k] = False
         y = list(k for k,v in y.items() if v)

-    if by is not None:
+    if by:
         # find all 'by' values
         ks = set()
         for r in results:
@@ -535,8 +535,8 @@ def datasets(results, by=None, x=None, y=None, define=[]):

     # collect all datasets
     datasets = co.OrderedDict()
-    for ks_ in (ks if by is not None else [()]):
-        for x_ in (x if x is not None else [None]):
+    for ks_ in (ks if by else [()]):
+        for x_ in (x if x else [None]):
             for y_ in y:
                 # hide x/y if there is only one field
                 k_x = x_ if len(x or []) > 1 else ''
@@ -547,7 +547,7 @@ def datasets(results, by=None, x=None, y=None, define=[]):
                     x_,
                     y_,
                     [(by_, {k_}) for by_, k_ in zip(by, ks_)]
-                        if by is not None else [])
+                        if by else [])

     return datasets

@@ -822,6 +822,7 @@ def main(csv_paths, *,
         colors=None,
         chars=None,
         line_chars=None,
+        labels=None,
         points=False,
         points_and_lines=False,
         width=None,
@@ -876,26 +877,41 @@ def main(csv_paths, *,
     else:
         line_chars_ = [False]

+    if labels is not None:
+        labels_ = labels
+    else:
+        labels_ = ['']
+
     # allow escape codes in labels/titles
     title = escape(title).splitlines() if title is not None else []
     xlabel = escape(xlabel).splitlines() if xlabel is not None else []
     ylabel = escape(ylabel).splitlines() if ylabel is not None else []

+    # subplot can also contribute to subplots, resolve this here or things
+    # become a mess...
+    subplots += subplot.pop('subplots', [])
+
+    # allow any subplots to contribute to by/x/y
+    def subplots_get(k, *, subplots=[], **args):
+        v = args.get(k, []).copy()
+        for _, subargs in subplots:
+            v.extend(subplots_get(k, **subargs))
+        return v
+
+    all_by = (by or []) + subplots_get('by', **subplot, subplots=subplots)
+    all_x = (x or []) + subplots_get('x', **subplot, subplots=subplots)
+    all_y = (y or []) + subplots_get('y', **subplot, subplots=subplots)
+
     # separate out renames
     renames = list(it.chain.from_iterable(
         ((k, v) for v in vs)
-        for k, vs in it.chain(by or [], x or [], y or [])))
-    if by is not None:
-        by = [k for k, _ in by]
-    if x is not None:
-        x = [k for k, _ in x]
-    if y is not None:
-        y = [k for k, _ in y]
+        for k, vs in it.chain(all_by, all_x, all_y)))
+    all_by = [k for k, _ in all_by]
+    all_x = [k for k, _ in all_x]
+    all_y = [k for k, _ in all_y]

     # create a grid of subplots
-    grid = Grid.fromargs(
-        subplots=subplots + subplot.pop('subplots', []),
-        **subplot)
+    grid = Grid.fromargs(**subplot, subplots=subplots)

     for s in grid:
         # allow subplot params to override global params
@@ -980,7 +996,7 @@ def main(csv_paths, *,
     results = collect(csv_paths, renames)

     # then extract the requested datasets
-    datasets_ = datasets(results, by, x, y, define)
+    datasets_ = datasets(results, all_by, all_x, all_y, define)

     # figure out colors/chars here so that subplot defines
     # don't change them later, that'd be bad
@@ -993,6 +1009,9 @@ def main(csv_paths, *,
     dataline_chars_ = {
         name: line_chars_[i % len(line_chars_)]
         for i, name in enumerate(datasets_.keys())}
+    datalabels_ = {
+        name: labels_[i % len(labels_)]
+        for i, name in enumerate(datasets_.keys())}

     # build legend?
     legend_width = 0
@@ -1000,12 +1019,13 @@ def main(csv_paths, *,
         legend_ = []
         for i, k in enumerate(datasets_.keys()):
             label = '%s%s' % (
-                '%s ' % chars_[i % len(chars_)]
+                '%s ' % datachars_[k]
                     if chars is not None
-                    else '%s ' % line_chars_[i % len(line_chars_)]
+                    else '%s ' % dataline_chars_[k]
                     if line_chars is not None
                     else '',
-                ','.join(k_ for k_ in k if k_))
+                datalabels_[k]
+                    or ','.join(k_ for k_ in k if k_))

             if label:
                 legend_.append(label)
@@ -1104,6 +1124,8 @@ def main(csv_paths, *,
     # create a plot for each subplot
     for s in grid:
         # allow subplot params to override global params
+        x_ = {k for k,_ in (x or []) + s.args.get('x', [])}
+        y_ = {k for k,_ in (y or []) + s.args.get('y', [])}
         define_ = define + s.args.get('define', [])
         xlim_ = s.args.get('xlim', xlim)
         ylim_ = s.args.get('ylim', ylim)
@@ -1118,7 +1140,13 @@ def main(csv_paths, *,

         # data can be constrained by subplot-specific defines,
         # so re-extract for each plot
-        subdatasets = datasets(results, by, x, y, define_)
+        subdatasets = datasets(results, all_by, all_x, all_y, define_)
+
+        # filter by subplot x/y
+        subdatasets = co.OrderedDict([(name, dataset)
+            for name, dataset in subdatasets.items()
+            if not name[-2] or name[-2] in x_
+            if not name[-1] or name[-1] in y_])

         # find actual xlim/ylim
         xlim_ = (
@@ -1446,6 +1474,10 @@ if __name__ == "__main__":
     parser.add_argument(
         '--line-chars',
         help="Characters to use for lines.")
+    parser.add_argument(
+        '--labels',
+        type=lambda x: [x.strip() for x in x.split(',')],
+        help="Comma-separated legend labels.")
     parser.add_argument(
         '-W', '--width',
         nargs='?',
scripts/plotmpl.py

@@ -260,11 +260,11 @@ def datasets(results, by=None, x=None, y=None, define=[]):
     results = results_

     # if y not specified, try to guess from data
-    if y is None:
+    if not y:
         y = co.OrderedDict()
         for r in results:
             for k, v in r.items():
-                if (by is None or k not in by) and v.strip():
+                if (not by or k not in by) and v.strip():
                     try:
                         dat(v)
                         y[k] = True
@@ -272,7 +272,7 @@ def datasets(results, by=None, x=None, y=None, define=[]):
                         y[k] = False
         y = list(k for k,v in y.items() if v)

-    if by is not None:
+    if by:
         # find all 'by' values
         ks = set()
         for r in results:
@@ -281,8 +281,8 @@ def datasets(results, by=None, x=None, y=None, define=[]):

     # collect all datasets
     datasets = co.OrderedDict()
-    for ks_ in (ks if by is not None else [()]):
-        for x_ in (x if x is not None else [None]):
+    for ks_ in (ks if by else [()]):
+        for x_ in (x if x else [None]):
             for y_ in y:
                 # hide x/y if there is only one field
                 k_x = x_ if len(x or []) > 1 else ''
@@ -293,7 +293,7 @@ def datasets(results, by=None, x=None, y=None, define=[]):
                     x_,
                     y_,
                     [(by_, {k_}) for by_, k_ in zip(by, ks_)]
-                        if by is not None else [])
+                        if by else [])

     return datasets

@@ -570,6 +570,7 @@ def main(csv_paths, output, *,
         points_and_lines=False,
         colors=None,
         formats=None,
+        labels=None,
         width=WIDTH,
         height=HEIGHT,
         xlim=(None,None),
@@ -645,6 +646,11 @@ def main(csv_paths, output, *,
     else:
         formats_ = FORMATS

+    if labels is not None:
+        labels_ = labels
+    else:
+        labels_ = ['']
+
     if font_color is not None:
         font_color_ = font_color
     elif dark:
@@ -723,24 +729,36 @@ def main(csv_paths, output, *,
     # equivalent to 96, maybe this is the default for SVG rendering?
     plt.rc('figure', dpi=96)

+    # subplot can also contribute to subplots, resolve this here or things
+    # become a mess...
+    subplots += subplot.pop('subplots', [])
+
+    # allow any subplots to contribute to by/x/y
+    def subplots_get(k, *, subplots=[], **args):
+        v = args.get(k, []).copy()
+        for _, subargs in subplots:
+            v.extend(subplots_get(k, **subargs))
+        return v
+
+    all_by = (by or []) + subplots_get('by', **subplot, subplots=subplots)
+    all_x = (x or []) + subplots_get('x', **subplot, subplots=subplots)
+    all_y = (y or []) + subplots_get('y', **subplot, subplots=subplots)
+
     # separate out renames
     renames = list(it.chain.from_iterable(
         ((k, v) for v in vs)
-        for k, vs in it.chain(by or [], x or [], y or [])))
-    if by is not None:
-        by = [k for k, _ in by]
-    if x is not None:
-        x = [k for k, _ in x]
-    if y is not None:
-        y = [k for k, _ in y]
+        for k, vs in it.chain(all_by, all_x, all_y)))
+    all_by = [k for k, _ in all_by]
+    all_x = [k for k, _ in all_x]
+    all_y = [k for k, _ in all_y]

     # first collect results from CSV files
     results = collect(csv_paths, renames)

     # then extract the requested datasets
-    datasets_ = datasets(results, by, x, y, define)
+    datasets_ = datasets(results, all_by, all_x, all_y, define)

-    # figure out formats/colors here so that subplot defines
+    # figure out formats/colors/labels here so that subplot defines
     # don't change them later, that'd be bad
     dataformats_ = {
         name: formats_[i % len(formats_)]
@@ -748,11 +766,12 @@ def main(csv_paths, output, *,
     datacolors_ = {
         name: colors_[i % len(colors_)]
         for i, name in enumerate(datasets_.keys())}
+    datalabels_ = {
+        name: labels_[i % len(labels_)]
+        for i, name in enumerate(datasets_.keys())}

     # create a grid of subplots
-    grid = Grid.fromargs(
-        subplots=subplots + subplot.pop('subplots', []),
-        **subplot)
+    grid = Grid.fromargs(**subplot, subplots=subplots)

     # create a matplotlib plot
     fig = plt.figure(figsize=(
@@ -785,6 +804,8 @@ def main(csv_paths, output, *,
     # now plot each subplot
     for s in grid:
         # allow subplot params to override global params
+        x_ = {k for k,_ in (x or []) + s.args.get('x', [])}
+        y_ = {k for k,_ in (y or []) + s.args.get('y', [])}
         define_ = define + s.args.get('define', [])
         xlim_ = s.args.get('xlim', xlim)
         ylim_ = s.args.get('ylim', ylim)
@@ -812,7 +833,13 @@ def main(csv_paths, output, *,

         # data can be constrained by subplot-specific defines,
         # so re-extract for each plot
-        subdatasets = datasets(results, by, x, y, define_)
+        subdatasets = datasets(results, all_by, all_x, all_y, define_)
+
+        # filter by subplot x/y
+        subdatasets = co.OrderedDict([(name, dataset)
+            for name, dataset in subdatasets.items()
+            if not name[-2] or name[-2] in x_
+            if not name[-1] or name[-1] in y_])

         # plot!
         ax = s.ax
@@ -924,17 +951,24 @@ def main(csv_paths, output, *,
     #
     # note this was written before constrained_layout supported legend
     # collisions, hopefully this is added in the future
-    labels = co.OrderedDict()
+    legend = {}
     for s in grid:
         for h, l in zip(*s.ax.get_legend_handles_labels()):
-            labels[l] = h
+            legend[l] = h
+    # sort in dataset order
+    legend_ = []
+    for name in datasets_.keys():
+        name_ = ','.join(k for k in name if k)
+        if name_ in legend:
+            legend_.append((datalabels_[name] or name_, legend[name_]))
+    legend = legend_

     if legend_right:
         ax = fig.add_subplot(gs[(1 if legend_above else 0):,-1])
         ax.set_axis_off()
         ax.legend(
-            labels.values(),
-            labels.keys(),
+            [h for _,h in legend],
+            [l for l,_ in legend],
             loc='upper left',
             fancybox=False,
             borderaxespad=0)
@@ -944,10 +978,19 @@ def main(csv_paths, output, *,
         ax.set_axis_off()

         # try different column counts until we fit in the axes
-        for ncol in reversed(range(1, len(labels)+1)):
+        for ncol in reversed(range(1, len(legend)+1)):
+            # permute the labels, mpl wants to order these column first
+            nrow = m.ceil(len(legend)/ncol)
+            legend_ = ncol*nrow*[None]
+            for x in range(ncol):
+                for y in range(nrow):
+                    if x+ncol*y < len(legend):
+                        legend_[x*nrow+y] = legend[x+ncol*y]
+            legend_ = [l for l in legend_ if l is not None]
+
             legend_ = ax.legend(
-                labels.values(),
-                labels.keys(),
+                [h for _,h in legend_],
+                [l for l,_ in legend_],
                 loc='upper center',
                 ncol=ncol,
                 fancybox=False,
@@ -969,10 +1012,19 @@ def main(csv_paths, output, *,
             weight=plt.rcParams['axes.labelweight'])

         # try different column counts until we fit in the axes
-        for ncol in reversed(range(1, len(labels)+1)):
+        for ncol in reversed(range(1, len(legend)+1)):
+            # permute the labels, mpl wants to order these column first
+            nrow = m.ceil(len(legend)/ncol)
+            legend_ = ncol*nrow*[None]
+            for x in range(ncol):
+                for y in range(nrow):
+                    if x+ncol*y < len(legend):
+                        legend_[x*nrow+y] = legend[x+ncol*y]
+            legend_ = [l for l in legend_ if l is not None]
+
             legend_ = ax.legend(
-                labels.values(),
-                labels.keys(),
+                [h for _,h in legend_],
+                [l for l,_ in legend_],
                 loc='upper center',
                 ncol=ncol,
                 fancybox=False,
@@ -1088,6 +1140,10 @@ if __name__ == "__main__":
         type=lambda x: [x.strip().replace('0',',') for x in x.split(',')],
         help="Comma-separated matplotlib formats to use. Allows '0' as an "
             "alternative for ','.")
+    parser.add_argument(
+        '--labels',
+        type=lambda x: [x.strip() for x in x.split(',')],
+        help="Comma-separated legend labels.")
     parser.add_argument(
         '-W', '--width',
         type=lambda x: int(x, 0),
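
With by/x/y now resolved globally but filtered per subplot, different -y
fields can land on different subplots while sharing one dataset ordering,
legend, and color assignment. Assuming the scripts' existing subplot option
syntax (not shown in this diff, so treat the flags and field names as
illustrative):

    ./scripts/plotmpl.py bench.csv -obench.svg -xN \
        --subplot='-ybench_readed' \
        --subplot-right='-ybench_proged' \
        --labels=read,prog
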
scripts/test.py

@@ -507,7 +507,7 @@ def compile(test_paths, **args):
     f.writeln('#endif')
     f.writeln()

-def find_runner(runner, **args):
+def find_runner(runner, id=None, **args):
     cmd = runner.copy()

     # run under some external command?
@@ -559,10 +559,18 @@ def find_runner(runner, **args):
         cmd.append('--erase-sleep=%s' % args['erase_sleep'])

     # defines?
-    if args.get('define'):
+    if args.get('define') and id is None:
         for define in args.get('define'):
             cmd.append('-D%s' % define)

+    # test id?
+    #
+    # note we disable defines above when id is explicit, defines override id
+    # in the test runner, which is not what we want when querying an explicit
+    # test id
+    if id is not None:
+        cmd.append(id)
+
     return cmd

 def list_(runner, test_ids=[], **args):
@@ -585,7 +593,8 @@ def list_(runner, test_ids=[], **args):
     return sp.call(cmd)


-def find_perms(runner_, ids=[], **args):
+def find_perms(runner, ids=[], **args):
+    runner_ = find_runner(runner, **args)
     case_suites = {}
     expected_case_perms = co.defaultdict(lambda: 0)
     expected_perms = 0
@@ -663,7 +672,8 @@ def find_perms(runner_, ids=[], **args):
         expected_perms,
         total_perms)

-def find_path(runner_, id, **args):
+def find_path(runner, id, **args):
+    runner_ = find_runner(runner, id, **args)
     path = None
     # query from runner
     cmd = runner_ + ['--list-case-paths', id]
@@ -694,7 +704,8 @@ def find_path(runner_, id, **args):

     return path

-def find_defines(runner_, id, **args):
+def find_defines(runner, id, **args):
+    runner_ = find_runner(runner, id, **args)
     # query permutation defines from runner
     cmd = runner_ + ['--list-permutation-defines', id]
     if args.get('verbose'):
@@ -766,13 +777,13 @@ class TestFailure(Exception):
         self.stdout = stdout
         self.assert_ = assert_

-def run_stage(name, runner_, ids, stdout_, trace_, output_, **args):
+def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
     # get expected suite/case/perm counts
     (case_suites,
      expected_suite_perms,
      expected_case_perms,
      expected_perms,
-     total_perms) = find_perms(runner_, ids, **args)
+     total_perms) = find_perms(runner, ids, **args)

     passed_suite_perms = co.defaultdict(lambda: 0)
     passed_case_perms = co.defaultdict(lambda: 0)
@@ -790,7 +801,7 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
     locals = th.local()
     children = set()

-    def run_runner(runner_, ids=[]):
+    def run_runner(runner_):
         nonlocal passed_suite_perms
         nonlocal passed_case_perms
         nonlocal passed_perms
@@ -798,7 +809,7 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
         nonlocal locals

         # run the tests!
-        cmd = runner_ + ids
+        cmd = runner_
         if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))

@@ -850,7 +861,7 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
                     if output_:
                         # get defines and write to csv
                         defines = find_defines(
-                            runner_, m.group('id'), **args)
+                            runner, m.group('id'), **args)
                         output_.writerow({
                             'suite': suite,
                             'case': case,
@@ -880,7 +891,7 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
                 list(last_stdout),
                 last_assert)

-    def run_job(runner_, ids=[], start=None, step=None):
+    def run_job(start=None, step=None):
         nonlocal failures
         nonlocal killed
         nonlocal locals
@@ -888,16 +899,18 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
         start = start or 0
         step = step or 1
         while start < total_perms:
-            job_runner = runner_.copy()
+            runner_ = find_runner(runner, **args)
             if args.get('isolate') or args.get('valgrind'):
-                job_runner.append('-s%s,%s,%s' % (start, start+step, step))
+                runner_.append('-s%s,%s,%s' % (start, start+step, step))
             else:
-                job_runner.append('-s%s,,%s' % (start, step))
+                runner_.append('-s%s,,%s' % (start, step))

+            runner_.extend(ids)
+
             try:
                 # run the tests
                 locals.seen_perms = 0
-                run_runner(job_runner, ids)
+                run_runner(runner_)
                 assert locals.seen_perms > 0
                 start += locals.seen_perms*step

@@ -907,7 +920,7 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
                 case, _ = failure.id.split(':', 1)
                 suite = case_suites[case]
                 # get defines and write to csv
-                defines = find_defines(runner_, failure.id, **args)
+                defines = find_defines(runner, failure.id, **args)
                 output_.writerow({
                     'suite': suite,
                     'case': case,
@@ -938,11 +951,11 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):
     if 'jobs' in args:
         for job in range(args['jobs']):
             runners.append(th.Thread(
-                target=run_job, args=(runner_, ids, job, args['jobs']),
+                target=run_job, args=(job, args['jobs']),
                 daemon=True))
     else:
         runners.append(th.Thread(
-            target=run_job, args=(runner_, ids, None, None),
+            target=run_job, args=(None, None),
             daemon=True))

     def print_update(done):
@@ -1005,13 +1018,13 @@ def run_stage(name, runner, ids, stdout_, trace_, output_, **args):

 def run(runner, test_ids=[], **args):
     # query runner for tests
-    runner_ = find_runner(runner, **args)
-    print('using runner: %s' % ' '.join(shlex.quote(c) for c in runner_))
+    print('using runner: %s' % ' '.join(
+        shlex.quote(c) for c in find_runner(runner, **args)))
     (_,
      expected_suite_perms,
      expected_case_perms,
      expected_perms,
-     total_perms) = find_perms(runner_, test_ids, **args)
+     total_perms) = find_perms(runner, test_ids, **args)
     print('found %d suites, %d cases, %d/%d permutations' % (
         len(expected_suite_perms),
         len(expected_case_perms),
@@ -1055,7 +1068,7 @@ def run(runner, test_ids=[], **args):
          failures_,
          killed) = run_stage(
             by or 'tests',
-            runner_,
+            runner,
             [by] if by is not None else [],
             stdout,
             trace,
@@ -1100,12 +1113,12 @@ def run(runner, test_ids=[], **args):
     # print each failure
     for failure in failures:
         assert failure.id is not None, '%s broken? %r' % (
-            ' '.join(shlex.quote(c) for c in runner_),
+            ' '.join(shlex.quote(c) for c in find_runner(runner, **args)),
             failure)

         # get some extra info from runner
-        path, lineno = find_path(runner_, failure.id, **args)
-        defines = find_defines(runner_, failure.id, **args)
+        path, lineno = find_path(runner, failure.id, **args)
+        defines = find_defines(runner, failure.id, **args)

         # show summary of failure
         print('%s%s:%d:%sfailure:%s %s%s failed' % (
@@ -1145,25 +1158,25 @@ def run(runner, test_ids=[], **args):
             or args.get('gdb_pl_before')
             or args.get('gdb_pl_after')):
         failure = failures[0]
-        cmd = runner_ + [failure.id]
+        cmd = find_runner(runner, failure.id, **args)

         if args.get('gdb_main'):
             # we don't really need the case breakpoint here, but it
             # can be helpful
-            path, lineno = find_path(runner_, failure.id, **args)
+            path, lineno = find_path(runner, failure.id, **args)
             cmd[:0] = args['gdb_path'] + [
                 '-ex', 'break main',
                 '-ex', 'break %s:%d' % (path, lineno),
                 '-ex', 'run',
                 '--args']
         elif args.get('gdb_case'):
-            path, lineno = find_path(runner_, failure.id, **args)
+            path, lineno = find_path(runner, failure.id, **args)
             cmd[:0] = args['gdb_path'] + [
                 '-ex', 'break %s:%d' % (path, lineno),
                 '-ex', 'run',
                 '--args']
         elif args.get('gdb_pl') is not None:
-            path, lineno = find_path(runner_, failure.id, **args)
+            path, lineno = find_path(runner, failure.id, **args)
             cmd[:0] = args['gdb_path'] + [
                 '-ex', 'break %s:%d' % (path, lineno),
                 '-ex', 'ignore 1 %d' % args['gdb_pl'],
@@ -1175,7 +1188,7 @@ def run(runner, test_ids=[], **args):
                 sum(1 for _ in re.finditer('[0-9a-f]',
                     failure.id.split(':', 2)[-1]))
                 if failure.id.count(':') >= 2 else 0)
-            path, lineno = find_path(runner_, failure.id, **args)
+            path, lineno = find_path(runner, failure.id, **args)
             cmd[:0] = args['gdb_path'] + [
                 '-ex', 'break %s:%d' % (path, lineno),
                 '-ex', 'ignore 1 %d' % max(powerlosses-1, 0),
@@ -1187,7 +1200,7 @@ def run(runner, test_ids=[], **args):
                 sum(1 for _ in re.finditer('[0-9a-f]',
                     failure.id.split(':', 2)[-1]))
                 if failure.id.count(':') >= 2 else 0)
-            path, lineno = find_path(runner_, failure.id, **args)
+            path, lineno = find_path(runner, failure.id, **args)
             cmd[:0] = args['gdb_path'] + [
                 '-ex', 'break %s:%d' % (path, lineno),
                 '-ex', 'ignore 1 %d' % powerlosses,