Misc test-runner improvements

- Added --disk/--trace/--output options for information-heavy debugging.
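
  For example, a hypothetical debugging invocation (the script path and
  file names here are illustrative, the flags come from the new option
  tables below):

    ./scripts/test.py --disk=disk.img --trace=trace.log --output=test.log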

- Renamed --skip/--count/--every to --start/--stop/--step.

  This matches the common terms for ranges, and frees up --skip to be
  used for skipping test cases in the future.
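
  The new semantics mirror Python's range(start, stop, step); roughly,
  as a sketch rather than the runner's exact code:

    # a test at index i runs iff i falls in range(start, stop, step)
    def runs(i, start, stop, step):
        return i >= start and i < stop and (i - start) % step == 0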

- Better handling of SIGTERM: now all tests are killed, reported as
  failures, and testing is halted regardless of -k.

  This is a compromise: you throw away the rest of the tests, which
  is normally what -k is for, but it avoids annoying-to-terminate
  processes when debugging, which is a very interactive process.
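
  A minimal sketch of the kill-all-children pattern (names here are
  illustrative, not the script's exact code):

    import signal

    children = set()  # filled with subprocess.Popen handles as tests spawn

    def kill_children(signum, frame):
        # iterate over a copy, since the set shrinks as children exit
        for child in children.copy():
            child.kill()

    signal.signal(signal.SIGTERM, kill_children)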

commit 5ee4b052ae
parent 5812d2b5cf
Author: Christopher Haster
Date:   2022-04-25 01:08:44 -05:00

2 changed files with 72 additions and 54 deletions


@@ -161,11 +161,11 @@ static const char *test_case = NULL;
static size_t test_perm = -1;
static const char *test_geometry = NULL;
static test_types_t test_types = 0;
static size_t test_skip = 0;
static size_t test_count = -1;
static size_t test_every = 1;
static size_t test_start = 0;
static size_t test_stop = -1;
static size_t test_step = 1;
static const char *test_persist = NULL;
static const char *test_disk = NULL;
FILE *test_trace = NULL;
// note, these skips are different than filtered tests
@@ -188,9 +188,9 @@ static bool test_perm_skip(size_t perm) {
}
static bool test_step_skip(size_t step) {
return !(step >= test_skip
&& (step-test_skip) < test_count
&& (step-test_skip) % test_every == 0);
return !(step >= test_start
&& step < test_stop
&& (step-test_start) % test_step == 0);
}
static void test_case_permcount(
@@ -528,7 +528,7 @@ static void run(void) {
.power_cycles = 0,
};
int err = lfs_testbd_createcfg(&cfg, test_persist, &bdcfg);
int err = lfs_testbd_createcfg(&cfg, test_disk, &bdcfg);
if (err) {
fprintf(stderr, "error: "
"could not create block device: %d\n", err);
@@ -572,10 +572,10 @@ enum opt_flags {
OPT_NORMAL = 'n',
OPT_REENTRANT = 'r',
OPT_VALGRIND = 'V',
OPT_SKIP = 5,
OPT_COUNT = 6,
OPT_EVERY = 7,
OPT_PERSIST = 'p',
OPT_START = 5,
OPT_STEP = 6,
OPT_STOP = 7,
OPT_DISK = 'd',
OPT_TRACE = 't',
};
@@ -595,10 +595,10 @@ const struct option long_opts[] = {
{"normal", no_argument, NULL, OPT_NORMAL},
{"reentrant", no_argument, NULL, OPT_REENTRANT},
{"valgrind", no_argument, NULL, OPT_VALGRIND},
{"skip", required_argument, NULL, OPT_SKIP},
{"count", required_argument, NULL, OPT_COUNT},
{"every", required_argument, NULL, OPT_EVERY},
{"persist", required_argument, NULL, OPT_PERSIST},
{"start", required_argument, NULL, OPT_START},
{"stop", required_argument, NULL, OPT_STOP},
{"step", required_argument, NULL, OPT_STEP},
{"disk", required_argument, NULL, OPT_DISK},
{"trace", required_argument, NULL, OPT_TRACE},
{NULL, 0, NULL, 0},
};
@@ -617,10 +617,10 @@ const char *const help_text[] = {
"Filter for normal tests. Can be combined.",
"Filter for reentrant tests. Can be combined.",
"Filter for Valgrind tests. Can be combined.",
"Skip the first n tests.",
"Stop after n tests.",
"Only run every n tests, calculated after --skip and --stop.",
"Persist the disk to this file.",
"Start at the nth test.",
"Stop before the nth test.",
"Only run every n tests, calculated after --start and --stop.",
"Use this file as the disk.",
"Redirect trace output to this file.",
};
@@ -766,35 +766,35 @@ invalid_define:
case OPT_VALGRIND:
test_types |= TEST_VALGRIND;
break;
case OPT_SKIP: {
case OPT_START: {
char *parsed = NULL;
test_skip = strtoumax(optarg, &parsed, 0);
test_start = strtoumax(optarg, &parsed, 0);
if (parsed == optarg) {
fprintf(stderr, "error: invalid skip: %s\n", optarg);
exit(-1);
}
break;
}
case OPT_COUNT: {
case OPT_STOP: {
char *parsed = NULL;
test_count = strtoumax(optarg, &parsed, 0);
test_stop = strtoumax(optarg, &parsed, 0);
if (parsed == optarg) {
fprintf(stderr, "error: invalid count: %s\n", optarg);
exit(-1);
}
break;
}
case OPT_EVERY: {
case OPT_STEP: {
char *parsed = NULL;
test_every = strtoumax(optarg, &parsed, 0);
test_step = strtoumax(optarg, &parsed, 0);
if (parsed == optarg) {
fprintf(stderr, "error: invalid every: %s\n", optarg);
exit(-1);
}
break;
}
case OPT_PERSIST:
test_persist = optarg;
case OPT_DISK:
test_disk = optarg;
break;
case OPT_TRACE:
if (strcmp(optarg, "-") == 0) {


@@ -570,17 +570,17 @@ class TestFailure(Exception):
self.output = output
self.assert_ = assert_
def run_step(name, runner_, **args):
def run_stage(name, runner_, **args):
# get expected suite/case/perm counts
expected_suite_perms, expected_case_perms, expected_perms, total_perms = (
find_cases(runner_, **args))
# TODO persist/trace
# TODO valgrind/gdb/exec
passed_suite_perms = co.defaultdict(lambda: 0)
passed_case_perms = co.defaultdict(lambda: 0)
passed_perms = 0
failures = []
killed = False
pattern = re.compile('^(?:'
'(?P<op>running|finished|skipped) '
@@ -598,14 +598,20 @@ def run_step(name, runner_, **args):
nonlocal locals
# run the tests!
cmd = runner_
cmd = runner_.copy()
if args.get('disk'):
cmd.append('--disk=%s' % args['disk'])
if args.get('trace'):
cmd.append('--trace=%s' % args['trace'])
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
mpty, spty = pty.openpty()
proc = sp.Popen(cmd, stdout=spty, stderr=spty)
os.close(spty)
mpty = os.fdopen(mpty, 'r', 1)
children.add(proc)
mpty = os.fdopen(mpty, 'r', 1)
if args.get('output'):
output = openio(args['output'], 'w')
last_id = None
last_output = []
@@ -622,7 +628,9 @@ def run_step(name, runner_, **args):
if not line:
break
last_output.append(line)
if args.get('verbose'):
if args.get('output'):
output.write(line)
elif args.get('verbose'):
sys.stdout.write(line)
m = pattern.match(line)
@@ -652,6 +660,8 @@ def run_step(name, runner_, **args):
finally:
children.remove(proc)
mpty.close()
if args.get('output'):
output.close()
proc.wait()
if proc.returncode != 0:
@@ -661,16 +671,16 @@ def run_step(name, runner_, **args):
last_output,
last_assert)
def run_job(runner, skip=None, every=None):
def run_job(runner, start=None, step=None):
nonlocal failures
nonlocal locals
while (skip or 0) < total_perms:
while (start or 0) < total_perms:
runner_ = runner.copy()
if skip is not None:
runner_.append('--skip=%d' % skip)
if every is not None:
runner_.append('--every=%d' % every)
if start is not None:
runner_.append('--start=%d' % start)
if step is not None:
runner_.append('--step=%d' % step)
try:
# run the tests
@@ -684,13 +694,13 @@ def run_step(name, runner_, **args):
failures.append(failure)
if args.get('keep_going'):
if args.get('keep_going') and not killed:
# resume after failed test
skip = (skip or 0) + locals.seen_perms*(every or 1)
start = (start or 0) + locals.seen_perms*(step or 1)
continue
else:
# stop other tests
for child in children:
for child in children.copy():
child.kill()
break
@@ -733,6 +743,10 @@ def run_step(name, runner_, **args):
if failures else ''))
sys.stdout.flush()
needs_newline = True
except KeyboardInterrupt:
# this is handled by the runner threads, we just
# need to not abort here
killed = True
finally:
if needs_newline:
print()
@@ -743,7 +757,8 @@ def run_step(name, runner_, **args):
return (
expected_perms,
passed_perms,
failures)
failures,
killed)
def run(**args):
@@ -767,41 +782,41 @@ def run(**args):
if args.get('by_suites'):
for type in ['normal', 'reentrant', 'valgrind']:
for suite in expected_suite_perms.keys():
expected_, passed_, failures_ = run_step(
expected_, passed_, failures_, killed = run_stage(
'%s %s' % (type, suite),
runner_ + ['--%s' % type, suite],
**args)
expected += expected_
passed += passed_
failures.extend(failures_)
if failures and not args.get('keep_going'):
if (failures and not args.get('keep_going')) or killed:
break
if failures and not args.get('keep_going'):
if (failures and not args.get('keep_going')) or killed:
break
elif args.get('by_cases'):
for type in ['normal', 'reentrant', 'valgrind']:
for case in expected_case_perms.keys():
expected_, passed_, failures_ = run_step(
expected_, passed_, failures_, killed = run_stage(
'%s %s' % (type, case),
runner_ + ['--%s' % type, case],
**args)
expected += expected_
passed += passed_
failures.extend(failures_)
if failures and not args.get('keep_going'):
if (failures and not args.get('keep_going')) or killed:
break
if failures and not args.get('keep_going'):
if (failures and not args.get('keep_going')) or killed:
break
else:
for type in ['normal', 'reentrant', 'valgrind']:
expected_, passed_, failures_ = run_step(
expected_, passed_, failures_, killed = run_stage(
'%s tests' % type,
runner_ + ['--%s' % type],
**args)
expected += expected_
passed += passed_
failures.extend(failures_)
if failures and not args.get('keep_going'):
if (failures and not args.get('keep_going')) or killed:
break
# show summary
@@ -867,7 +882,8 @@ if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Build and run tests.")
description="Build and run tests.",
conflict_handler='resolve')
# TODO document test case/perm specifier
parser.add_argument('test_paths', nargs='*',
help="Description of testis to run. May be a directory, path, or \
@@ -900,10 +916,12 @@ if __name__ == "__main__":
help="Filter for reentrant tests. Can be combined.")
test_parser.add_argument('-V', '--valgrind', action='store_true',
help="Filter for Valgrind tests. Can be combined.")
test_parser.add_argument('-p', '--persist',
help="Persist the disk to this file.")
test_parser.add_argument('-d', '--disk',
help="Use this file as the disk.")
test_parser.add_argument('-t', '--trace',
help="Redirect trace output to this file.")
test_parser.add_argument('-o', '--output',
help="Redirect stdout and stderr to this file.")
test_parser.add_argument('--runner', default=[RUNNER_PATH],
type=lambda x: x.split(),
help="Path to runner, defaults to %r" % RUNNER_PATH)