A couple Makefile-related tweaks

- Changed --(tool)-tool to --(tool)-path in scripts; this seems to be
  a more common name for this sort of flag (a short sketch of the
  parsing convention follows below).

- Changed BUILDDIR to not have an implicit trailing slash, which makes
  the Makefile internals a bit more readable.

- Fixed some outdated names hidden in less-often-used ifdefs.
Christopher Haster
2022-11-17 10:03:46 -06:00
parent e35e078943
commit bcc88f52f4
10 changed files with 204 additions and 190 deletions
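
For context, the renamed --*-path flags keep the scripts' existing parsing
convention: the value is split on whitespace, so it can name a
cross-toolchain binary and carry extra options along with it. Below is a
minimal, self-contained sketch of that argparse pattern as it appears in
the scripts; the arm-none-eabi-nm/--no-demangle value and the lfs.o
argument are only illustrative.

import argparse
import shlex

NM_PATH = ['nm']

parser = argparse.ArgumentParser()
parser.add_argument(
    '--nm-path',
    type=lambda x: x.split(),
    default=NM_PATH,
    help="Path to the nm executable, may include flags. "
        "Defaults to %r." % NM_PATH)

# a --nm-path value may carry flags alongside the executable name
args = parser.parse_args(['--nm-path', 'arm-none-eabi-nm --no-demangle'])

# the parsed value is a list, so the extra flags ride along when the
# command is assembled
cmd = args.nm_path + ['--size-sort', 'lfs.o']
print(' '.join(shlex.quote(c) for c in cmd))
# -> arm-none-eabi-nm --no-demangle --size-sort lfs.o

On the Makefile side, BUILDDIR now defaults to . and the separating slash
is written out explicitly at each use, so for example make BUILDDIR=build
produces build/lfs.a and build/runners/test_runner without the caller
having to append a trailing slash.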

Makefile

@@ -1,21 +1,20 @@
 ifdef BUILDDIR
-# make sure BUILDDIR ends with a slash
-override BUILDDIR := $(BUILDDIR)/
 # bit of a hack, but we want to make sure BUILDDIR directory structure
 # is correct before any commands
 $(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
-	$(BUILDDIR) \
-	$(BUILDDIR)bd \
-	$(BUILDDIR)runners \
-	$(BUILDDIR)tests \
-	$(BUILDDIR)benches))
+	$(BUILDDIR)/ \
+	$(BUILDDIR)/bd \
+	$(BUILDDIR)/runners \
+	$(BUILDDIR)/tests \
+	$(BUILDDIR)/benches))
 endif
+BUILDDIR ?= .

 # overridable target/src/tools/flags/etc
 ifneq ($(wildcard test.c main.c),)
-TARGET ?= $(BUILDDIR)lfs
+TARGET ?= $(BUILDDIR)/lfs
 else
-TARGET ?= $(BUILDDIR)lfs.a
+TARGET ?= $(BUILDDIR)/lfs.a
 endif
@@ -30,19 +29,19 @@ GDB ?= gdb
 PERF ?= perf

 SRC ?= $(filter-out $(wildcard *.*.c),$(wildcard *.c))
-OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
-DEP := $(SRC:%.c=$(BUILDDIR)%.d)
-ASM := $(SRC:%.c=$(BUILDDIR)%.s)
-CI := $(SRC:%.c=$(BUILDDIR)%.ci)
-GCDA := $(SRC:%.c=$(BUILDDIR)%.t.a.gcda)
+OBJ := $(SRC:%.c=$(BUILDDIR)/%.o)
+DEP := $(SRC:%.c=$(BUILDDIR)/%.d)
+ASM := $(SRC:%.c=$(BUILDDIR)/%.s)
+CI := $(SRC:%.c=$(BUILDDIR)/%.ci)
+GCDA := $(SRC:%.c=$(BUILDDIR)/%.t.a.gcda)

 TESTS ?= $(wildcard tests/*.toml)
 TEST_SRC ?= $(SRC) \
 	$(filter-out $(wildcard bd/*.*.c),$(wildcard bd/*.c)) \
 	runners/test_runner.c
-TEST_RUNNER ?= $(BUILDDIR)runners/test_runner
-TEST_TC := $(TESTS:%.toml=$(BUILDDIR)%.t.c) \
-	$(TEST_SRC:%.c=$(BUILDDIR)%.t.c)
+TEST_RUNNER ?= $(BUILDDIR)/runners/test_runner
+TEST_TC := $(TESTS:%.toml=$(BUILDDIR)/%.t.c) \
+	$(TEST_SRC:%.c=$(BUILDDIR)/%.t.c)
 TEST_TAC := $(TEST_TC:%.t.c=%.t.a.c)
 TEST_OBJ := $(TEST_TAC:%.t.a.c=%.t.a.o)
 TEST_DEP := $(TEST_TAC:%.t.a.c=%.t.a.d)
@@ -56,9 +55,9 @@ BENCHES ?= $(wildcard benches/*.toml)
 BENCH_SRC ?= $(SRC) \
 	$(filter-out $(wildcard bd/*.*.c),$(wildcard bd/*.c)) \
 	runners/bench_runner.c
-BENCH_RUNNER ?= $(BUILDDIR)runners/bench_runner
-BENCH_BC := $(BENCHES:%.toml=$(BUILDDIR)%.b.c) \
-	$(BENCH_SRC:%.c=$(BUILDDIR)%.b.c)
+BENCH_RUNNER ?= $(BUILDDIR)/runners/bench_runner
+BENCH_BC := $(BENCHES:%.toml=$(BUILDDIR)/%.b.c) \
+	$(BENCH_SRC:%.c=$(BUILDDIR)/%.b.c)
 BENCH_BAC := $(BENCH_BC:%.b.c=%.b.a.c)
 BENCH_OBJ := $(BENCH_BAC:%.b.a.c=%.b.a.o)
 BENCH_DEP := $(BENCH_BAC:%.b.a.c=%.b.a.d)
@@ -104,18 +103,18 @@ endif
 override PERFFLAGS += $(filter -j%,$(MAKEFLAGS))
 override PERFBDFLAGS += $(filter -j%,$(MAKEFLAGS))
 ifneq ($(NM),nm)
-override CODEFLAGS += --nm-tool="$(NM)"
-override DATAFLAGS += --nm-tool="$(NM)"
+override CODEFLAGS += --nm-path="$(NM)"
+override DATAFLAGS += --nm-path="$(NM)"
 endif
 ifneq ($(OBJDUMP),objdump)
-override CODEFLAGS += --objdump-tool="$(OBJDUMP)"
-override DATAFLAGS += --objdump-tool="$(OBJDUMP)"
-override STRUCTFLAGS += --objdump-tool="$(OBJDUMP)"
-override PERFFLAGS += --objdump-tool="$(OBJDUMP)"
-override PERFBDFLAGS += --objdump-tool="$(OBJDUMP)"
+override CODEFLAGS += --objdump-path="$(OBJDUMP)"
+override DATAFLAGS += --objdump-path="$(OBJDUMP)"
+override STRUCTFLAGS += --objdump-path="$(OBJDUMP)"
+override PERFFLAGS += --objdump-path="$(OBJDUMP)"
+override PERFBDFLAGS += --objdump-path="$(OBJDUMP)"
 endif
 ifneq ($(PERF),perf)
-override PERFFLAGS += --perf-tool="$(PERF)"
+override PERFFLAGS += --perf-path="$(PERF)"
 endif

 override TESTFLAGS += -b
@@ -128,10 +127,10 @@ override TESTFLAGS += -p$(TEST_PERF)
 override BENCHFLAGS += -p$(BENCH_PERF)
 endif
 ifdef YES_PERFBD
 override TESTFLAGS += -t$(TEST_TRACE) --trace-backtrace --trace-freq=100
 endif
 ifndef NO_PERFBD
 override BENCHFLAGS += -t$(BENCH_TRACE) --trace-backtrace --trace-freq=100
 endif
 ifdef VERBOSE
 override TESTFLAGS += -v
@@ -177,10 +176,10 @@ ifndef NO_COV
 test-runner build-test: override CFLAGS+=--coverage
 endif
 ifdef YES_PERF
-bench-runner build-bench: override CFLAGS+=-fno-omit-frame-pointer
+test-runner build-test: override CFLAGS+=-fno-omit-frame-pointer
 endif
 ifdef YES_PERFBD
-bench-runner build-bench: override CFLAGS+=-fno-omit-frame-pointer
+test-runner build-test: override CFLAGS+=-fno-omit-frame-pointer
 endif
 # note we remove some binary dependent files during compilation,
 # otherwise it's way to easy to end up with outdated results
@@ -272,7 +271,7 @@ perfbd: $(BENCH_TRACE)
 		$(PERFBDFLAGS))

 .PHONY: summary sizes
-summary sizes: $(BUILDDIR)lfs.csv
+summary sizes: $(BUILDDIR)/lfs.csv
 	$(strip ./scripts/summary.py -Y $^ \
 		-fcode=code_size \
 		-fdata=data_size \
@@ -288,88 +287,88 @@ summary sizes: $(BUILDDIR)lfs.csv
 .SUFFIXES:
 .SECONDARY:

-$(BUILDDIR)lfs: $(OBJ)
+$(BUILDDIR)/lfs: $(OBJ)
 	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@

-$(BUILDDIR)lfs.a: $(OBJ)
+$(BUILDDIR)/lfs.a: $(OBJ)
 	$(AR) rcs $@ $^

-$(BUILDDIR)lfs.code.csv: $(OBJ)
+$(BUILDDIR)/lfs.code.csv: $(OBJ)
 	./scripts/code.py $^ -q $(CODEFLAGS) -o $@

-$(BUILDDIR)lfs.data.csv: $(OBJ)
+$(BUILDDIR)/lfs.data.csv: $(OBJ)
 	./scripts/data.py $^ -q $(CODEFLAGS) -o $@

-$(BUILDDIR)lfs.stack.csv: $(CI)
+$(BUILDDIR)/lfs.stack.csv: $(CI)
 	./scripts/stack.py $^ -q $(CODEFLAGS) -o $@

-$(BUILDDIR)lfs.struct.csv: $(OBJ)
+$(BUILDDIR)/lfs.struct.csv: $(OBJ)
 	./scripts/struct_.py $^ -q $(CODEFLAGS) -o $@

-$(BUILDDIR)lfs.cov.csv: $(GCDA)
+$(BUILDDIR)/lfs.cov.csv: $(GCDA)
 	./scripts/cov.py $^ $(patsubst %,-F%,$(SRC)) -q $(COVFLAGS) -o $@

-$(BUILDDIR)lfs.perf.csv: $(BENCH_PERF)
+$(BUILDDIR)/lfs.perf.csv: $(BENCH_PERF)
 	./scripts/perf.py $^ $(patsubst %,-F%,$(SRC)) -q $(PERFFLAGS) -o $@

-$(BUILDDIR)lfs.perfbd.csv: $(BENCH_TRACE)
+$(BUILDDIR)/lfs.perfbd.csv: $(BENCH_TRACE)
 	$(strip ./scripts/perfbd.py \
 		$(BENCH_RUNNER) $^ $(patsubst %,-F%,$(SRC)) \
 		-q $(PERFBDFLAGS) -o $@)

-$(BUILDDIR)lfs.csv: \
-	$(BUILDDIR)lfs.code.csv \
-	$(BUILDDIR)lfs.data.csv \
-	$(BUILDDIR)lfs.stack.csv \
-	$(BUILDDIR)lfs.struct.csv
+$(BUILDDIR)/lfs.csv: \
+	$(BUILDDIR)/lfs.code.csv \
+	$(BUILDDIR)/lfs.data.csv \
+	$(BUILDDIR)/lfs.stack.csv \
+	$(BUILDDIR)/lfs.struct.csv
 	./scripts/summary.py $^ -q $(SUMMARYFLAGS) -o $@

-$(BUILDDIR)runners/test_runner: $(TEST_OBJ)
+$(BUILDDIR)/runners/test_runner: $(TEST_OBJ)
 	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@

-$(BUILDDIR)runners/bench_runner: $(BENCH_OBJ)
+$(BUILDDIR)/runners/bench_runner: $(BENCH_OBJ)
 	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@

 # our main build rule generates .o, .d, and .ci files, the latter
 # used for stack analysis
-$(BUILDDIR)%.o $(BUILDDIR)%.ci: %.c
-	$(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $(BUILDDIR)$*.o
+$(BUILDDIR)/%.o $(BUILDDIR)/%.ci: %.c
+	$(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $(BUILDDIR)/$*.o

-$(BUILDDIR)%.s: %.c
+$(BUILDDIR)/%.s: %.c
 	$(CC) -S $(CFLAGS) $< -o $@

-$(BUILDDIR)%.a.c: %.c
+$(BUILDDIR)/%.a.c: %.c
 	./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@

-$(BUILDDIR)%.a.c: $(BUILDDIR)%.c
+$(BUILDDIR)/%.a.c: $(BUILDDIR)/%.c
 	./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@

-$(BUILDDIR)%.t.c: %.toml
+$(BUILDDIR)/%.t.c: %.toml
 	./scripts/test.py -c $< $(TESTCFLAGS) -o $@

-$(BUILDDIR)%.t.c: %.c $(TESTS)
+$(BUILDDIR)/%.t.c: %.c $(TESTS)
 	./scripts/test.py -c $(TESTS) -s $< $(TESTCFLAGS) -o $@

-$(BUILDDIR)%.b.c: %.toml
+$(BUILDDIR)/%.b.c: %.toml
 	./scripts/bench.py -c $< $(BENCHCFLAGS) -o $@

-$(BUILDDIR)%.b.c: %.c $(BENCHES)
+$(BUILDDIR)/%.b.c: %.c $(BENCHES)
 	./scripts/bench.py -c $(BENCHES) -s $< $(BENCHCFLAGS) -o $@

 # clean everything
 .PHONY: clean
 clean:
-	rm -f $(BUILDDIR)lfs
-	rm -f $(BUILDDIR)lfs.a
+	rm -f $(BUILDDIR)/lfs
+	rm -f $(BUILDDIR)/lfs.a
 	$(strip rm -f \
-		$(BUILDDIR)lfs.csv \
-		$(BUILDDIR)lfs.code.csv \
-		$(BUILDDIR)lfs.data.csv \
-		$(BUILDDIR)lfs.stack.csv \
-		$(BUILDDIR)lfs.struct.csv \
-		$(BUILDDIR)lfs.cov.csv \
-		$(BUILDDIR)lfs.perf.csv \
-		$(BUILDDIR)lfs.perfbd.csv)
+		$(BUILDDIR)/lfs.csv \
+		$(BUILDDIR)/lfs.code.csv \
+		$(BUILDDIR)/lfs.data.csv \
+		$(BUILDDIR)/lfs.stack.csv \
+		$(BUILDDIR)/lfs.struct.csv \
+		$(BUILDDIR)/lfs.cov.csv \
+		$(BUILDDIR)/lfs.perf.csv \
+		$(BUILDDIR)/lfs.perfbd.csv)
 	rm -f $(OBJ)
 	rm -f $(DEP)
 	rm -f $(ASM)

scripts/bench.py

@@ -30,8 +30,8 @@ import toml
 RUNNER_PATH = './runners/bench_runner'
 HEADER_PATH = 'runners/bench_runner.h'

-GDB_TOOL = ['gdb']
-VALGRIND_TOOL = ['valgrind']
+GDB_PATH = ['gdb']
+VALGRIND_PATH = ['valgrind']
 PERF_SCRIPT = ['./scripts/perf.py']
@@ -502,7 +502,7 @@ def find_runner(runner, **args):

     # run under valgrind?
     if args.get('valgrind'):
-        cmd[:0] = args['valgrind_tool'] + [
+        cmd[:0] = args['valgrind_path'] + [
             '--leak-check=full',
             '--track-origins=yes',
             '--error-exitcode=4',
@@ -518,8 +518,8 @@ def find_runner(runner, **args):
                 if args.get('perf_period') else None,
             '--perf-events=%s' % args['perf_events']
                 if args.get('perf_events') else None,
-            '--perf-tool=%s' % args['perf_tool']
-                if args.get('perf_tool') else None,
+            '--perf-path=%s' % args['perf_path']
+                if args.get('perf_path') else None,
             '-o%s' % args['perf']]))

     # other context
@@ -1144,24 +1144,24 @@ def run(runner, bench_ids=[], **args):
             cmd = runner_ + [failure.id]
             if args.get('gdb_main'):
-                cmd[:0] = args['gdb_tool'] + [
+                cmd[:0] = args['gdb_path'] + [
                     '-ex', 'break main',
                     '-ex', 'run',
                     '--args']
             elif args.get('gdb_case'):
                 path, lineno = find_path(runner_, failure.id, **args)
-                cmd[:0] = args['gdb_tool'] + [
+                cmd[:0] = args['gdb_path'] + [
                     '-ex', 'break %s:%d' % (path, lineno),
                     '-ex', 'run',
                     '--args']
             elif failure.assert_ is not None:
-                cmd[:0] = args['gdb_tool'] + [
+                cmd[:0] = args['gdb_path'] + [
                     '-ex', 'run',
                     '-ex', 'frame function raise',
                     '-ex', 'up 2',
                     '--args']
             else:
-                cmd[:0] = args['gdb_tool'] + [
+                cmd[:0] = args['gdb_path'] + [
                     '-ex', 'run',
                     '--args']
@@ -1345,10 +1345,11 @@ if __name__ == "__main__":
         help="Drop into gdb on bench failure but stop at the beginning "
             "of main.")
     bench_parser.add_argument(
-        '--gdb-tool',
+        '--gdb-path',
         type=lambda x: x.split(),
-        default=GDB_TOOL,
-        help="Path to gdb tool to use. Defaults to %r." % GDB_TOOL)
+        default=GDB_PATH,
+        help="Path to the gdb executable, may include flags. "
+            "Defaults to %r." % GDB_PATH)
     bench_parser.add_argument(
         '--exec',
         type=lambda e: e.split(),
@@ -1359,10 +1360,11 @@ if __name__ == "__main__":
         help="Run under Valgrind to find memory errors. Implicitly sets "
             "--isolate.")
     bench_parser.add_argument(
-        '--valgrind-tool',
+        '--valgrind-path',
         type=lambda x: x.split(),
-        default=VALGRIND_TOOL,
-        help="Path to Valgrind tool to use. Defaults to %r." % VALGRIND_TOOL)
+        default=VALGRIND_PATH,
+        help="Path to the Valgrind executable, may include flags. "
+            "Defaults to %r." % VALGRIND_PATH)
     bench_parser.add_argument(
         '-p', '--perf',
         help="Run under Linux's perf to sample performance counters, writing "
@@ -1385,10 +1387,10 @@ if __name__ == "__main__":
         default=PERF_SCRIPT,
         help="Path to the perf script to use. Defaults to %r." % PERF_SCRIPT)
     bench_parser.add_argument(
-        '--perf-tool',
+        '--perf-path',
         type=lambda x: x.split(),
-        help="Path to the perf tool to use. This is passed directly to the "
-            "perf script")
+        help="Path to the perf executable, may include flags. This is passed "
+            "directly to the perf script")

     # compilation flags
     comp_parser = parser.add_argument_group('compilation options')

scripts/changeprefix.py

@@ -21,7 +21,7 @@ import shutil
 import subprocess
 import tempfile

-GIT_TOOL = ['git']
+GIT_PATH = ['git']

 def openio(path, mode='r', buffering=-1):
@@ -89,10 +89,10 @@ def main(from_prefix, to_prefix, paths=[], *,
         no_renames=False,
         git=False,
         no_stage=False,
-        git_tool=GIT_TOOL):
+        git_path=GIT_PATH):
     if not paths:
         if git:
-            cmd = git_tool + ['ls-tree', '-r', '--name-only', 'HEAD']
+            cmd = git_path + ['ls-tree', '-r', '--name-only', 'HEAD']
             if verbose:
                 print(' '.join(shlex.quote(c) for c in cmd))
             paths = subprocess.check_output(cmd, encoding='utf8').split()
@@ -116,11 +116,11 @@ def main(from_prefix, to_prefix, paths=[], *,
         # stage?
         if git and not no_stage:
             if from_path != to_path:
-                cmd = git_tool + ['rm', '-q', from_path]
+                cmd = git_path + ['rm', '-q', from_path]
                 if verbose:
                     print(' '.join(shlex.quote(c) for c in cmd))
                 subprocess.check_call(cmd)
-            cmd = git_tool + ['add', to_path]
+            cmd = git_path + ['add', to_path]
             if verbose:
                 print(' '.join(shlex.quote(c) for c in cmd))
             subprocess.check_call(cmd)
@@ -168,10 +168,11 @@ if __name__ == "__main__":
         action='store_true',
         help="Don't stage changes with git.")
     parser.add_argument(
-        '--git-tool',
+        '--git-path',
         type=lambda x: x.split(),
-        default=GIT_TOOL,
-        help="Path to git tool to use. Defaults to %r." % GIT_TOOL)
+        default=GIT_PATH,
+        help="Path to git executable, may include flags. "
+            "Defaults to %r." % GIT_PATH)
     sys.exit(main(**{k: v
         for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

scripts/code.py

@@ -23,9 +23,9 @@ import shlex
 import subprocess as sp

-NM_TOOL = ['nm']
+NM_PATH = ['nm']
 NM_TYPES = 'tTrRdD'
-OBJDUMP_TOOL = ['objdump']
+OBJDUMP_PATH = ['objdump']

 # integer fields
@@ -135,9 +135,9 @@ def openio(path, mode='r', buffering=-1):
         return open(path, mode, buffering)

 def collect(obj_paths, *,
-        nm_tool=NM_TOOL,
+        nm_path=NM_PATH,
         nm_types=NM_TYPES,
-        objdump_tool=OBJDUMP_TOOL,
+        objdump_path=OBJDUMP_PATH,
         sources=None,
         everything=False,
         **args):
@@ -162,8 +162,8 @@ def collect(obj_paths, *,
         # find symbol sizes
         results_ = []
-        # note nm-tool may contain extra args
-        cmd = nm_tool + ['--size-sort', path]
+        # note nm-path may contain extra args
+        cmd = nm_path + ['--size-sort', path]
         if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         proc = sp.Popen(cmd,
@@ -193,8 +193,8 @@ def collect(obj_paths, *,
         # try to figure out the source file if we have debug-info
         dirs = {}
         files = {}
-        # note objdump-tool may contain extra args
-        cmd = objdump_tool + ['--dwarf=rawline', path]
+        # note objdump-path may contain extra args
+        cmd = objdump_path + ['--dwarf=rawline', path]
         if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         proc = sp.Popen(cmd,
@@ -234,8 +234,8 @@ def collect(obj_paths, *,
         is_func = False
         f_name = None
         f_file = None
-        # note objdump-tool may contain extra args
-        cmd = objdump_tool + ['--dwarf=info', path]
+        # note objdump-path may contain extra args
+        cmd = objdump_path + ['--dwarf=info', path]
         if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         proc = sp.Popen(cmd,
@@ -675,15 +675,17 @@ if __name__ == "__main__":
         help="Type of symbols to report, this uses the same single-character "
             "type-names emitted by nm. Defaults to %r." % NM_TYPES)
     parser.add_argument(
-        '--nm-tool',
+        '--nm-path',
         type=lambda x: x.split(),
-        default=NM_TOOL,
-        help="Path to the nm tool to use. Defaults to %r." % NM_TOOL)
+        default=NM_PATH,
+        help="Path to the nm executable, may include flags. "
+            "Defaults to %r." % NM_PATH)
     parser.add_argument(
-        '--objdump-tool',
+        '--objdump-path',
         type=lambda x: x.split(),
-        default=OBJDUMP_TOOL,
-        help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
+        default=OBJDUMP_PATH,
+        help="Path to the objdump executable, may include flags. "
+            "Defaults to %r." % OBJDUMP_PATH)
     sys.exit(main(**{k: v
         for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

scripts/cov.py

@@ -25,7 +25,7 @@ import subprocess as sp
 # TODO use explode_asserts to avoid counting assert branches?
 # TODO use dwarf=info to find functions for inline functions?

-GCOV_TOOL = ['gcov']
+GCOV_PATH = ['gcov']

 # integer fields
@@ -210,15 +210,15 @@ def openio(path, mode='r', buffering=-1):
         return open(path, mode, buffering)

 def collect(gcda_paths, *,
-        gcov_tool=GCOV_TOOL,
+        gcov_path=GCOV_PATH,
         sources=None,
         everything=False,
         **args):
     results = []
     for path in gcda_paths:
         # get coverage info through gcov's json output
-        # note, gcov-tool may contain extra args
-        cmd = GCOV_TOOL + ['-b', '-t', '--json-format', path]
+        # note, gcov-path may contain extra args
+        cmd = GCOV_PATH + ['-b', '-t', '--json-format', path]
         if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         proc = sp.Popen(cmd,
@@ -802,10 +802,11 @@ if __name__ == "__main__":
         action='store_true',
         help="Error if any branches are not covered.")
     parser.add_argument(
-        '--gcov-tool',
-        default=GCOV_TOOL,
+        '--gcov-path',
+        default=GCOV_PATH,
         type=lambda x: x.split(),
-        help="Path to the gcov tool to use. Defaults to %r." % GCOV_TOOL)
+        help="Path to the gcov executable, may include paths. "
+            "Defaults to %r." % GCOV_PATH)
     sys.exit(main(**{k: v
         for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

scripts/data.py

@@ -23,9 +23,9 @@ import shlex
 import subprocess as sp

-NM_TOOL = ['nm']
+NM_PATH = ['nm']
 NM_TYPES = 'dDbB'
-OBJDUMP_TOOL = ['objdump']
+OBJDUMP_PATH = ['objdump']

 # integer fields
@@ -135,9 +135,9 @@ def openio(path, mode='r', buffering=-1):
         return open(path, mode, buffering)

 def collect(obj_paths, *,
-        nm_tool=NM_TOOL,
+        nm_path=NM_PATH,
         nm_types=NM_TYPES,
-        objdump_tool=OBJDUMP_TOOL,
+        objdump_path=OBJDUMP_PATH,
         sources=None,
         everything=False,
         **args):
@@ -162,8 +162,8 @@ def collect(obj_paths, *,
         # find symbol sizes
         results_ = []
-        # note nm-tool may contain extra args
-        cmd = nm_tool + ['--size-sort', path]
+        # note nm-path may contain extra args
+        cmd = nm_path + ['--size-sort', path]
         if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         proc = sp.Popen(cmd,
@@ -193,8 +193,8 @@ def collect(obj_paths, *,
         # try to figure out the source file if we have debug-info
         dirs = {}
         files = {}
-        # note objdump-tool may contain extra args
-        cmd = objdump_tool + ['--dwarf=rawline', path]
+        # note objdump-path may contain extra args
+        cmd = objdump_path + ['--dwarf=rawline', path]
         if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         proc = sp.Popen(cmd,
@@ -234,8 +234,8 @@ def collect(obj_paths, *,
         is_func = False
         f_name = None
         f_file = None
-        # note objdump-tool may contain extra args
-        cmd = objdump_tool + ['--dwarf=info', path]
+        # note objdump-path may contain extra args
+        cmd = objdump_path + ['--dwarf=info', path]
         if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         proc = sp.Popen(cmd,
@@ -675,15 +675,17 @@ if __name__ == "__main__":
         help="Type of symbols to report, this uses the same single-character "
             "type-names emitted by nm. Defaults to %r." % NM_TYPES)
     parser.add_argument(
-        '--nm-tool',
+        '--nm-path',
         type=lambda x: x.split(),
-        default=NM_TOOL,
-        help="Path to the nm tool to use. Defaults to %r." % NM_TOOL)
+        default=NM_PATH,
+        help="Path to the nm executable, may include flags. "
+            "Defaults to %r." % NM_PATH)
     parser.add_argument(
-        '--objdump-tool',
+        '--objdump-path',
         type=lambda x: x.split(),
-        default=OBJDUMP_TOOL,
-        help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
+        default=OBJDUMP_PATH,
+        help="Path to the objdump executable, may include flags. "
+            "Defaults to %r." % OBJDUMP_PATH)
     sys.exit(main(**{k: v
         for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

scripts/perf.py

@@ -30,10 +30,10 @@ import zipfile
 # TODO support non-zip perf results?

-PERF_TOOL = ['perf']
+PERF_PATH = ['perf']
 PERF_EVENTS = 'cycles,branch-misses,branches,cache-misses,cache-references'
 PERF_FREQ = 100
-OBJDUMP_TOOL = ['objdump']
+OBJDUMP_PATH = ['objdump']
 THRESHOLD = (0.5, 0.85)
@@ -161,13 +161,13 @@ def record(command, *,
         perf_freq=PERF_FREQ,
         perf_period=None,
         perf_events=PERF_EVENTS,
-        perf_tool=PERF_TOOL,
+        perf_path=PERF_PATH,
         **args):
     # create a temporary file for perf to write to, as far as I can tell
     # this is strictly needed because perf's pipe-mode only works with stdout
     with tempfile.NamedTemporaryFile('rb') as f:
         # figure out our perf invocation
-        perf = perf_tool + list(filter(None, [
+        perf = perf_path + list(filter(None, [
             'record',
             '-F%s' % perf_freq
                 if perf_freq is not None
@@ -234,7 +234,7 @@ def multiprocessing_cache(f):

 @multiprocessing_cache
 def collect_syms_and_lines(obj_path, *,
-        objdump_tool=None,
+        objdump_path=None,
         **args):
     symbol_pattern = re.compile(
         '^(?P<addr>[0-9a-fA-F]+)'
@@ -263,7 +263,7 @@ def collect_syms_and_lines(obj_path, *,
     # figure out symbol addresses and file+line ranges
     syms = {}
     sym_at = []
-    cmd = objdump_tool + ['-t', obj_path]
+    cmd = objdump_path + ['-t', obj_path]
     if args.get('verbose'):
         print(' '.join(shlex.quote(c) for c in cmd))
     proc = sp.Popen(cmd,
@@ -312,7 +312,7 @@ def collect_syms_and_lines(obj_path, *,
     op_file = 1
     op_line = 1
     op_addr = 0
-    cmd = objdump_tool + ['--dwarf=rawline', obj_path]
+    cmd = objdump_path + ['--dwarf=rawline', obj_path]
     if args.get('verbose'):
         print(' '.join(shlex.quote(c) for c in cmd))
     proc = sp.Popen(cmd,
@@ -384,7 +384,7 @@ def collect_syms_and_lines(obj_path, *,

 def collect_decompressed(path, *,
-        perf_tool=PERF_TOOL,
+        perf_path=PERF_PATH,
         sources=None,
         everything=False,
         propagate=0,
@@ -407,8 +407,8 @@ def collect_decompressed(path, *,
         'cache-misses': 'cmisses',
         'cache-references': 'caches'}

-    # note perf_tool may contain extra args
-    cmd = perf_tool + [
+    # note perf_path may contain extra args
+    cmd = perf_path + [
         'script',
         '-i%s' % path]
     if args.get('verbose'):
@@ -1259,14 +1259,16 @@ if __name__ == "__main__":
         const=0,
         help="Number of processes to use. 0 spawns one process per core.")
     parser.add_argument(
-        '--perf-tool',
+        '--perf-path',
         type=lambda x: x.split(),
-        help="Path to the perf tool to use. Defaults to %r." % PERF_TOOL)
+        help="Path to the perf executable, may include flags. "
+            "Defaults to %r." % PERF_PATH)
     parser.add_argument(
-        '--objdump-tool',
+        '--objdump-path',
         type=lambda x: x.split(),
-        default=OBJDUMP_TOOL,
-        help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
+        default=OBJDUMP_PATH,
+        help="Path to the objdump executable, may include flags. "
+            "Defaults to %r." % OBJDUMP_PATH)

     # record flags
     record_parser = parser.add_argument_group('record options')
@@ -1294,9 +1296,10 @@ if __name__ == "__main__":
         help="perf events to record. This is passed directly to perf. "
             "Defaults to %r." % PERF_EVENTS)
     record_parser.add_argument(
-        '--perf-tool',
+        '--perf-path',
         type=lambda x: x.split(),
-        help="Path to the perf tool to use. Defaults to %r." % PERF_TOOL)
+        help="Path to the perf executable, may include flags. "
+            "Defaults to %r." % PERF_PATH)

     # avoid intermixed/REMAINDER conflict, see above
     if nargs == argparse.REMAINDER:

scripts/perfbd.py

@@ -24,7 +24,7 @@ import shlex
 import subprocess as sp

-OBJDUMP_TOOL = ['objdump']
+OBJDUMP_PATH = ['objdump']
 THRESHOLD = (0.5, 0.85)
@@ -142,7 +142,7 @@ def openio(path, mode='r', buffering=-1):
         return open(path, mode, buffering)

 def collect_syms_and_lines(obj_path, *,
-        objdump_tool=None,
+        objdump_path=None,
         **args):
     symbol_pattern = re.compile(
         '^(?P<addr>[0-9a-fA-F]+)'
@@ -171,7 +171,7 @@ def collect_syms_and_lines(obj_path, *,
     # figure out symbol addresses
     syms = {}
     sym_at = []
-    cmd = objdump_tool + ['-t', obj_path]
+    cmd = objdump_path + ['-t', obj_path]
     if args.get('verbose'):
         print(' '.join(shlex.quote(c) for c in cmd))
     proc = sp.Popen(cmd,
@@ -220,7 +220,7 @@ def collect_syms_and_lines(obj_path, *,
     op_file = 1
     op_line = 1
     op_addr = 0
-    cmd = objdump_tool + ['--dwarf=rawline', obj_path]
+    cmd = objdump_path + ['--dwarf=rawline', obj_path]
     if args.get('verbose'):
         print(' '.join(shlex.quote(c) for c in cmd))
     proc = sp.Popen(cmd,
@@ -1244,10 +1244,11 @@ if __name__ == "__main__":
         const=0,
         help="Number of processes to use. 0 spawns one process per core.")
     parser.add_argument(
-        '--objdump-tool',
+        '--objdump-path',
         type=lambda x: x.split(),
-        default=OBJDUMP_TOOL,
-        help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
+        default=OBJDUMP_PATH,
+        help="Path to the objdump executable, may include flags. "
+            "Defaults to %r." % OBJDUMP_PATH)
     sys.exit(main(**{k: v
         for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

scripts/struct_.py

@@ -20,7 +20,7 @@ import shlex
 import subprocess as sp

-OBJDUMP_TOOL = ['objdump']
+OBJDUMP_PATH = ['objdump']
@@ -129,7 +129,7 @@ def openio(path, mode='r', buffering=-1):
         return open(path, mode, buffering)

 def collect(obj_paths, *,
-        objdump_tool=OBJDUMP_TOOL,
+        objdump_path=OBJDUMP_PATH,
         sources=None,
         everything=False,
         internal=False,
@@ -150,8 +150,8 @@ def collect(obj_paths, *,
         # find files, we want to filter by structs in .h files
         dirs = {}
         files = {}
-        # note objdump-tool may contain extra args
-        cmd = objdump_tool + ['--dwarf=rawline', path]
+        # note objdump-path may contain extra args
+        cmd = objdump_path + ['--dwarf=rawline', path]
         if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         proc = sp.Popen(cmd,
@@ -191,8 +191,8 @@ def collect(obj_paths, *,
         s_name = None
         s_file = None
         s_size = None
-        # note objdump-tool may contain extra args
-        cmd = objdump_tool + ['--dwarf=info', path]
+        # note objdump-path may contain extra args
+        cmd = objdump_path + ['--dwarf=info', path]
         if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         proc = sp.Popen(cmd,
@@ -626,10 +626,11 @@ if __name__ == "__main__":
         action='store_true',
         help="Also show structs in .c files.")
     parser.add_argument(
-        '--objdump-tool',
+        '--objdump-path',
         type=lambda x: x.split(),
-        default=OBJDUMP_TOOL,
-        help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
+        default=OBJDUMP_PATH,
+        help="Path to the objdump executable, may include flags. "
+            "Defaults to %r." % OBJDUMP_PATH)
     sys.exit(main(**{k: v
         for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

scripts/test.py

@@ -30,8 +30,8 @@ import toml
 RUNNER_PATH = './runners/test_runner'
 HEADER_PATH = 'runners/test_runner.h'

-GDB_TOOL = ['gdb']
-VALGRIND_TOOL = ['valgrind']
+GDB_PATH = ['gdb']
+VALGRIND_PATH = ['valgrind']
 PERF_SCRIPT = ['./scripts/perf.py']
@@ -516,7 +516,7 @@ def find_runner(runner, **args):

     # run under valgrind?
     if args.get('valgrind'):
-        cmd[:0] = args['valgrind_tool'] + [
+        cmd[:0] = args['valgrind_path'] + [
             '--leak-check=full',
             '--track-origins=yes',
             '--error-exitcode=4',
@@ -532,8 +532,8 @@ def find_runner(runner, **args):
                 if args.get('perf_period') else None,
             '--perf-events=%s' % args['perf_events']
                 if args.get('perf_events') else None,
-            '--perf-tool=%s' % args['perf_tool']
-                if args.get('perf_tool') else None,
+            '--perf-path=%s' % args['perf_path']
+                if args.get('perf_path') else None,
             '-o%s' % args['perf']]))

     # other context
@@ -1144,24 +1144,24 @@ def run(runner, test_ids=[], **args):
             cmd = runner_ + [failure.id]
             if args.get('gdb_main'):
-                cmd[:0] = args['gdb_tool'] + [
+                cmd[:0] = args['gdb_path'] + [
                     '-ex', 'break main',
                     '-ex', 'run',
                     '--args']
             elif args.get('gdb_case'):
                 path, lineno = find_path(runner_, failure.id, **args)
-                cmd[:0] = args['gdb_tool'] + [
+                cmd[:0] = args['gdb_path'] + [
                     '-ex', 'break %s:%d' % (path, lineno),
                     '-ex', 'run',
                     '--args']
             elif failure.assert_ is not None:
-                cmd[:0] = args['gdb_tool'] + [
+                cmd[:0] = args['gdb_path'] + [
                     '-ex', 'run',
                     '-ex', 'frame function raise',
                     '-ex', 'up 2',
                     '--args']
             else:
-                cmd[:0] = args['gdb_tool'] + [
+                cmd[:0] = args['gdb_path'] + [
                     '-ex', 'run',
                     '--args']
@@ -1353,10 +1353,11 @@ if __name__ == "__main__":
         help="Drop into gdb on test failure but stop at the beginning "
             "of main.")
     test_parser.add_argument(
-        '--gdb-tool',
+        '--gdb-path',
         type=lambda x: x.split(),
-        default=GDB_TOOL,
-        help="Path to gdb tool to use. Defaults to %r." % GDB_TOOL)
+        default=GDB_PATH,
+        help="Path to the gdb executable, may include flags. "
+            "Defaults to %r." % GDB_PATH)
     test_parser.add_argument(
         '--exec',
         type=lambda e: e.split(),
@@ -1367,10 +1368,11 @@ if __name__ == "__main__":
         help="Run under Valgrind to find memory errors. Implicitly sets "
             "--isolate.")
     test_parser.add_argument(
-        '--valgrind-tool',
+        '--valgrind-path',
         type=lambda x: x.split(),
-        default=VALGRIND_TOOL,
-        help="Path to Valgrind tool to use. Defaults to %r." % VALGRIND_TOOL)
+        default=VALGRIND_PATH,
+        help="Path to the Valgrind executable, may include flags. "
+            "Defaults to %r." % VALGRIND_PATH)
     test_parser.add_argument(
         '-p', '--perf',
         help="Run under Linux's perf to sample performance counters, writing "
@@ -1393,10 +1395,10 @@ if __name__ == "__main__":
         default=PERF_SCRIPT,
         help="Path to the perf script to use. Defaults to %r." % PERF_SCRIPT)
     test_parser.add_argument(
-        '--perf-tool',
+        '--perf-path',
         type=lambda x: x.split(),
-        help="Path to the perf tool to use. This is passed directly to the "
-            "perf script")
+        help="Path to the perf executable, may include flags. This is passed "
+            "directly to the perf script")

     # compilation flags
     comp_parser = parser.add_argument_group('compilation options')