A couple Makefile-related tweaks

- Changed --(tool)-tool to --(tool)-path in scripts; this seems to be
  a more common name for this sort of flag (sketched below).

- Changed BUILDDIR to no longer have an implicit trailing slash, which
  makes the Makefile internals a bit more readable.

- Fixed some outdated names hidden in less-often-used ifdefs.
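
In practice the two changes compose like this; a condensed sketch of the
new Makefile shape, trimmed down to one tool and one rule from the real
file:

BUILDDIR ?= .
NM ?= nm
SRC ?= $(wildcard *.c)

# BUILDDIR no longer ends in an implicit slash, so every use site joins
# paths with an explicit "/"
OBJ := $(SRC:%.c=$(BUILDDIR)/%.o)

# an overridden tool is forwarded to the scripts with the new
# --<tool>-path flag; quoting keeps any extra flags in $(NM) attached
ifneq ($(NM),nm)
override CODEFLAGS += --nm-path="$(NM)"
endif

$(BUILDDIR)/lfs.code.csv: $(OBJ)
	./scripts/code.py $^ -q $(CODEFLAGS) -o $@
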
Christopher Haster
2022-11-17 10:03:46 -06:00
parent e35e078943
commit bcc88f52f4
10 changed files with 204 additions and 190 deletions

Makefile

@@ -1,21 +1,20 @@
ifdef BUILDDIR
# make sure BUILDDIR ends with a slash
override BUILDDIR := $(BUILDDIR)/
# bit of a hack, but we want to make sure BUILDDIR directory structure
# is correct before any commands
$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
$(BUILDDIR) \
$(BUILDDIR)bd \
$(BUILDDIR)runners \
$(BUILDDIR)tests \
$(BUILDDIR)benches))
$(BUILDDIR)/ \
$(BUILDDIR)/bd \
$(BUILDDIR)/runners \
$(BUILDDIR)/tests \
$(BUILDDIR)/benches))
endif
BUILDDIR ?= .
# overridable target/src/tools/flags/etc
ifneq ($(wildcard test.c main.c),)
TARGET ?= $(BUILDDIR)lfs
TARGET ?= $(BUILDDIR)/lfs
else
TARGET ?= $(BUILDDIR)lfs.a
TARGET ?= $(BUILDDIR)/lfs.a
endif
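
The mkdir block above deserves a word, since the diff only calls it a hack:
make won't create output directories for pattern-rule targets like
$(BUILDDIR)/%.o, so the tree has to exist before any recipe runs, and the
$(findstring n,$(MAKEFLAGS)) guard skips the mkdir on dry runs (make -n).
A reduced sketch of the idiom, not verbatim from the diff and assuming only
two output directories:

ifdef BUILDDIR
# build the output tree up front; $(shell ...) runs at parse time,
# before any recipe, but skip it on dry runs, which put an "n" into
# MAKEFLAGS
$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
	$(BUILDDIR) \
	$(BUILDDIR)/runners))
endif
# default to the current directory, with no trailing slash
BUILDDIR ?= .

$(BUILDDIR)/runners/test_runner: $(TEST_OBJ)
	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
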
@@ -30,19 +29,19 @@ GDB ?= gdb
PERF ?= perf
SRC ?= $(filter-out $(wildcard *.*.c),$(wildcard *.c))
OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
DEP := $(SRC:%.c=$(BUILDDIR)%.d)
ASM := $(SRC:%.c=$(BUILDDIR)%.s)
CI := $(SRC:%.c=$(BUILDDIR)%.ci)
GCDA := $(SRC:%.c=$(BUILDDIR)%.t.a.gcda)
OBJ := $(SRC:%.c=$(BUILDDIR)/%.o)
DEP := $(SRC:%.c=$(BUILDDIR)/%.d)
ASM := $(SRC:%.c=$(BUILDDIR)/%.s)
CI := $(SRC:%.c=$(BUILDDIR)/%.ci)
GCDA := $(SRC:%.c=$(BUILDDIR)/%.t.a.gcda)
TESTS ?= $(wildcard tests/*.toml)
TEST_SRC ?= $(SRC) \
$(filter-out $(wildcard bd/*.*.c),$(wildcard bd/*.c)) \
runners/test_runner.c
TEST_RUNNER ?= $(BUILDDIR)runners/test_runner
TEST_TC := $(TESTS:%.toml=$(BUILDDIR)%.t.c) \
$(TEST_SRC:%.c=$(BUILDDIR)%.t.c)
TEST_RUNNER ?= $(BUILDDIR)/runners/test_runner
TEST_TC := $(TESTS:%.toml=$(BUILDDIR)/%.t.c) \
$(TEST_SRC:%.c=$(BUILDDIR)/%.t.c)
TEST_TAC := $(TEST_TC:%.t.c=%.t.a.c)
TEST_OBJ := $(TEST_TAC:%.t.a.c=%.t.a.o)
TEST_DEP := $(TEST_TAC:%.t.a.c=%.t.a.d)
@@ -56,9 +55,9 @@ BENCHES ?= $(wildcard benches/*.toml)
BENCH_SRC ?= $(SRC) \
$(filter-out $(wildcard bd/*.*.c),$(wildcard bd/*.c)) \
runners/bench_runner.c
BENCH_RUNNER ?= $(BUILDDIR)runners/bench_runner
BENCH_BC := $(BENCHES:%.toml=$(BUILDDIR)%.b.c) \
$(BENCH_SRC:%.c=$(BUILDDIR)%.b.c)
BENCH_RUNNER ?= $(BUILDDIR)/runners/bench_runner
BENCH_BC := $(BENCHES:%.toml=$(BUILDDIR)/%.b.c) \
$(BENCH_SRC:%.c=$(BUILDDIR)/%.b.c)
BENCH_BAC := $(BENCH_BC:%.b.c=%.b.a.c)
BENCH_OBJ := $(BENCH_BAC:%.b.a.c=%.b.a.o)
BENCH_DEP := $(BENCH_BAC:%.b.a.c=%.b.a.d)
@@ -104,18 +103,18 @@ endif
override PERFFLAGS += $(filter -j%,$(MAKEFLAGS))
override PERFBDFLAGS += $(filter -j%,$(MAKEFLAGS))
ifneq ($(NM),nm)
override CODEFLAGS += --nm-tool="$(NM)"
override DATAFLAGS += --nm-tool="$(NM)"
override CODEFLAGS += --nm-path="$(NM)"
override DATAFLAGS += --nm-path="$(NM)"
endif
ifneq ($(OBJDUMP),objdump)
override CODEFLAGS += --objdump-tool="$(OBJDUMP)"
override DATAFLAGS += --objdump-tool="$(OBJDUMP)"
override STRUCTFLAGS += --objdump-tool="$(OBJDUMP)"
override PERFFLAGS += --objdump-tool="$(OBJDUMP)"
override PERFBDFLAGS += --objdump-tool="$(OBJDUMP)"
override CODEFLAGS += --objdump-path="$(OBJDUMP)"
override DATAFLAGS += --objdump-path="$(OBJDUMP)"
override STRUCTFLAGS += --objdump-path="$(OBJDUMP)"
override PERFFLAGS += --objdump-path="$(OBJDUMP)"
override PERFBDFLAGS += --objdump-path="$(OBJDUMP)"
endif
ifneq ($(PERF),perf)
override PERFFLAGS += --perf-tool="$(PERF)"
override PERFFLAGS += --perf-path="$(PERF)"
endif
override TESTFLAGS += -b
@@ -128,10 +127,10 @@ override TESTFLAGS += -p$(TEST_PERF)
override BENCHFLAGS += -p$(BENCH_PERF)
endif
ifdef YES_PERFBD
override TESTFLAGS += -t$(TEST_TRACE) --trace-backtrace --trace-freq=100
override TESTFLAGS += -t$(TEST_TRACE) --trace-backtrace --trace-freq=100
endif
ifndef NO_PERFBD
override BENCHFLAGS += -t$(BENCH_TRACE) --trace-backtrace --trace-freq=100
override BENCHFLAGS += -t$(BENCH_TRACE) --trace-backtrace --trace-freq=100
endif
ifdef VERBOSE
override TESTFLAGS += -v
@@ -144,16 +143,16 @@ override TESTFLAGS += --exec="$(EXEC)"
override BENCHFLAGS += --exec="$(EXEC)"
endif
ifneq ($(GDB),gdb)
override TESTFLAGS += --gdb-tool="$(GDB)"
override BENCHFLAGS += --gdb-tool="$(GDB)"
override TESTFLAGS += --gdb-path="$(GDB)"
override BENCHFLAGS += --gdb-path="$(GDB)"
endif
ifneq ($(VALGRIND),valgrind)
override TESTFLAGS += --valgrind-tool="$(VALGRIND)"
override BENCHFLAGS += --valgrind-tool="$(VALGRIND)"
override TESTFLAGS += --valgrind-path="$(VALGRIND)"
override BENCHFLAGS += --valgrind-path="$(VALGRIND)"
endif
ifneq ($(PERF),perf)
override TESTFLAGS += --perf-tool="$(PERF)"
override BENCHFLAGS += --perf-tool="$(PERF)"
override TESTFLAGS += --perf-path="$(PERF)"
override BENCHFLAGS += --perf-path="$(PERF)"
endif
@@ -177,10 +176,10 @@ ifndef NO_COV
test-runner build-test: override CFLAGS+=--coverage
endif
ifdef YES_PERF
bench-runner build-bench: override CFLAGS+=-fno-omit-frame-pointer
test-runner build-test: override CFLAGS+=-fno-omit-frame-pointer
endif
ifdef YES_PERFBD
bench-runner build-bench: override CFLAGS+=-fno-omit-frame-pointer
test-runner build-test: override CFLAGS+=-fno-omit-frame-pointer
endif
# note we remove some binary-dependent files during compilation,
# otherwise it's way too easy to end up with outdated results
@@ -272,7 +271,7 @@ perfbd: $(BENCH_TRACE)
$(PERFBDFLAGS))
.PHONY: summary sizes
summary sizes: $(BUILDDIR)lfs.csv
summary sizes: $(BUILDDIR)/lfs.csv
$(strip ./scripts/summary.py -Y $^ \
-fcode=code_size \
-fdata=data_size \
@@ -288,88 +287,88 @@ summary sizes: $(BUILDDIR)lfs.csv
.SUFFIXES:
.SECONDARY:
$(BUILDDIR)lfs: $(OBJ)
$(BUILDDIR)/lfs: $(OBJ)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
$(BUILDDIR)lfs.a: $(OBJ)
$(BUILDDIR)/lfs.a: $(OBJ)
$(AR) rcs $@ $^
$(BUILDDIR)lfs.code.csv: $(OBJ)
$(BUILDDIR)/lfs.code.csv: $(OBJ)
./scripts/code.py $^ -q $(CODEFLAGS) -o $@
$(BUILDDIR)lfs.data.csv: $(OBJ)
$(BUILDDIR)/lfs.data.csv: $(OBJ)
./scripts/data.py $^ -q $(CODEFLAGS) -o $@
$(BUILDDIR)lfs.stack.csv: $(CI)
$(BUILDDIR)/lfs.stack.csv: $(CI)
./scripts/stack.py $^ -q $(CODEFLAGS) -o $@
$(BUILDDIR)lfs.struct.csv: $(OBJ)
$(BUILDDIR)/lfs.struct.csv: $(OBJ)
./scripts/struct_.py $^ -q $(CODEFLAGS) -o $@
$(BUILDDIR)lfs.cov.csv: $(GCDA)
$(BUILDDIR)/lfs.cov.csv: $(GCDA)
./scripts/cov.py $^ $(patsubst %,-F%,$(SRC)) -q $(COVFLAGS) -o $@
$(BUILDDIR)lfs.perf.csv: $(BENCH_PERF)
$(BUILDDIR)/lfs.perf.csv: $(BENCH_PERF)
./scripts/perf.py $^ $(patsubst %,-F%,$(SRC)) -q $(PERFFLAGS) -o $@
$(BUILDDIR)lfs.perfbd.csv: $(BENCH_TRACE)
$(BUILDDIR)/lfs.perfbd.csv: $(BENCH_TRACE)
$(strip ./scripts/perfbd.py \
$(BENCH_RUNNER) $^ $(patsubst %,-F%,$(SRC)) \
-q $(PERFBDFLAGS) -o $@)
$(BUILDDIR)lfs.csv: \
$(BUILDDIR)lfs.code.csv \
$(BUILDDIR)lfs.data.csv \
$(BUILDDIR)lfs.stack.csv \
$(BUILDDIR)lfs.struct.csv
$(BUILDDIR)/lfs.csv: \
$(BUILDDIR)/lfs.code.csv \
$(BUILDDIR)/lfs.data.csv \
$(BUILDDIR)/lfs.stack.csv \
$(BUILDDIR)/lfs.struct.csv
./scripts/summary.py $^ -q $(SUMMARYFLAGS) -o $@
$(BUILDDIR)runners/test_runner: $(TEST_OBJ)
$(BUILDDIR)/runners/test_runner: $(TEST_OBJ)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
$(BUILDDIR)runners/bench_runner: $(BENCH_OBJ)
$(BUILDDIR)/runners/bench_runner: $(BENCH_OBJ)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
# our main build rule generates .o, .d, and .ci files, the latter
# used for stack analysis
$(BUILDDIR)%.o $(BUILDDIR)%.ci: %.c
$(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $(BUILDDIR)$*.o
$(BUILDDIR)/%.o $(BUILDDIR)/%.ci: %.c
$(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $(BUILDDIR)/$*.o
$(BUILDDIR)%.s: %.c
$(BUILDDIR)/%.s: %.c
$(CC) -S $(CFLAGS) $< -o $@
$(BUILDDIR)%.a.c: %.c
$(BUILDDIR)/%.a.c: %.c
./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@
$(BUILDDIR)%.a.c: $(BUILDDIR)%.c
$(BUILDDIR)/%.a.c: $(BUILDDIR)/%.c
./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@
$(BUILDDIR)%.t.c: %.toml
$(BUILDDIR)/%.t.c: %.toml
./scripts/test.py -c $< $(TESTCFLAGS) -o $@
$(BUILDDIR)%.t.c: %.c $(TESTS)
$(BUILDDIR)/%.t.c: %.c $(TESTS)
./scripts/test.py -c $(TESTS) -s $< $(TESTCFLAGS) -o $@
$(BUILDDIR)%.b.c: %.toml
$(BUILDDIR)/%.b.c: %.toml
./scripts/bench.py -c $< $(BENCHCFLAGS) -o $@
$(BUILDDIR)%.b.c: %.c $(BENCHES)
$(BUILDDIR)/%.b.c: %.c $(BENCHES)
./scripts/bench.py -c $(BENCHES) -s $< $(BENCHCFLAGS) -o $@
# clean everything
.PHONY: clean
clean:
rm -f $(BUILDDIR)lfs
rm -f $(BUILDDIR)lfs.a
rm -f $(BUILDDIR)/lfs
rm -f $(BUILDDIR)/lfs.a
$(strip rm -f \
$(BUILDDIR)lfs.csv \
$(BUILDDIR)lfs.code.csv \
$(BUILDDIR)lfs.data.csv \
$(BUILDDIR)lfs.stack.csv \
$(BUILDDIR)lfs.struct.csv \
$(BUILDDIR)lfs.cov.csv \
$(BUILDDIR)lfs.perf.csv \
$(BUILDDIR)lfs.perfbd.csv)
$(BUILDDIR)/lfs.csv \
$(BUILDDIR)/lfs.code.csv \
$(BUILDDIR)/lfs.data.csv \
$(BUILDDIR)/lfs.stack.csv \
$(BUILDDIR)/lfs.struct.csv \
$(BUILDDIR)/lfs.cov.csv \
$(BUILDDIR)/lfs.perf.csv \
$(BUILDDIR)/lfs.perfbd.csv)
rm -f $(OBJ)
rm -f $(DEP)
rm -f $(ASM)

scripts/bench.py

@@ -30,8 +30,8 @@ import toml
RUNNER_PATH = './runners/bench_runner'
HEADER_PATH = 'runners/bench_runner.h'
GDB_TOOL = ['gdb']
VALGRIND_TOOL = ['valgrind']
GDB_PATH = ['gdb']
VALGRIND_PATH = ['valgrind']
PERF_SCRIPT = ['./scripts/perf.py']
@@ -502,7 +502,7 @@ def find_runner(runner, **args):
# run under valgrind?
if args.get('valgrind'):
cmd[:0] = args['valgrind_tool'] + [
cmd[:0] = args['valgrind_path'] + [
'--leak-check=full',
'--track-origins=yes',
'--error-exitcode=4',
@@ -518,8 +518,8 @@ def find_runner(runner, **args):
if args.get('perf_period') else None,
'--perf-events=%s' % args['perf_events']
if args.get('perf_events') else None,
'--perf-tool=%s' % args['perf_tool']
if args.get('perf_tool') else None,
'--perf-path=%s' % args['perf_path']
if args.get('perf_path') else None,
'-o%s' % args['perf']]))
# other context
@@ -1144,24 +1144,24 @@ def run(runner, bench_ids=[], **args):
cmd = runner_ + [failure.id]
if args.get('gdb_main'):
cmd[:0] = args['gdb_tool'] + [
cmd[:0] = args['gdb_path'] + [
'-ex', 'break main',
'-ex', 'run',
'--args']
elif args.get('gdb_case'):
path, lineno = find_path(runner_, failure.id, **args)
cmd[:0] = args['gdb_tool'] + [
cmd[:0] = args['gdb_path'] + [
'-ex', 'break %s:%d' % (path, lineno),
'-ex', 'run',
'--args']
elif failure.assert_ is not None:
cmd[:0] = args['gdb_tool'] + [
cmd[:0] = args['gdb_path'] + [
'-ex', 'run',
'-ex', 'frame function raise',
'-ex', 'up 2',
'--args']
else:
cmd[:0] = args['gdb_tool'] + [
cmd[:0] = args['gdb_path'] + [
'-ex', 'run',
'--args']
@@ -1345,10 +1345,11 @@ if __name__ == "__main__":
help="Drop into gdb on bench failure but stop at the beginning "
"of main.")
bench_parser.add_argument(
'--gdb-tool',
'--gdb-path',
type=lambda x: x.split(),
default=GDB_TOOL,
help="Path to gdb tool to use. Defaults to %r." % GDB_TOOL)
default=GDB_PATH,
help="Path to the gdb executable, may include flags. "
"Defaults to %r." % GDB_PATH)
bench_parser.add_argument(
'--exec',
type=lambda e: e.split(),
@@ -1359,10 +1360,11 @@ if __name__ == "__main__":
help="Run under Valgrind to find memory errors. Implicitly sets "
"--isolate.")
bench_parser.add_argument(
'--valgrind-tool',
'--valgrind-path',
type=lambda x: x.split(),
default=VALGRIND_TOOL,
help="Path to Valgrind tool to use. Defaults to %r." % VALGRIND_TOOL)
default=VALGRIND_PATH,
help="Path to the Valgrind executable, may include flags. "
"Defaults to %r." % VALGRIND_PATH)
bench_parser.add_argument(
'-p', '--perf',
help="Run under Linux's perf to sample performance counters, writing "
@@ -1385,10 +1387,10 @@ if __name__ == "__main__":
default=PERF_SCRIPT,
help="Path to the perf script to use. Defaults to %r." % PERF_SCRIPT)
bench_parser.add_argument(
'--perf-tool',
'--perf-path',
type=lambda x: x.split(),
help="Path to the perf tool to use. This is passed directly to the "
"perf script")
help="Path to the perf executable, may include flags. This is passed "
"directly to the perf script")
# compilation flags
comp_parser = parser.add_argument_group('compilation options')

scripts/changeprefix.py

@@ -21,7 +21,7 @@ import shutil
import subprocess
import tempfile
GIT_TOOL = ['git']
GIT_PATH = ['git']
def openio(path, mode='r', buffering=-1):
@@ -89,10 +89,10 @@ def main(from_prefix, to_prefix, paths=[], *,
no_renames=False,
git=False,
no_stage=False,
git_tool=GIT_TOOL):
git_path=GIT_PATH):
if not paths:
if git:
cmd = git_tool + ['ls-tree', '-r', '--name-only', 'HEAD']
cmd = git_path + ['ls-tree', '-r', '--name-only', 'HEAD']
if verbose:
print(' '.join(shlex.quote(c) for c in cmd))
paths = subprocess.check_output(cmd, encoding='utf8').split()
@@ -116,11 +116,11 @@ def main(from_prefix, to_prefix, paths=[], *,
# stage?
if git and not no_stage:
if from_path != to_path:
cmd = git_tool + ['rm', '-q', from_path]
cmd = git_path + ['rm', '-q', from_path]
if verbose:
print(' '.join(shlex.quote(c) for c in cmd))
subprocess.check_call(cmd)
cmd = git_tool + ['add', to_path]
cmd = git_path + ['add', to_path]
if verbose:
print(' '.join(shlex.quote(c) for c in cmd))
subprocess.check_call(cmd)
@@ -168,10 +168,11 @@ if __name__ == "__main__":
action='store_true',
help="Don't stage changes with git.")
parser.add_argument(
'--git-tool',
'--git-path',
type=lambda x: x.split(),
default=GIT_TOOL,
help="Path to git tool to use. Defaults to %r." % GIT_TOOL)
default=GIT_PATH,
help="Path to git executable, may include flags. "
"Defaults to %r." % GIT_PATH)
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

scripts/code.py

@@ -23,9 +23,9 @@ import shlex
import subprocess as sp
NM_TOOL = ['nm']
NM_PATH = ['nm']
NM_TYPES = 'tTrRdD'
OBJDUMP_TOOL = ['objdump']
OBJDUMP_PATH = ['objdump']
# integer fields
@@ -135,9 +135,9 @@ def openio(path, mode='r', buffering=-1):
return open(path, mode, buffering)
def collect(obj_paths, *,
nm_tool=NM_TOOL,
nm_path=NM_PATH,
nm_types=NM_TYPES,
objdump_tool=OBJDUMP_TOOL,
objdump_path=OBJDUMP_PATH,
sources=None,
everything=False,
**args):
@@ -162,8 +162,8 @@ def collect(obj_paths, *,
# find symbol sizes
results_ = []
# note nm-tool may contain extra args
cmd = nm_tool + ['--size-sort', path]
# note nm-path may contain extra args
cmd = nm_path + ['--size-sort', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -193,8 +193,8 @@ def collect(obj_paths, *,
# try to figure out the source file if we have debug-info
dirs = {}
files = {}
# note objdump-tool may contain extra args
cmd = objdump_tool + ['--dwarf=rawline', path]
# note objdump-path may contain extra args
cmd = objdump_path + ['--dwarf=rawline', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -234,8 +234,8 @@ def collect(obj_paths, *,
is_func = False
f_name = None
f_file = None
# note objdump-tool may contain extra args
cmd = objdump_tool + ['--dwarf=info', path]
# note objdump-path may contain extra args
cmd = objdump_path + ['--dwarf=info', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -675,15 +675,17 @@ if __name__ == "__main__":
help="Type of symbols to report, this uses the same single-character "
"type-names emitted by nm. Defaults to %r." % NM_TYPES)
parser.add_argument(
'--nm-tool',
'--nm-path',
type=lambda x: x.split(),
default=NM_TOOL,
help="Path to the nm tool to use. Defaults to %r." % NM_TOOL)
default=NM_PATH,
help="Path to the nm executable, may include flags. "
"Defaults to %r." % NM_PATH)
parser.add_argument(
'--objdump-tool',
'--objdump-path',
type=lambda x: x.split(),
default=OBJDUMP_TOOL,
help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
default=OBJDUMP_PATH,
help="Path to the objdump executable, may include flags. "
"Defaults to %r." % OBJDUMP_PATH)
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

scripts/cov.py

@@ -25,7 +25,7 @@ import subprocess as sp
# TODO use explode_asserts to avoid counting assert branches?
# TODO use dwarf=info to find functions for inline functions?
GCOV_TOOL = ['gcov']
GCOV_PATH = ['gcov']
# integer fields
@@ -210,15 +210,15 @@ def openio(path, mode='r', buffering=-1):
return open(path, mode, buffering)
def collect(gcda_paths, *,
gcov_tool=GCOV_TOOL,
gcov_path=GCOV_PATH,
sources=None,
everything=False,
**args):
results = []
for path in gcda_paths:
# get coverage info through gcov's json output
# note, gcov-tool may contain extra args
cmd = GCOV_TOOL + ['-b', '-t', '--json-format', path]
# note, gcov-path may contain extra args
cmd = GCOV_PATH + ['-b', '-t', '--json-format', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -802,10 +802,11 @@ if __name__ == "__main__":
action='store_true',
help="Error if any branches are not covered.")
parser.add_argument(
'--gcov-tool',
default=GCOV_TOOL,
'--gcov-path',
default=GCOV_PATH,
type=lambda x: x.split(),
help="Path to the gcov tool to use. Defaults to %r." % GCOV_TOOL)
help="Path to the gcov executable, may include paths. "
"Defaults to %r." % GCOV_PATH)
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

scripts/data.py

@@ -23,9 +23,9 @@ import shlex
import subprocess as sp
NM_TOOL = ['nm']
NM_PATH = ['nm']
NM_TYPES = 'dDbB'
OBJDUMP_TOOL = ['objdump']
OBJDUMP_PATH = ['objdump']
# integer fields
@@ -135,9 +135,9 @@ def openio(path, mode='r', buffering=-1):
return open(path, mode, buffering)
def collect(obj_paths, *,
nm_tool=NM_TOOL,
nm_path=NM_PATH,
nm_types=NM_TYPES,
objdump_tool=OBJDUMP_TOOL,
objdump_path=OBJDUMP_PATH,
sources=None,
everything=False,
**args):
@@ -162,8 +162,8 @@ def collect(obj_paths, *,
# find symbol sizes
results_ = []
# note nm-tool may contain extra args
cmd = nm_tool + ['--size-sort', path]
# note nm-path may contain extra args
cmd = nm_path + ['--size-sort', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -193,8 +193,8 @@ def collect(obj_paths, *,
# try to figure out the source file if we have debug-info
dirs = {}
files = {}
# note objdump-tool may contain extra args
cmd = objdump_tool + ['--dwarf=rawline', path]
# note objdump-path may contain extra args
cmd = objdump_path + ['--dwarf=rawline', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -234,8 +234,8 @@ def collect(obj_paths, *,
is_func = False
f_name = None
f_file = None
# note objdump-tool may contain extra args
cmd = objdump_tool + ['--dwarf=info', path]
# note objdump-path may contain extra args
cmd = objdump_path + ['--dwarf=info', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -675,15 +675,17 @@ if __name__ == "__main__":
help="Type of symbols to report, this uses the same single-character "
"type-names emitted by nm. Defaults to %r." % NM_TYPES)
parser.add_argument(
'--nm-tool',
'--nm-path',
type=lambda x: x.split(),
default=NM_TOOL,
help="Path to the nm tool to use. Defaults to %r." % NM_TOOL)
default=NM_PATH,
help="Path to the nm executable, may include flags. "
"Defaults to %r." % NM_PATH)
parser.add_argument(
'--objdump-tool',
'--objdump-path',
type=lambda x: x.split(),
default=OBJDUMP_TOOL,
help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
default=OBJDUMP_PATH,
help="Path to the objdump executable, may include flags. "
"Defaults to %r." % OBJDUMP_PATH)
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

scripts/perf.py

@@ -30,10 +30,10 @@ import zipfile
# TODO support non-zip perf results?
PERF_TOOL = ['perf']
PERF_PATH = ['perf']
PERF_EVENTS = 'cycles,branch-misses,branches,cache-misses,cache-references'
PERF_FREQ = 100
OBJDUMP_TOOL = ['objdump']
OBJDUMP_PATH = ['objdump']
THRESHOLD = (0.5, 0.85)
@@ -161,13 +161,13 @@ def record(command, *,
perf_freq=PERF_FREQ,
perf_period=None,
perf_events=PERF_EVENTS,
perf_tool=PERF_TOOL,
perf_path=PERF_PATH,
**args):
# create a temporary file for perf to write to, as far as I can tell
# this is strictly needed because perf's pipe-mode only works with stdout
with tempfile.NamedTemporaryFile('rb') as f:
# figure out our perf invocation
perf = perf_tool + list(filter(None, [
perf = perf_path + list(filter(None, [
'record',
'-F%s' % perf_freq
if perf_freq is not None
@@ -234,7 +234,7 @@ def multiprocessing_cache(f):
@multiprocessing_cache
def collect_syms_and_lines(obj_path, *,
objdump_tool=None,
objdump_path=None,
**args):
symbol_pattern = re.compile(
'^(?P<addr>[0-9a-fA-F]+)'
@@ -263,7 +263,7 @@ def collect_syms_and_lines(obj_path, *,
# figure out symbol addresses and file+line ranges
syms = {}
sym_at = []
cmd = objdump_tool + ['-t', obj_path]
cmd = objdump_path + ['-t', obj_path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -312,7 +312,7 @@ def collect_syms_and_lines(obj_path, *,
op_file = 1
op_line = 1
op_addr = 0
cmd = objdump_tool + ['--dwarf=rawline', obj_path]
cmd = objdump_path + ['--dwarf=rawline', obj_path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -384,7 +384,7 @@ def collect_syms_and_lines(obj_path, *,
def collect_decompressed(path, *,
perf_tool=PERF_TOOL,
perf_path=PERF_PATH,
sources=None,
everything=False,
propagate=0,
@@ -407,8 +407,8 @@ def collect_decompressed(path, *,
'cache-misses': 'cmisses',
'cache-references': 'caches'}
# note perf_tool may contain extra args
cmd = perf_tool + [
# note perf_path may contain extra args
cmd = perf_path + [
'script',
'-i%s' % path]
if args.get('verbose'):
@@ -1259,14 +1259,16 @@ if __name__ == "__main__":
const=0,
help="Number of processes to use. 0 spawns one process per core.")
parser.add_argument(
'--perf-tool',
'--perf-path',
type=lambda x: x.split(),
help="Path to the perf tool to use. Defaults to %r." % PERF_TOOL)
help="Path to the perf executable, may include flags. "
"Defaults to %r." % PERF_PATH)
parser.add_argument(
'--objdump-tool',
'--objdump-path',
type=lambda x: x.split(),
default=OBJDUMP_TOOL,
help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
default=OBJDUMP_PATH,
help="Path to the objdump executable, may include flags. "
"Defaults to %r." % OBJDUMP_PATH)
# record flags
record_parser = parser.add_argument_group('record options')
@@ -1294,9 +1296,10 @@ if __name__ == "__main__":
help="perf events to record. This is passed directly to perf. "
"Defaults to %r." % PERF_EVENTS)
record_parser.add_argument(
'--perf-tool',
'--perf-path',
type=lambda x: x.split(),
help="Path to the perf tool to use. Defaults to %r." % PERF_TOOL)
help="Path to the perf executable, may include flags. "
"Defaults to %r." % PERF_PATH)
# avoid intermixed/REMAINDER conflict, see above
if nargs == argparse.REMAINDER:

scripts/perfbd.py

@@ -24,7 +24,7 @@ import shlex
import subprocess as sp
OBJDUMP_TOOL = ['objdump']
OBJDUMP_PATH = ['objdump']
THRESHOLD = (0.5, 0.85)
@@ -142,7 +142,7 @@ def openio(path, mode='r', buffering=-1):
return open(path, mode, buffering)
def collect_syms_and_lines(obj_path, *,
objdump_tool=None,
objdump_path=None,
**args):
symbol_pattern = re.compile(
'^(?P<addr>[0-9a-fA-F]+)'
@@ -171,7 +171,7 @@ def collect_syms_and_lines(obj_path, *,
# figure out symbol addresses
syms = {}
sym_at = []
cmd = objdump_tool + ['-t', obj_path]
cmd = objdump_path + ['-t', obj_path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -220,7 +220,7 @@ def collect_syms_and_lines(obj_path, *,
op_file = 1
op_line = 1
op_addr = 0
cmd = objdump_tool + ['--dwarf=rawline', obj_path]
cmd = objdump_path + ['--dwarf=rawline', obj_path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -1244,10 +1244,11 @@ if __name__ == "__main__":
const=0,
help="Number of processes to use. 0 spawns one process per core.")
parser.add_argument(
'--objdump-tool',
'--objdump-path',
type=lambda x: x.split(),
default=OBJDUMP_TOOL,
help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
default=OBJDUMP_PATH,
help="Path to the objdump executable, may include flags. "
"Defaults to %r." % OBJDUMP_PATH)
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

scripts/struct_.py

@@ -20,7 +20,7 @@ import shlex
import subprocess as sp
OBJDUMP_TOOL = ['objdump']
OBJDUMP_PATH = ['objdump']
@@ -129,7 +129,7 @@ def openio(path, mode='r', buffering=-1):
return open(path, mode, buffering)
def collect(obj_paths, *,
objdump_tool=OBJDUMP_TOOL,
objdump_path=OBJDUMP_PATH,
sources=None,
everything=False,
internal=False,
@@ -150,8 +150,8 @@ def collect(obj_paths, *,
# find files, we want to filter by structs in .h files
dirs = {}
files = {}
# note objdump-tool may contain extra args
cmd = objdump_tool + ['--dwarf=rawline', path]
# note objdump-path may contain extra args
cmd = objdump_path + ['--dwarf=rawline', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -191,8 +191,8 @@ def collect(obj_paths, *,
s_name = None
s_file = None
s_size = None
# note objdump-tool may contain extra args
cmd = objdump_tool + ['--dwarf=info', path]
# note objdump-path may contain extra args
cmd = objdump_path + ['--dwarf=info', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
@@ -626,10 +626,11 @@ if __name__ == "__main__":
action='store_true',
help="Also show structs in .c files.")
parser.add_argument(
'--objdump-tool',
'--objdump-path',
type=lambda x: x.split(),
default=OBJDUMP_TOOL,
help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
default=OBJDUMP_PATH,
help="Path to the objdump executable, may include flags. "
"Defaults to %r." % OBJDUMP_PATH)
sys.exit(main(**{k: v
for k, v in vars(parser.parse_intermixed_args()).items()
if v is not None}))

scripts/test.py

@@ -30,8 +30,8 @@ import toml
RUNNER_PATH = './runners/test_runner'
HEADER_PATH = 'runners/test_runner.h'
GDB_TOOL = ['gdb']
VALGRIND_TOOL = ['valgrind']
GDB_PATH = ['gdb']
VALGRIND_PATH = ['valgrind']
PERF_SCRIPT = ['./scripts/perf.py']
@@ -516,7 +516,7 @@ def find_runner(runner, **args):
# run under valgrind?
if args.get('valgrind'):
cmd[:0] = args['valgrind_tool'] + [
cmd[:0] = args['valgrind_path'] + [
'--leak-check=full',
'--track-origins=yes',
'--error-exitcode=4',
@@ -532,8 +532,8 @@ def find_runner(runner, **args):
if args.get('perf_period') else None,
'--perf-events=%s' % args['perf_events']
if args.get('perf_events') else None,
'--perf-tool=%s' % args['perf_tool']
if args.get('perf_tool') else None,
'--perf-path=%s' % args['perf_path']
if args.get('perf_path') else None,
'-o%s' % args['perf']]))
# other context
@@ -1144,24 +1144,24 @@ def run(runner, test_ids=[], **args):
cmd = runner_ + [failure.id]
if args.get('gdb_main'):
cmd[:0] = args['gdb_tool'] + [
cmd[:0] = args['gdb_path'] + [
'-ex', 'break main',
'-ex', 'run',
'--args']
elif args.get('gdb_case'):
path, lineno = find_path(runner_, failure.id, **args)
cmd[:0] = args['gdb_tool'] + [
cmd[:0] = args['gdb_path'] + [
'-ex', 'break %s:%d' % (path, lineno),
'-ex', 'run',
'--args']
elif failure.assert_ is not None:
cmd[:0] = args['gdb_tool'] + [
cmd[:0] = args['gdb_path'] + [
'-ex', 'run',
'-ex', 'frame function raise',
'-ex', 'up 2',
'--args']
else:
cmd[:0] = args['gdb_tool'] + [
cmd[:0] = args['gdb_path'] + [
'-ex', 'run',
'--args']
@@ -1353,10 +1353,11 @@ if __name__ == "__main__":
help="Drop into gdb on test failure but stop at the beginning "
"of main.")
test_parser.add_argument(
'--gdb-tool',
'--gdb-path',
type=lambda x: x.split(),
default=GDB_TOOL,
help="Path to gdb tool to use. Defaults to %r." % GDB_TOOL)
default=GDB_PATH,
help="Path to the gdb executable, may include flags. "
"Defaults to %r." % GDB_PATH)
test_parser.add_argument(
'--exec',
type=lambda e: e.split(),
@@ -1367,10 +1368,11 @@ if __name__ == "__main__":
help="Run under Valgrind to find memory errors. Implicitly sets "
"--isolate.")
test_parser.add_argument(
'--valgrind-tool',
'--valgrind-path',
type=lambda x: x.split(),
default=VALGRIND_TOOL,
help="Path to Valgrind tool to use. Defaults to %r." % VALGRIND_TOOL)
default=VALGRIND_PATH,
help="Path to the Valgrind executable, may include flags. "
"Defaults to %r." % VALGRIND_PATH)
test_parser.add_argument(
'-p', '--perf',
help="Run under Linux's perf to sample performance counters, writing "
@@ -1393,10 +1395,10 @@ if __name__ == "__main__":
default=PERF_SCRIPT,
help="Path to the perf script to use. Defaults to %r." % PERF_SCRIPT)
test_parser.add_argument(
'--perf-tool',
'--perf-path',
type=lambda x: x.split(),
help="Path to the perf tool to use. This is passed directly to the "
"perf script")
help="Path to the perf executable, may include flags. This is passed "
"directly to the perf script")
# compilation flags
comp_parser = parser.add_argument_group('compilation options')