mirror of
https://github.com/littlefs-project/littlefs.git
synced 2025-12-08 00:22:44 +00:00
Added perf.py, a wrapper around Linux's perf tool for perf sampling
This provides two things:

1. perf integration with the bench/test runners. This is a bit tricky
   with perf, as it doesn't provide its own way to combine perf
   measurements across multiple processes. perf.py works around this by
   writing everything to a zip file, using flock to synchronize. As a
   plus: free compression!

2. Parsing and presentation of perf results in a format consistent with
   the other CSV-based tools. This actually ran into a surprising number
   of issues:

   - We need to process raw events to get the information we want, and
     this ends up being a lot of data (~16MiB at 100Hz uncompressed), so
     we parallelize the parsing of each decompressed perf file.

   - perf reports raw addresses post-ASLR. It does provide sym+off,
     which is very useful, but to find the source of static functions we
     need to reverse the ASLR by finding the delta that produces the
     best symbol<->addr matches.

   - This isn't related to perf, but decoding DWARF line-numbers is
     really complicated. You basically need to write a tiny VM.

Rough sketches of these tricks follow below.

This also turns on perf measurement by default for the bench-runner, but
at a low frequency (100 Hz). This can be decreased or removed in the
future if it causes any slowdown.
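A minimal sketch of the flock-synchronized zip writing from (1),
assuming a hypothetical append_perf_data helper; perf.py's actual code
differs in detail, this just shows the locking scheme:

    import fcntl
    import zipfile

    def append_perf_data(zip_path, name, data):
        # multiple runner processes may record samples at the same time,
        # so hold an exclusive flock on the zip for the duration of the
        # append; the zip container also gives us compression for free
        with open(zip_path, 'ab') as lock:
            fcntl.flock(lock, fcntl.LOCK_EX)
            # mode 'a' appends a new member without rewriting old ones
            with zipfile.ZipFile(zip_path, 'a',
                    compression=zipfile.ZIP_DEFLATED) as z:
                z.writestr(name, data)
            # closing the lock file releases the flock

Each runner process can call this with a unique member name; readers
later just iterate over the zip's namelist.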
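The parallel parsing from the first sub-point, reduced to its shape;
parse_perf_events here is a stand-in for the real event decoding, which
is the expensive part:

    import multiprocessing.pool
    import zipfile

    def parse_perf_events(raw):
        # stand-in for the real decoding of raw perf events
        return raw.count(b'\n')

    def collect_samples(zip_path):
        # raw events add up quickly (~16MiB per run at 100Hz
        # uncompressed), so parse each decompressed perf file in a
        # separate worker process
        with zipfile.ZipFile(zip_path) as z:
            datas = [z.read(name) for name in z.namelist()]
        with multiprocessing.pool.Pool() as p:
            return p.map(parse_perf_events, datas)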
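The ASLR reversal is simpler than it sounds: for every sample whose
symbol we know, runtime_addr - (sym_addr + off) is a candidate load
delta, and the delta that explains the most samples wins. A toy
version, with syms assumed to map symbol names to link-time addresses:

    import collections

    def find_aslr_delta(syms, samples):
        # samples are (runtime_addr, sym, off) tuples as reported by perf
        deltas = collections.Counter()
        for addr, sym, off in samples:
            if sym in syms:
                deltas[addr - (syms[sym] + off)] += 1
        # the delta producing the best symbol<->addr matches is almost
        # certainly the ASLR offset; once known, static functions can be
        # mapped back to link-time addresses via the symbol table
        return deltas.most_common(1)[0][0] if deltas else 0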
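And the DWARF point is not an exaggeration: .debug_line is literally a
bytecode program that consumers must execute to recover the
address->line table. A heavily reduced interpreter over hypothetical
pre-decoded opcodes, just to show the state-machine flavor:

    def run_line_program(ops):
        # DWARF encodes line info as a program for a small state machine;
        # running it emits the (address, file, line) rows of the line table
        address, file, line = 0, 1, 1
        rows = []
        for op, arg in ops:
            if op == 'advance_pc':
                address += arg
            elif op == 'advance_line':
                line += arg
            elif op == 'set_file':
                file = arg
            elif op == 'copy':
                rows.append((address, file, line))
            elif op == 'end_sequence':
                rows.append((address, file, line))
                address, file, line = 0, 1, 1
        return rows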
.gitignore | 1 line changed (vendored)

@@ -9,6 +9,7 @@
 *.a.c
 *.gcno
 *.gcda
+*.perf
 
 # Testing things
 blocks/
Makefile | 189 lines changed

@@ -19,44 +19,52 @@ TARGET ?= $(BUILDDIR)lfs.a
 endif
 
 
 CC ?= gcc
 AR ?= ar
 SIZE ?= size
 CTAGS ?= ctags
 NM ?= nm
 OBJDUMP ?= objdump
-LCOV ?= lcov
+VALGRIND ?= valgrind
+GDB ?= gdb
+PERF ?= perf
 
 SRC ?= $(filter-out $(wildcard *.*.c),$(wildcard *.c))
 OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
 DEP := $(SRC:%.c=$(BUILDDIR)%.d)
 ASM := $(SRC:%.c=$(BUILDDIR)%.s)
 CI := $(SRC:%.c=$(BUILDDIR)%.ci)
 GCDA := $(SRC:%.c=$(BUILDDIR)%.t.a.gcda)
 
 TESTS ?= $(wildcard tests/*.toml)
 TEST_SRC ?= $(SRC) \
         $(filter-out $(wildcard bd/*.*.c),$(wildcard bd/*.c)) \
         runners/test_runner.c
-TEST_TC := $(TESTS:%.toml=$(BUILDDIR)%.t.c) $(TEST_SRC:%.c=$(BUILDDIR)%.t.c)
-TEST_TAC := $(TEST_TC:%.t.c=%.t.a.c)
-TEST_OBJ := $(TEST_TAC:%.t.a.c=%.t.a.o)
-TEST_DEP := $(TEST_TAC:%.t.a.c=%.t.a.d)
-TEST_CI := $(TEST_TAC:%.t.a.c=%.t.a.ci)
+TEST_RUNNER ?= $(BUILDDIR)runners/test_runner
+TEST_TC := $(TESTS:%.toml=$(BUILDDIR)%.t.c) \
+        $(TEST_SRC:%.c=$(BUILDDIR)%.t.c)
+TEST_TAC := $(TEST_TC:%.t.c=%.t.a.c)
+TEST_OBJ := $(TEST_TAC:%.t.a.c=%.t.a.o)
+TEST_DEP := $(TEST_TAC:%.t.a.c=%.t.a.d)
+TEST_CI := $(TEST_TAC:%.t.a.c=%.t.a.ci)
 TEST_GCNO := $(TEST_TAC:%.t.a.c=%.t.a.gcno)
 TEST_GCDA := $(TEST_TAC:%.t.a.c=%.t.a.gcda)
+TEST_PERF := $(TEST_RUNNER:%=%.perf)
 
 BENCHES ?= $(wildcard benches/*.toml)
 BENCH_SRC ?= $(SRC) \
         $(filter-out $(wildcard bd/*.*.c),$(wildcard bd/*.c)) \
         runners/bench_runner.c
-BENCH_BC := $(BENCHES:%.toml=$(BUILDDIR)%.b.c) $(BENCH_SRC:%.c=$(BUILDDIR)%.b.c)
-BENCH_BAC := $(BENCH_BC:%.b.c=%.b.a.c)
-BENCH_OBJ := $(BENCH_BAC:%.b.a.c=%.b.a.o)
-BENCH_DEP := $(BENCH_BAC:%.b.a.c=%.b.a.d)
-BENCH_CI := $(BENCH_BAC:%.b.a.c=%.b.a.ci)
+BENCH_RUNNER ?= $(BUILDDIR)runners/bench_runner
+BENCH_BC := $(BENCHES:%.toml=$(BUILDDIR)%.b.c) \
+        $(BENCH_SRC:%.c=$(BUILDDIR)%.b.c)
+BENCH_BAC := $(BENCH_BC:%.b.c=%.b.a.c)
+BENCH_OBJ := $(BENCH_BAC:%.b.a.c=%.b.a.o)
+BENCH_DEP := $(BENCH_BAC:%.b.a.c=%.b.a.d)
+BENCH_CI := $(BENCH_BAC:%.b.a.c=%.b.a.ci)
 BENCH_GCNO := $(BENCH_BAC:%.b.a.c=%.b.a.gcno)
 BENCH_GCDA := $(BENCH_BAC:%.b.a.c=%.b.a.gcda)
+BENCH_PERF := $(BENCH_RUNNER:%=%.perf)
 
 ifdef DEBUG
 override CFLAGS += -O0
@@ -71,40 +79,67 @@ override CFLAGS += -I.
 override CFLAGS += -std=c99 -Wall -pedantic
 override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef
 override CFLAGS += -ftrack-macro-expansion=0
+ifdef YES_COVERAGE
+override CFLAGS += --coverage
+endif
+ifdef YES_PERF
+override CFLAGS += -fno-omit-frame-pointer
+endif
 
-override TESTFLAGS += -b
-override BENCHFLAGS += -b
-# forward -j flag
-override TESTFLAGS += $(filter -j%,$(MAKEFLAGS))
-override BENCHFLAGS += $(filter -j%,$(MAKEFLAGS))
 ifdef VERBOSE
 override CODEFLAGS += -v
 override DATAFLAGS += -v
 override STACKFLAGS += -v
 override STRUCTFLAGS += -v
 override COVERAGEFLAGS += -v
-override TESTFLAGS += -v
-override TESTCFLAGS += -v
-override BENCHFLAGS += -v
-override BENCHCFLAGS += -v
-endif
-ifdef EXEC
-override TESTFLAGS += --exec="$(EXEC)"
-override BENCHFLAGS += --exec="$(EXEC)"
-endif
-ifdef BUILDDIR
-override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
-override DATAFLAGS += --build-dir="$(BUILDDIR:/=)"
-override STACKFLAGS += --build-dir="$(BUILDDIR:/=)"
-override STRUCTFLAGS += --build-dir="$(BUILDDIR:/=)"
-override COVERAGEFLAGS += --build-dir="$(BUILDDIR:/=)"
+override PERFFLAGS += -v
 endif
 ifneq ($(NM),nm)
 override CODEFLAGS += --nm-tool="$(NM)"
 override DATAFLAGS += --nm-tool="$(NM)"
 endif
 ifneq ($(OBJDUMP),objdump)
+override CODEFLAGS += --objdump-tool="$(OBJDUMP)"
+override DATAFLAGS += --objdump-tool="$(OBJDUMP)"
 override STRUCTFLAGS += --objdump-tool="$(OBJDUMP)"
+override PERFFLAGS += --objdump-tool="$(OBJDUMP)"
+endif
+ifneq ($(PERF),perf)
+override PERFFLAGS += --perf-tool="$(PERF)"
+endif
+
+override TESTFLAGS += -b
+override BENCHFLAGS += -b
+# forward -j flag
+override TESTFLAGS += $(filter -j%,$(MAKEFLAGS))
+override BENCHFLAGS += $(filter -j%,$(MAKEFLAGS))
+ifdef YES_PERF
+override TESTFLAGS += --perf=$(TEST_PERF)
+endif
+ifndef NO_PERF
+override BENCHFLAGS += --perf=$(BENCH_PERF)
+endif
+ifdef VERBOSE
+override TESTFLAGS += -v
+override TESTCFLAGS += -v
+override BENCHFLAGS += -v
+override BENCHCFLAGS += -v
+endif
+ifdef EXEC
+override TESTFLAGS += --exec="$(EXEC)"
+override BENCHFLAGS += --exec="$(EXEC)"
+endif
+ifneq ($(GDB),gdb)
+override TESTFLAGS += --gdb-tool="$(GDB)"
+override BENCHFLAGS += --gdb-tool="$(GDB)"
+endif
+ifneq ($(VALGRIND),valgrind)
+override TESTFLAGS += --valgrind-tool="$(VALGRIND)"
+override BENCHFLAGS += --valgrind-tool="$(VALGRIND)"
+endif
+ifneq ($(PERF),perf)
+override TESTFLAGS += --perf-tool="$(PERF)"
+override BENCHFLAGS += --perf-tool="$(PERF)"
+endif
 endif
 
 
@@ -124,28 +159,50 @@ tags:
 	$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)
 
 .PHONY: test-runner build-test
+ifndef NO_COVERAGE
 test-runner build-test: override CFLAGS+=--coverage
-test-runner build-test: $(BUILDDIR)runners/test_runner
+endif
+ifdef YES_PERF
+test-runner build-test: override CFLAGS+=-fno-omit-frame-pointer
+endif
+test-runner build-test: $(TEST_RUNNER)
+ifndef NO_COVERAGE
 	rm -f $(TEST_GCDA)
+endif
+ifdef YES_PERF
+	rm -f $(TEST_PERF)
+endif
 
 .PHONY: test
 test: test-runner
-	./scripts/test.py $(BUILDDIR)runners/test_runner $(TESTFLAGS)
+	./scripts/test.py $(TEST_RUNNER) $(TESTFLAGS)
 
 .PHONY: test-list
 test-list: test-runner
-	./scripts/test.py $(BUILDDIR)runners/test_runner $(TESTFLAGS) -l
+	./scripts/test.py $(TEST_RUNNER) $(TESTFLAGS) -l
 
 .PHONY: bench-runner build-bench
-bench-runner build-bench: $(BUILDDIR)runners/bench_runner
+ifdef YES_COVERAGE
+bench-runner build-bench: override CFLAGS+=--coverage
+endif
+ifndef NO_PERF
+bench-runner build-bench: override CFLAGS+=-fno-omit-frame-pointer
+endif
+bench-runner build-bench: $(BENCH_RUNNER)
+ifdef YES_COVERAGE
+	rm -f $(BENCH_GCDA)
+endif
+ifndef NO_PERF
+	rm -f $(BENCH_PERF)
+endif
 
 .PHONY: bench
 bench: bench-runner
-	./scripts/bench.py $(BUILDDIR)runners/bench_runner $(BENCHFLAGS)
+	./scripts/bench.py $(BENCH_RUNNER) $(BENCHFLAGS)
 
 .PHONY: bench-list
 bench-list: bench-runner
-	./scripts/bench.py $(BUILDDIR)runners/bench_runner $(BENCHFLAGS) -l
+	./scripts/bench.py $(BENCH_RUNNER) $(BENCHFLAGS) -l
 
 .PHONY: code
 code: $(OBJ)
@@ -165,7 +222,17 @@ struct: $(OBJ)
 
 .PHONY: coverage
 coverage: $(GCDA)
-	./scripts/coverage.py $^ -slines -sbranches $(COVERAGEFLAGS)
+	$(strip ./scripts/coverage.py \
+		$^ $(patsubst %,-F%,$(SRC)) \
+		-slines -sbranches \
+		$(COVERAGEFLAGS))
 
+.PHONY: perf
+perf: $(BENCH_PERF)
+	$(strip ./scripts/perf.py \
+		$^ $(patsubst %,-F%,$(SRC)) \
+		-scycles \
+		$(PERFFLAGS))
+
 .PHONY: summary sizes
 summary sizes: $(BUILDDIR)lfs.csv
@@ -203,7 +270,10 @@ $(BUILDDIR)lfs.struct.csv: $(OBJ)
 	./scripts/struct_.py $^ -q $(CODEFLAGS) -o $@
 
 $(BUILDDIR)lfs.coverage.csv: $(GCDA)
-	./scripts/coverage.py $^ -q $(COVERAGEFLAGS) -o $@
+	./scripts/coverage.py $^ $(patsubst %,-F%,$(SRC)) -q $(COVERAGEFLAGS) -o $@
+
+$(BUILDDIR)lfs.perf.csv: $(BENCH_PERF)
+	./scripts/perf.py $^ $(patsubst %,-F%,$(SRC)) -q $(PERFFLAGS) -o $@
 
 $(BUILDDIR)lfs.csv: \
 	$(BUILDDIR)lfs.code.csv \
@@ -255,13 +325,13 @@ clean:
 		$(BUILDDIR)lfs.data.csv \
 		$(BUILDDIR)lfs.stack.csv \
 		$(BUILDDIR)lfs.struct.csv \
-		$(BUILDDIR)lfs.coverage.csv)
-	rm -f $(BUILDDIR)runners/test_runner
-	rm -f $(BUILDDIR)runners/bench_runner
+		$(BUILDDIR)lfs.coverage.csv \
+		$(BUILDDIR)lfs.perf.csv)
 	rm -f $(OBJ)
 	rm -f $(DEP)
 	rm -f $(ASM)
 	rm -f $(CI)
+	rm -f $(TEST_RUNNER)
 	rm -f $(TEST_TC)
 	rm -f $(TEST_TAC)
 	rm -f $(TEST_OBJ)
@@ -269,6 +339,8 @@ clean:
 	rm -f $(TEST_CI)
 	rm -f $(TEST_GCNO)
 	rm -f $(TEST_GCDA)
+	rm -f $(TEST_PERF)
+	rm -f $(BENCH_RUNNER)
 	rm -f $(BENCH_BC)
 	rm -f $(BENCH_BAC)
 	rm -f $(BENCH_OBJ)
@@ -276,3 +348,4 @@ clean:
 	rm -f $(BENCH_CI)
 	rm -f $(BENCH_GCNO)
 	rm -f $(BENCH_GCDA)
+	rm -f $(BENCH_PERF)

scripts/bench.py
@@ -27,9 +27,13 @@ import time
 import toml
 
 
-RUNNER_PATH = 'runners/bench_runner'
+RUNNER_PATH = './runners/bench_runner'
 HEADER_PATH = 'runners/bench_runner.h'
 
+GDB_TOOL = ['gdb']
+VALGRIND_TOOL = ['valgrind']
+PERF_SCRIPT = ['./scripts/perf.py']
+
 
 def openio(path, mode='r', buffering=-1, nb=False):
     if path == '-':
@@ -502,12 +506,25 @@ def find_runner(runner, **args):
 
     # run under valgrind?
     if args.get('valgrind'):
-        cmd[:0] = filter(None, [
-            'valgrind',
+        cmd[:0] = args['valgrind_tool'] + [
             '--leak-check=full',
             '--track-origins=yes',
             '--error-exitcode=4',
-            '-q'])
+            '-q']
 
+    # run under perf?
+    if args.get('perf'):
+        cmd[:0] = args['perf_script'] + list(filter(None, [
+            '-R',
+            '--perf-freq=%s' % args['perf_freq']
+                if args.get('perf_freq') else None,
+            '--perf-period=%s' % args['perf_period']
+                if args.get('perf_period') else None,
+            '--perf-events=%s' % args['perf_events']
+                if args.get('perf_events') else None,
+            '--perf-tool=%s' % args['perf_tool']
+                if args.get('perf_tool') else None,
+            '-o%s' % args['perf']]))
+
     # other context
     if args.get('geometry'):
@@ -789,9 +806,9 @@ def run_stage(name, runner_, ids, output_, **args):
             try:
                 line = mpty.readline()
             except OSError as e:
-                if e.errno == errno.EIO:
-                    break
-                raise
+                if e.errno != errno.EIO:
+                    raise
+                break
             if not line:
                 break
             last_stdout.append(line)
@@ -1126,24 +1143,24 @@ def run(runner, bench_ids=[], **args):
         cmd = runner_ + [failure.id]
 
         if args.get('gdb_main'):
-            cmd[:0] = ['gdb',
+            cmd[:0] = args['gdb_tool'] + [
                 '-ex', 'break main',
                 '-ex', 'run',
                 '--args']
         elif args.get('gdb_case'):
             path, lineno = find_path(runner_, failure.id, **args)
-            cmd[:0] = ['gdb',
+            cmd[:0] = args['gdb_tool'] + [
                 '-ex', 'break %s:%d' % (path, lineno),
                 '-ex', 'run',
                 '--args']
         elif failure.assert_ is not None:
-            cmd[:0] = ['gdb',
+            cmd[:0] = args['gdb_tool'] + [
                 '-ex', 'run',
                 '-ex', 'frame function raise',
                 '-ex', 'up 2',
                 '--args']
         else:
-            cmd[:0] = ['gdb',
+            cmd[:0] = args['gdb_tool'] + [
                 '-ex', 'run',
                 '--args']
 
@@ -1187,6 +1204,7 @@ if __name__ == "__main__":
    argparse._ArgumentGroup._handle_conflict_ignore = lambda *_: None
    parser = argparse.ArgumentParser(
        description="Build and run benches.",
+        allow_abbrev=False,
        conflict_handler='ignore')
    parser.add_argument(
        '-v', '--verbose',
@@ -1315,6 +1333,11 @@ if __name__ == "__main__":
         action='store_true',
         help="Drop into gdb on bench failure but stop at the beginning "
             "of main.")
+    bench_parser.add_argument(
+        '--gdb-tool',
+        type=lambda x: x.split(),
+        default=GDB_TOOL,
+        help="Path to gdb tool to use. Defaults to %r." % GDB_TOOL)
     bench_parser.add_argument(
         '--exec',
         type=lambda e: e.split(),
@@ -1324,6 +1347,37 @@ if __name__ == "__main__":
         action='store_true',
         help="Run under Valgrind to find memory errors. Implicitly sets "
             "--isolate.")
+    bench_parser.add_argument(
+        '--valgrind-tool',
+        type=lambda x: x.split(),
+        default=VALGRIND_TOOL,
+        help="Path to Valgrind tool to use. Defaults to %r." % VALGRIND_TOOL)
+    bench_parser.add_argument(
+        '--perf',
+        help="Run under Linux's perf to sample performance counters, writing "
+            "samples to this file.")
+    bench_parser.add_argument(
+        '--perf-freq',
+        help="perf sampling frequency. This is passed directly to the perf "
+            "script.")
+    bench_parser.add_argument(
+        '--perf-period',
+        help="perf sampling period. This is passed directly to the perf "
+            "script.")
+    bench_parser.add_argument(
+        '--perf-events',
+        help="perf events to record. This is passed directly to the perf "
+            "script.")
+    bench_parser.add_argument(
+        '--perf-script',
+        type=lambda x: x.split(),
+        default=PERF_SCRIPT,
+        help="Path to the perf script to use. Defaults to %r." % PERF_SCRIPT)
+    bench_parser.add_argument(
+        '--perf-tool',
+        type=lambda x: x.split(),
+        help="Path to the perf tool to use. This is passed directly to the "
+            "perf script")
 
     # compilation flags
     comp_parser = parser.add_argument_group('compilation options')
@@ -1348,7 +1402,7 @@ if __name__ == "__main__":
         '-o', '--output',
         help="Output file.")
 
-    # runner + bench_ids overlaps bench_paths, so we need to do some munging here
+    # runner/bench_paths overlap, so need to do some munging here
     args = parser.parse_intermixed_args()
     args.bench_paths = [' '.join(args.runner or [])] + args.bench_ids
     args.runner = args.runner or [RUNNER_PATH]

scripts/code.py | 200 lines changed
@@ -5,7 +5,7 @@
 # by Linux's Bloat-O-Meter.
 #
 # Example:
-# ./scripts/code.py lfs.o lfs_util.o -S
+# ./scripts/code.py lfs.o lfs_util.o -Ssize
 #
 # Copyright (c) 2022, The littlefs authors.
 # Copyright (c) 2020, Arm Limited. All rights reserved.
@@ -14,6 +14,7 @@
 
 import collections as co
 import csv
+import difflib
 import glob
 import itertools as it
 import math as m
@@ -25,7 +26,8 @@ import subprocess as sp
 
 OBJ_PATHS = ['*.o']
 NM_TOOL = ['nm']
-TYPE = 'tTrRdD'
+NM_TYPES = 'tTrRdD'
+OBJDUMP_TOOL = ['objdump']
 
 
 # integer fields
@@ -135,21 +137,32 @@ def openio(path, mode='r'):
 
 def collect(paths, *,
         nm_tool=NM_TOOL,
-        type=TYPE,
-        build_dir=None,
+        nm_types=NM_TYPES,
+        objdump_tool=OBJDUMP_TOOL,
+        sources=None,
         everything=False,
         **args):
-    results = []
-    pattern = re.compile(
+    size_pattern = re.compile(
         '^(?P<size>[0-9a-fA-F]+)' +
-        ' (?P<type>[%s])' % re.escape(type) +
+        ' (?P<type>[%s])' % re.escape(nm_types) +
         ' (?P<func>.+?)$')
+    line_pattern = re.compile(
+        '^\s+(?P<no>[0-9]+)\s+'
+            '(?:(?P<dir>[0-9]+)\s+)?'
+            '.*\s+'
+            '(?P<path>[^\s]+)$')
+    info_pattern = re.compile(
+        '^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
+            '|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
+            '|^.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*)$')
+
+    results = []
     for path in paths:
-        # map to source file
-        src_path = re.sub('\.o$', '.c', path)
-        if build_dir:
-            src_path = re.sub('%s/*' % re.escape(build_dir), '',
-                src_path)
+        # guess the source, if we have debug-info we'll replace this later
+        file = re.sub('(\.o)?$', '.c', path, 1)
+
+        # find symbol sizes
+        results_ = []
         # note nm-tool may contain extra args
         cmd = nm_tool + ['--size-sort', path]
         if args.get('verbose'):
@@ -158,21 +171,18 @@ def collect(paths, *,
             stdout=sp.PIPE,
             stderr=sp.PIPE if not args.get('verbose') else None,
             universal_newlines=True,
-            errors='replace')
+            errors='replace',
+            close_fds=False)
         for line in proc.stdout:
-            m = pattern.match(line)
+            m = size_pattern.match(line)
             if m:
                 func = m.group('func')
                 # discard internal functions
                 if not everything and func.startswith('__'):
                     continue
-                # discard .8449 suffixes created by optimizer
-                func = re.sub('\.[0-9]+', '', func)
-                results.append(CodeResult(
-                    src_path, func,
+                results_.append(CodeResult(
+                    file, func,
                     int(m.group('size'), 16)))
 
         proc.wait()
         if proc.returncode != 0:
             if not args.get('verbose'):
@@ -180,6 +190,121 @@ def collect(paths, *,
                 sys.stdout.write(line)
             sys.exit(-1)
 
+
+        # try to figure out the source file if we have debug-info
+        dirs = {}
+        files = {}
+        # note objdump-tool may contain extra args
+        cmd = objdump_tool + ['--dwarf=rawline', path]
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd,
+            stdout=sp.PIPE,
+            stderr=sp.PIPE if not args.get('verbose') else None,
+            universal_newlines=True,
+            errors='replace',
+            close_fds=False)
+        for line in proc.stdout:
+            # note that files contain references to dirs, which we
+            # dereference as soon as we see them as each file table follows a
+            # dir table
+            m = line_pattern.match(line)
+            if m:
+                if not m.group('dir'):
+                    # found a directory entry
+                    dirs[int(m.group('no'))] = m.group('path')
+                else:
+                    # found a file entry
+                    dir = int(m.group('dir'))
+                    if dir in dirs:
+                        files[int(m.group('no'))] = os.path.join(
+                            dirs[dir],
+                            m.group('path'))
+                    else:
+                        files[int(m.group('no'))] = m.group('path')
+        proc.wait()
+        if proc.returncode != 0:
+            if not args.get('verbose'):
+                for line in proc.stderr:
+                    sys.stdout.write(line)
+            # do nothing on error, we don't need objdump to work, source files
+            # may just be inaccurate
+            pass
+
+        defs = {}
+        is_func = False
+        f_name = None
+        f_file = None
+        # note objdump-tool may contain extra args
+        cmd = objdump_tool + ['--dwarf=info', path]
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd,
+            stdout=sp.PIPE,
+            stderr=sp.PIPE if not args.get('verbose') else None,
+            universal_newlines=True,
+            errors='replace',
+            close_fds=False)
+        for line in proc.stdout:
+            # state machine here to find definitions
+            m = info_pattern.match(line)
+            if m:
+                if m.group('tag'):
+                    if is_func:
+                        defs[f_name] = files.get(f_file, '?')
+                    is_func = (m.group('tag') == 'DW_TAG_subprogram')
+                elif m.group('name'):
+                    f_name = m.group('name')
+                elif m.group('file'):
+                    f_file = int(m.group('file'))
+        proc.wait()
+        if proc.returncode != 0:
+            if not args.get('verbose'):
+                for line in proc.stderr:
+                    sys.stdout.write(line)
+            # do nothing on error, we don't need objdump to work, source files
+            # may just be inaccurate
+            pass
+
+        for r in results_:
+            # find best matching debug symbol, this may be slightly different
+            # due to optimizations
+            if defs:
+                # exact match? avoid difflib if we can for speed
+                if r.function in defs:
+                    file = defs[r.function]
+                else:
+                    _, file = max(
+                        defs.items(),
+                        key=lambda d: difflib.SequenceMatcher(None,
+                            d[0],
+                            r.function, False).ratio())
+            else:
+                file = r.file
+
+            # ignore filtered sources
+            if sources is not None:
+                if not any(
+                        os.path.abspath(file) == os.path.abspath(s)
+                        for s in sources):
+                    continue
+            else:
+                # default to only cwd
+                if not everything and not os.path.commonpath([
+                        os.getcwd(),
+                        os.path.abspath(file)]) == os.getcwd():
+                    continue
+
+            # simplify path
+            if os.path.commonpath([
+                    os.getcwd(),
+                    os.path.abspath(file)]) == os.getcwd():
+                file = os.path.relpath(file)
+            else:
+                file = os.path.abspath(file)
+
+            results.append(CodeResult(file, r.function, r.size))
+
     return results
 
 
@@ -437,7 +562,7 @@ def main(obj_paths, *,
             paths.append(path)
 
     if not paths:
-        print("error: no .obj files found in %r?" % obj_paths)
+        print("error: no .o files found in %r?" % obj_paths)
         sys.exit(-1)
 
     results = collect(paths, **args)
@@ -469,13 +594,16 @@ def main(obj_paths, *,
     # write results to CSV
     if args.get('output'):
         with openio(args['output'], 'w') as f:
-            writer = csv.DictWriter(f, CodeResult._by
+            writer = csv.DictWriter(f,
+                (by if by is not None else CodeResult._by)
                 + ['code_'+k for k in CodeResult._fields])
             writer.writeheader()
             for r in results:
                 writer.writerow(
-                    {k: getattr(r, k) for k in CodeResult._by}
-                    | {'code_'+k: getattr(r, k) for k in CodeResult._fields})
+                    {k: getattr(r, k)
+                        for k in (by if by is not None else CodeResult._by)}
+                    | {'code_'+k: getattr(r, k)
+                        for k in CodeResult._fields})
 
     # find previous results?
     if args.get('diff'):
@@ -512,7 +640,8 @@ if __name__ == "__main__":
     import argparse
     import sys
     parser = argparse.ArgumentParser(
-        description="Find code size at the function level.")
+        description="Find code size at the function level.",
+        allow_abbrev=False)
     parser.add_argument(
         'obj_paths',
         nargs='*',
@@ -579,23 +708,30 @@ if __name__ == "__main__":
         action='store_true',
         help="Only show the total.")
     parser.add_argument(
-        '-A', '--everything',
+        '-F', '--source',
+        dest='sources',
+        action='append',
+        help="Only consider definitions in this file. Defaults to anything "
+            "in the current directory.")
+    parser.add_argument(
+        '--everything',
         action='store_true',
         help="Include builtin and libc specific symbols.")
     parser.add_argument(
-        '--type',
-        default=TYPE,
+        '--nm-types',
+        default=NM_TYPES,
         help="Type of symbols to report, this uses the same single-character "
-            "type-names emitted by nm. Defaults to %r." % TYPE)
+            "type-names emitted by nm. Defaults to %r." % NM_TYPES)
     parser.add_argument(
         '--nm-tool',
         type=lambda x: x.split(),
         default=NM_TOOL,
         help="Path to the nm tool to use. Defaults to %r." % NM_TOOL)
     parser.add_argument(
-        '--build-dir',
-        help="Specify the relative build directory. Used to map object files "
-            "to the correct source files.")
+        '--objdump-tool',
+        type=lambda x: x.split(),
+        default=OBJDUMP_TOOL,
+        help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
     sys.exit(main(**{k: v
         for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

scripts/coverage.py
@@ -3,7 +3,9 @@
 # Script to find coverage info after running tests.
 #
 # Example:
-# ./scripts/coverage.py lfs.t.a.gcda lfs_util.t.a.gcda -s
+# ./scripts/coverage.py \
+#     lfs.t.a.gcda lfs_util.t.a.gcda \
+#     -Flfs.c -Flfs_util.c -slines
 #
 # Copyright (c) 2022, The littlefs authors.
 # Copyright (c) 2020, Arm Limited. All rights reserved.
@@ -209,19 +211,13 @@ def openio(path, mode='r'):
     else:
         return open(path, mode)
 
-def collect(paths, *,
+def collect(gcda_paths, *,
         gcov_tool=GCOV_TOOL,
-        build_dir=None,
+        sources=None,
         everything=False,
         **args):
     results = []
-    for path in paths:
-        # map to source file
-        src_path = re.sub('\.t\.a\.gcda$', '.c', path)
-        if build_dir:
-            src_path = re.sub('%s/*' % re.escape(build_dir), '',
-                src_path)
+    for path in gcda_paths:
 
         # get coverage info through gcov's json output
         # note, gcov-tool may contain extra args
         cmd = GCOV_TOOL + ['-b', '-t', '--json-format', path]
@@ -231,7 +227,8 @@ def collect(gcda_paths, *,
             stdout=sp.PIPE,
             stderr=sp.PIPE if not args.get('verbose') else None,
             universal_newlines=True,
-            errors='replace')
+            errors='replace',
+            close_fds=False)
         data = json.load(proc.stdout)
         proc.wait()
         if proc.returncode != 0:
@@ -242,12 +239,30 @@ def collect(gcda_paths, *,
 
         # collect line/branch coverage
         for file in data['files']:
-            if file['file'] != src_path:
-                continue
+            # ignore filtered sources
+            if sources is not None:
+                if not any(
+                        os.path.abspath(file['file']) == os.path.abspath(s)
+                        for s in sources):
+                    continue
+            else:
+                # default to only cwd
+                if not everything and not os.path.commonpath([
+                        os.getcwd(),
+                        os.path.abspath(file['file'])]) == os.getcwd():
+                    continue
+
+            # simplify path
+            if os.path.commonpath([
+                    os.getcwd(),
+                    os.path.abspath(file['file'])]) == os.getcwd():
+                file_name = os.path.relpath(file['file'])
+            else:
+                file_name = os.path.abspath(file['file'])
+
             for func in file['functions']:
                 func_name = func.get('name', '(inlined)')
-                # discard internal function (this includes injected test cases)
+                # discard internal functions (this includes injected test cases)
                 if not everything:
                     if func_name.startswith('__'):
                         continue
@@ -255,7 +270,7 @@ def collect(gcda_paths, *,
                 # go ahead and add functions, later folding will merge this if
                 # there are other hits on this line
                 results.append(CoverageResult(
-                    src_path, func_name, func['start_line'],
+                    file_name, func_name, func['start_line'],
                     func['execution_count'], 0,
                     Frac(1 if func['execution_count'] > 0 else 0, 1),
                     0,
@@ -271,7 +286,7 @@ def collect(gcda_paths, *,
                 # go ahead and add lines, later folding will merge this if
                 # there are other hits on this line
                 results.append(CoverageResult(
-                    src_path, func_name, line['line_number'],
+                    file_name, func_name, line['line_number'],
                     0, line['count'],
                     0,
                     Frac(1 if line['count'] > 0 else 0, 1),
@@ -519,31 +534,25 @@ def table(Result, results, diff_results=None, *,
                 line[-1]))
 
 
-def annotate(Result, results, paths, *,
+def annotate(Result, results, *,
         annotate=False,
         lines=False,
         branches=False,
-        build_dir=None,
         **args):
     # if neither branches/lines specified, color both
     if annotate and not lines and not branches:
         lines, branches = True, True
 
-    for path in paths:
-        # map to source file
-        src_path = re.sub('\.t\.a\.gcda$', '.c', path)
-        if build_dir:
-            src_path = re.sub('%s/*' % re.escape(build_dir), '',
-                src_path)
-
+    for path in co.OrderedDict.fromkeys(r.file for r in results).keys():
         # flatten to line info
         results = fold(Result, results, by=['file', 'line'])
-        table = {r.line: r for r in results if r.file == src_path}
+        table = {r.line: r for r in results if r.file == path}
 
         # calculate spans to show
         if not annotate:
             spans = []
             last = None
+            func = None
             for line, r in sorted(table.items()):
                 if ((lines and int(r.hits) == 0)
                         or (branches and r.branches.a < r.branches.b)):
@@ -553,27 +562,29 @@ def annotate(Result, results, paths, *,
                         line+1+args['context'])
                 else:
                     if last is not None:
-                        spans.append(last)
+                        spans.append((last, func))
                     last = range(
                         line-args['context'],
                         line+1+args['context'])
+                    func = r.function
             if last is not None:
-                spans.append(last)
+                spans.append((last, func))
 
-        with open(src_path) as f:
+        with open(path) as f:
             skipped = False
             for i, line in enumerate(f):
                 # skip lines not in spans?
-                if not annotate and not any(i+1 in s for s in spans):
+                if not annotate and not any(i+1 in s for s, _ in spans):
                     skipped = True
                     continue
 
                 if skipped:
                     skipped = False
-                    print('%s@@ %s:%d @@%s' % (
+                    print('%s@@ %s:%d: %s @@%s' % (
                         '\x1b[36m' if args['color'] else '',
-                        src_path,
+                        path,
                         i+1,
+                        next(iter(f for _, f in spans)),
                         '\x1b[m' if args['color'] else ''))
 
                 # build line
@@ -659,12 +670,14 @@ def main(gcda_paths, *,
     # write results to CSV
     if args.get('output'):
         with openio(args['output'], 'w') as f:
-            writer = csv.DictWriter(f, CoverageResult._by
+            writer = csv.DictWriter(f,
+                (by if by is not None else CoverageResult._by)
                 + ['coverage_'+k for k in CoverageResult._fields])
             writer.writeheader()
             for r in results:
                 writer.writerow(
-                    {k: getattr(r, k) for k in CoverageResult._by}
+                    {k: getattr(r, k)
+                        for k in (by if by is not None else CoverageResult._by)}
                     | {'coverage_'+k: getattr(r, k)
                         for k in CoverageResult._fields})
 
@@ -698,8 +711,7 @@ def main(gcda_paths, *,
             or args.get('lines')
             or args.get('branches')):
         # annotate sources
-        annotate(CoverageResult, results, paths,
-            **args)
+        annotate(CoverageResult, results, **args)
     else:
         # print table
         table(CoverageResult, results,
@@ -724,7 +736,8 @@ if __name__ == "__main__":
     import argparse
     import sys
     parser = argparse.ArgumentParser(
-        description="Find coverage info after running tests.")
+        description="Find coverage info after running tests.",
+        allow_abbrev=False)
     parser.add_argument(
         'gcda_paths',
         nargs='*',
@@ -791,15 +804,21 @@ if __name__ == "__main__":
         action='store_true',
         help="Only show the total.")
     parser.add_argument(
-        '-A', '--everything',
+        '-F', '--source',
+        dest='sources',
+        action='append',
+        help="Only consider definitions in this file. Defaults to anything "
+            "in the current directory.")
+    parser.add_argument(
+        '--everything',
         action='store_true',
         help="Include builtin and libc specific symbols.")
     parser.add_argument(
-        '-H', '--hits',
+        '--hits',
         action='store_true',
         help="Show total hits instead of coverage.")
     parser.add_argument(
-        '-l', '--annotate',
+        '-A', '--annotate',
         action='store_true',
         help="Show source files annotated with coverage info.")
     parser.add_argument(
@@ -814,7 +833,7 @@ if __name__ == "__main__":
         '-c', '--context',
         type=lambda x: int(x, 0),
         default=3,
-        help="Show a additional lines of context. Defaults to 3.")
+        help="Show n additional lines of context. Defaults to 3.")
     parser.add_argument(
         '-W', '--width',
         type=lambda x: int(x, 0),
@@ -838,10 +857,6 @@ if __name__ == "__main__":
         default=GCOV_TOOL,
         type=lambda x: x.split(),
         help="Path to the gcov tool to use. Defaults to %r." % GCOV_TOOL)
-    parser.add_argument(
-        '--build-dir',
-        help="Specify the relative build directory. Used to map object files "
-            "to the correct source files.")
     sys.exit(main(**{k: v
         for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

scripts/data.py | 200 lines changed
@@ -5,7 +5,7 @@
 # by Linux's Bloat-O-Meter.
 #
 # Example:
-# ./scripts/data.py lfs.o lfs_util.o -S
+# ./scripts/data.py lfs.o lfs_util.o -Ssize
 #
 # Copyright (c) 2022, The littlefs authors.
 # Copyright (c) 2020, Arm Limited. All rights reserved.
@@ -14,6 +14,7 @@
 
 import collections as co
 import csv
+import difflib
 import glob
 import itertools as it
 import math as m
@@ -25,7 +26,8 @@ import subprocess as sp
 
 OBJ_PATHS = ['*.o']
 NM_TOOL = ['nm']
-TYPE = 'dDbB'
+NM_TYPES = 'dDbB'
+OBJDUMP_TOOL = ['objdump']
 
 
 # integer fields
@@ -135,21 +137,32 @@ def openio(path, mode='r'):
 
 def collect(paths, *,
         nm_tool=NM_TOOL,
-        type=TYPE,
-        build_dir=None,
+        nm_types=NM_TYPES,
+        objdump_tool=OBJDUMP_TOOL,
+        sources=None,
         everything=False,
         **args):
-    results = []
-    pattern = re.compile(
+    size_pattern = re.compile(
         '^(?P<size>[0-9a-fA-F]+)' +
-        ' (?P<type>[%s])' % re.escape(type) +
+        ' (?P<type>[%s])' % re.escape(nm_types) +
         ' (?P<func>.+?)$')
+    line_pattern = re.compile(
+        '^\s+(?P<no>[0-9]+)\s+'
+            '(?:(?P<dir>[0-9]+)\s+)?'
+            '.*\s+'
+            '(?P<path>[^\s]+)$')
+    info_pattern = re.compile(
+        '^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
+            '|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
+            '|^.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*)$')
+
+    results = []
     for path in paths:
-        # map to source file
-        src_path = re.sub('\.o$', '.c', path)
-        if build_dir:
-            src_path = re.sub('%s/*' % re.escape(build_dir), '',
-                src_path)
+        # guess the source, if we have debug-info we'll replace this later
+        file = re.sub('(\.o)?$', '.c', path, 1)
+
+        # find symbol sizes
+        results_ = []
         # note nm-tool may contain extra args
         cmd = nm_tool + ['--size-sort', path]
         if args.get('verbose'):
@@ -158,21 +171,18 @@ def collect(paths, *,
             stdout=sp.PIPE,
             stderr=sp.PIPE if not args.get('verbose') else None,
             universal_newlines=True,
-            errors='replace')
+            errors='replace',
+            close_fds=False)
         for line in proc.stdout:
-            m = pattern.match(line)
+            m = size_pattern.match(line)
             if m:
                 func = m.group('func')
                 # discard internal functions
                 if not everything and func.startswith('__'):
                     continue
-                # discard .8449 suffixes created by optimizer
-                func = re.sub('\.[0-9]+', '', func)
-                results.append(DataResult(
-                    src_path, func,
+                results_.append(DataResult(
+                    file, func,
                     int(m.group('size'), 16)))
 
         proc.wait()
         if proc.returncode != 0:
             if not args.get('verbose'):
@@ -180,6 +190,121 @@ def collect(paths, *,
                 sys.stdout.write(line)
             sys.exit(-1)
 
+
+        # try to figure out the source file if we have debug-info
+        dirs = {}
+        files = {}
+        # note objdump-tool may contain extra args
+        cmd = objdump_tool + ['--dwarf=rawline', path]
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd,
+            stdout=sp.PIPE,
+            stderr=sp.PIPE if not args.get('verbose') else None,
+            universal_newlines=True,
+            errors='replace',
+            close_fds=False)
+        for line in proc.stdout:
+            # note that files contain references to dirs, which we
+            # dereference as soon as we see them as each file table follows a
+            # dir table
+            m = line_pattern.match(line)
+            if m:
+                if not m.group('dir'):
+                    # found a directory entry
+                    dirs[int(m.group('no'))] = m.group('path')
+                else:
+                    # found a file entry
+                    dir = int(m.group('dir'))
+                    if dir in dirs:
+                        files[int(m.group('no'))] = os.path.join(
+                            dirs[dir],
+                            m.group('path'))
+                    else:
+                        files[int(m.group('no'))] = m.group('path')
+        proc.wait()
+        if proc.returncode != 0:
+            if not args.get('verbose'):
+                for line in proc.stderr:
+                    sys.stdout.write(line)
+            # do nothing on error, we don't need objdump to work, source files
+            # may just be inaccurate
+            pass
+
+        defs = {}
+        is_func = False
+        f_name = None
+        f_file = None
+        # note objdump-tool may contain extra args
+        cmd = objdump_tool + ['--dwarf=info', path]
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd,
+            stdout=sp.PIPE,
+            stderr=sp.PIPE if not args.get('verbose') else None,
+            universal_newlines=True,
+            errors='replace',
+            close_fds=False)
+        for line in proc.stdout:
+            # state machine here to find definitions
+            m = info_pattern.match(line)
+            if m:
+                if m.group('tag'):
+                    if is_func:
+                        defs[f_name] = files.get(f_file, '?')
+                    is_func = (m.group('tag') == 'DW_TAG_subprogram')
+                elif m.group('name'):
+                    f_name = m.group('name')
+                elif m.group('file'):
+                    f_file = int(m.group('file'))
+        proc.wait()
+        if proc.returncode != 0:
+            if not args.get('verbose'):
+                for line in proc.stderr:
+                    sys.stdout.write(line)
+            # do nothing on error, we don't need objdump to work, source files
+            # may just be inaccurate
+            pass
+
+        for r in results_:
+            # find best matching debug symbol, this may be slightly different
+            # due to optimizations
+            if defs:
+                # exact match? avoid difflib if we can for speed
+                if r.function in defs:
+                    file = defs[r.function]
+                else:
+                    _, file = max(
+                        defs.items(),
+                        key=lambda d: difflib.SequenceMatcher(None,
+                            d[0],
+                            r.function, False).ratio())
+            else:
+                file = r.file
+
+            # ignore filtered sources
+            if sources is not None:
+                if not any(
+                        os.path.abspath(file) == os.path.abspath(s)
+                        for s in sources):
+                    continue
+            else:
+                # default to only cwd
+                if not everything and not os.path.commonpath([
+                        os.getcwd(),
+                        os.path.abspath(file)]) == os.getcwd():
+                    continue
+
+            # simplify path
+            if os.path.commonpath([
+                    os.getcwd(),
+                    os.path.abspath(file)]) == os.getcwd():
+                file = os.path.relpath(file)
+            else:
+                file = os.path.abspath(file)
+
+            results.append(DataResult(file, r.function, r.size))
+
     return results
 
 
@@ -437,7 +562,7 @@ def main(obj_paths, *,
             paths.append(path)
 
     if not paths:
-        print("error: no .obj files found in %r?" % obj_paths)
+        print("error: no .o files found in %r?" % obj_paths)
         sys.exit(-1)
 
     results = collect(paths, **args)
@@ -469,13 +594,16 @@ def main(obj_paths, *,
     # write results to CSV
     if args.get('output'):
         with openio(args['output'], 'w') as f:
-            writer = csv.DictWriter(f, DataResult._by
+            writer = csv.DictWriter(f,
+                (by if by is not None else DataResult._by)
                 + ['data_'+k for k in DataResult._fields])
             writer.writeheader()
             for r in results:
                 writer.writerow(
-                    {k: getattr(r, k) for k in DataResult._by}
-                    | {'data_'+k: getattr(r, k) for k in DataResult._fields})
+                    {k: getattr(r, k)
+                        for k in (by if by is not None else DataResult._by)}
+                    | {'data_'+k: getattr(r, k)
+                        for k in DataResult._fields})
 
     # find previous results?
     if args.get('diff'):
@@ -512,7 +640,8 @@ if __name__ == "__main__":
     import argparse
     import sys
     parser = argparse.ArgumentParser(
-        description="Find data size at the function level.")
+        description="Find data size at the function level.",
+        allow_abbrev=False)
    parser.add_argument(
        'obj_paths',
        nargs='*',
@@ -579,23 +708,30 @@ if __name__ == "__main__":
|
|||||||
action='store_true',
|
action='store_true',
|
||||||
help="Only show the total.")
|
help="Only show the total.")
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'-A', '--everything',
|
'-F', '--source',
|
||||||
|
dest='sources',
|
||||||
|
action='append',
|
||||||
|
help="Only consider definitions in this file. Defaults to anything "
|
||||||
|
"in the current directory.")
|
||||||
|
parser.add_argument(
|
||||||
|
'--everything',
|
||||||
action='store_true',
|
action='store_true',
|
||||||
help="Include builtin and libc specific symbols.")
|
help="Include builtin and libc specific symbols.")
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--type',
|
'--nm-types',
|
||||||
default=TYPE,
|
default=NM_TYPES,
|
||||||
help="Type of symbols to report, this uses the same single-character "
|
help="Type of symbols to report, this uses the same single-character "
|
||||||
"type-names emitted by nm. Defaults to %r." % TYPE)
|
"type-names emitted by nm. Defaults to %r." % NM_TYPES)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--nm-tool',
|
'--nm-tool',
|
||||||
type=lambda x: x.split(),
|
type=lambda x: x.split(),
|
||||||
default=NM_TOOL,
|
default=NM_TOOL,
|
||||||
help="Path to the nm tool to use. Defaults to %r." % NM_TOOL)
|
help="Path to the nm tool to use. Defaults to %r." % NM_TOOL)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--build-dir',
|
'--objdump-tool',
|
||||||
help="Specify the relative build directory. Used to map object files "
|
type=lambda x: x.split(),
|
||||||
"to the correct source files.")
|
default=OBJDUMP_TOOL,
|
||||||
|
help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
|
||||||
sys.exit(main(**{k: v
|
sys.exit(main(**{k: v
|
||||||
for k, v in vars(parser.parse_intermixed_args()).items()
|
for k, v in vars(parser.parse_intermixed_args()).items()
|
||||||
if v is not None}))
|
if v is not None}))
|
||||||
|
|||||||
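Every script touched in this commit also passes allow_abbrev=False to its ArgumentParser. By default argparse accepts any unambiguous prefix of a long option, so adding a new flag later can silently change what an abbreviated invocation means; disabling abbreviations keeps old command lines stable. A standalone demonstration (not from the diff):

    # standalone demonstration of allow_abbrev; not from the diff
    import argparse

    loose = argparse.ArgumentParser(allow_abbrev=True)
    loose.add_argument('--everything', action='store_true')
    print(loose.parse_args(['--ev']))  # silently accepted as --everything

    strict = argparse.ArgumentParser(allow_abbrev=False)
    strict.add_argument('--everything', action='store_true')
    try:
        strict.parse_args(['--ev'])  # now an error instead of a guess
    except SystemExit:
        print('abbreviation rejected')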
scripts/perf.py (new executable file, 1263 lines)
File diff suppressed because it is too large
scripts/plot.py
@@ -727,7 +727,8 @@ if __name__ == "__main__":
     import sys
     import argparse
     parser = argparse.ArgumentParser(
-        description="Plot CSV files in terminal.")
+        description="Plot CSV files in terminal.",
+        allow_abbrev=False)
     parser.add_argument(
         'csv_paths',
         nargs='*',
scripts/prettyasserts.py
@@ -424,7 +424,8 @@ if __name__ == "__main__":
     import argparse
     import sys
     parser = argparse.ArgumentParser(
-        description="Preprocessor that makes asserts easier to debug.")
+        description="Preprocessor that makes asserts easier to debug.",
+        allow_abbrev=False)
     parser.add_argument(
         'input',
         help="Input C file.")
scripts/stack.py
@@ -4,7 +4,7 @@
 # report as infinite stack usage.
 #
 # Example:
-# ./scripts/stack.py lfs.ci lfs_util.ci -S
+# ./scripts/stack.py lfs.ci lfs_util.ci -Slimit
 #
 # Copyright (c) 2022, The littlefs authors.
 # SPDX-License-Identifier: BSD-3-Clause
@@ -131,6 +131,7 @@ def openio(path, mode='r'):
         return open(path, mode)
 
 def collect(paths, *,
+        sources=None,
         everything=False,
         **args):
     # parse the vcg format
@@ -181,8 +182,9 @@ def collect(paths, *,
             if (not args.get('quiet')
                     and 'static' not in type
                     and 'bounded' not in type):
-                print("warning: found non-static stack for %s (%s)"
-                    % (function, type, size))
+                print("warning: "
+                    "found non-static stack for %s (%s, %s)" % (
+                        function, type, size))
             _, _, _, targets = callgraph[info['title']]
             callgraph[info['title']] = (
                 file, function, int(size), targets)
@@ -193,11 +195,48 @@ def collect(paths, *,
         else:
             continue
 
+    callgraph_ = co.defaultdict(lambda: (None, None, 0, set()))
+    for source, (s_file, s_function, frame, targets) in callgraph.items():
+        # discard internal functions
+        if not everything and s_function.startswith('__'):
+            continue
+        # ignore filtered sources
+        if sources is not None:
+            if not any(
+                    os.path.abspath(s_file) == os.path.abspath(s)
+                    for s in sources):
+                continue
+        else:
+            # default to only cwd
+            if not everything and not os.path.commonpath([
+                    os.getcwd(),
+                    os.path.abspath(s_file)]) == os.getcwd():
+                continue
+
+        # simplify path
+        if os.path.commonpath([
+                os.getcwd(),
+                os.path.abspath(s_file)]) == os.getcwd():
+            s_file = os.path.relpath(s_file)
+        else:
+            s_file = os.path.abspath(s_file)
+
+        callgraph_[source] = (s_file, s_function, frame, targets)
+    callgraph = callgraph_
+
     if not everything:
-        for source, (s_file, s_function, _, _) in list(callgraph.items()):
+        callgraph_ = co.defaultdict(lambda: (None, None, 0, set()))
+        for source, (s_file, s_function, frame, targets) in callgraph.items():
+            # discard filtered sources
+            if sources is not None and not any(
+                    os.path.abspath(s_file) == os.path.abspath(s)
+                    for s in sources):
+                continue
             # discard internal functions
-            if s_file.startswith('<') or s_file.startswith('/usr/include'):
-                del callgraph[source]
+            if s_function.startswith('__'):
+                continue
+            callgraph_[source] = (s_file, s_function, frame, targets)
+        callgraph = callgraph_
 
     # find maximum stack size recursively, this requires also detecting cycles
     # (in case of recursion)
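One detail worth calling out in the hunk above: the old code deleted entries out of callgraph while iterating over a list() copy, while the new code builds a fresh defaultdict and swaps it in, preserving the default factory for later lookups. The pattern in isolation (the example entries are made up):

    # the rebuild-and-swap pattern used above; entries are made up
    import collections as co

    callgraph = co.defaultdict(lambda: (None, None, 0, set()))
    callgraph['a'] = ('lfs.c', 'lfs_mount', 32, {'b'})
    callgraph['b'] = ('<builtins>', '__divsi3', 8, set())

    callgraph_ = co.defaultdict(lambda: (None, None, 0, set()))
    for source, (file, function, frame, targets) in callgraph.items():
        if function.startswith('__'):
            continue
        callgraph_[source] = (file, function, frame, targets)
    callgraph = callgraph_
    assert list(callgraph) == ['a']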
@@ -278,7 +317,7 @@ def table(Result, results, diff_results=None, *,
         all=False,
         percent=False,
         tree=False,
-        depth=None,
+        depth=1,
         **_):
     all_, all = all, __builtins__.all
 
@@ -467,15 +506,8 @@ def table(Result, results, diff_results=None, *,
     # adjust the name width based on the expected call depth, though
     # note this doesn't really work with unbounded recursion
     if not summary:
-        # it doesn't really make sense to not have a depth with tree,
-        # so assume depth=inf if tree by default
-        if depth is None:
-            depth = m.inf if tree else 0
-        elif depth == 0:
-            depth = m.inf
-
         if not m.isinf(depth):
-            widths[0] += 4*depth
+            widths[0] += 4*(depth-1)
 
# print our table with optional call info
|
# print our table with optional call info
|
||||||
#
|
#
|
||||||
@@ -528,7 +560,7 @@ def table(Result, results, diff_results=None, *,
|
|||||||
prefixes[2+is_last] + "'-> ",
|
prefixes[2+is_last] + "'-> ",
|
||||||
prefixes[2+is_last] + "| ",
|
prefixes[2+is_last] + "| ",
|
||||||
prefixes[2+is_last] + " "))
|
prefixes[2+is_last] + " "))
|
||||||
recurse(names, depth)
|
recurse(names, depth-1)
|
||||||
|
|
||||||
if not tree:
|
if not tree:
|
||||||
print('%-*s %s%s' % (
|
print('%-*s %s%s' % (
|
||||||
@@ -544,6 +576,13 @@ def main(ci_paths,
         defines=None,
         sort=None,
         **args):
+    # it doesn't really make sense to not have a depth with tree,
+    # so assume depth=inf if tree by default
+    if args.get('depth') is None:
+        args['depth'] = m.inf if args['tree'] else 1
+    elif args.get('depth') == 0:
+        args['depth'] = m.inf
+
     # find sizes
     if not args.get('use', None):
         # find .ci files
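With this move the depth normalization happens once in main() instead of inside table(): omitting --depth means a depth of 1 (or unlimited when --tree is given), while an explicit depth of 0 means unlimited. As a plain function (a hypothetical helper, for illustration only):

    # the depth normalization above as a plain function; a hypothetical
    # helper for illustration only
    import math as m

    def normalize_depth(depth, tree=False):
        if depth is None:
            return m.inf if tree else 1
        elif depth == 0:
            return m.inf
        return depth

    assert normalize_depth(None) == 1
    assert normalize_depth(None, tree=True) == m.inf
    assert normalize_depth(0) == m.inf
    assert normalize_depth(3) == 3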
@@ -588,13 +627,16 @@ def main(ci_paths,
     # write results to CSV
     if args.get('output'):
         with openio(args['output'], 'w') as f:
-            writer = csv.DictWriter(f, StackResult._by
+            writer = csv.DictWriter(f,
+                (by if by is not None else StackResult._by)
                 + ['stack_'+k for k in StackResult._fields])
             writer.writeheader()
             for r in results:
                 writer.writerow(
-                    {k: getattr(r, k) for k in StackResult._by}
-                    | {'stack_'+k: getattr(r, k) for k in StackResult._fields})
+                    {k: getattr(r, k)
+                        for k in (by if by is not None else StackResult._by)}
+                    | {'stack_'+k: getattr(r, k)
+                        for k in StackResult._fields})
 
     # find previous results?
     if args.get('diff'):
@@ -636,7 +678,8 @@ if __name__ == "__main__":
     import argparse
     import sys
     parser = argparse.ArgumentParser(
-        description="Find stack usage at the function level.")
+        description="Find stack usage at the function level.",
+        allow_abbrev=False)
     parser.add_argument(
         'ci_paths',
         nargs='*',
@@ -703,7 +746,13 @@ if __name__ == "__main__":
         action='store_true',
         help="Only show the total.")
     parser.add_argument(
-        '-A', '--everything',
+        '-F', '--source',
+        dest='sources',
+        action='append',
+        help="Only consider definitions in this file. Defaults to anything "
+            "in the current directory.")
+    parser.add_argument(
+        '--everything',
         action='store_true',
         help="Include builtin and libc specific symbols.")
     parser.add_argument(
@@ -711,20 +760,16 @@ if __name__ == "__main__":
         action='store_true',
         help="Only show the function call tree.")
     parser.add_argument(
-        '-L', '--depth',
+        '-Z', '--depth',
         nargs='?',
         type=lambda x: int(x, 0),
         const=0,
-        help="Depth of function calls to show. 0 show all calls but may not "
+        help="Depth of function calls to show. 0 shows all calls but may not "
             "terminate!")
     parser.add_argument(
         '-e', '--error-on-recursion',
         action='store_true',
         help="Error if any functions are recursive.")
-    parser.add_argument(
-        '--build-dir',
-        help="Specify the relative build directory. Used to map object files "
-            "to the correct source files.")
     sys.exit(main(**{k: v
         for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))
scripts/struct_.py
@@ -3,7 +3,7 @@
 # Script to find struct sizes.
 #
 # Example:
-# ./scripts/struct_.py lfs.o lfs_util.o -S
+# ./scripts/struct_.py lfs.o lfs_util.o -Ssize
 #
 # Copyright (c) 2022, The littlefs authors.
 # SPDX-License-Identifier: BSD-3-Clause
@@ -11,6 +11,7 @@
 
 import collections as co
 import csv
+import difflib
 import glob
 import itertools as it
 import math as m
@@ -128,26 +129,28 @@ def openio(path, mode='r'):
     else:
         return open(path, mode)
 
-def collect(paths, *,
+def collect(obj_paths, *,
         objdump_tool=OBJDUMP_TOOL,
-        build_dir=None,
+        sources=None,
         everything=False,
+        internal=False,
         **args):
-    decl_pattern = re.compile(
-        '^\s+(?P<no>[0-9]+)'
-        '\s+(?P<dir>[0-9]+)'
-        '\s+.*'
-        '\s+(?P<file>[^\s]+)$')
-    struct_pattern = re.compile(
-        '^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
+    line_pattern = re.compile(
+        '^\s+(?P<no>[0-9]+)\s+'
+        '(?:(?P<dir>[0-9]+)\s+)?'
+        '.*\s+'
+        '(?P<path>[^\s]+)$')
+    info_pattern = re.compile(
+        '^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
         '|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
-        '|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
+        '|^.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*'
         '|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
 
     results = []
-    for path in paths:
-        # find decl, we want to filter by structs in .h files
-        decls = {}
+    for path in obj_paths:
+        # find files, we want to filter by structs in .h files
+        dirs = {}
+        files = {}
         # note objdump-tool may contain extra args
         cmd = objdump_tool + ['--dwarf=rawline', path]
         if args.get('verbose'):
@@ -156,12 +159,26 @@ def collect(paths, *,
             stdout=sp.PIPE,
             stderr=sp.PIPE if not args.get('verbose') else None,
             universal_newlines=True,
-            errors='replace')
+            errors='replace',
+            close_fds=False)
         for line in proc.stdout:
-            # find file numbers
-            m = decl_pattern.match(line)
+            # note that files contain references to dirs, which we
+            # dereference as soon as we see them as each file table follows a
+            # dir table
+            m = line_pattern.match(line)
             if m:
-                decls[int(m.group('no'))] = m.group('file')
+                if not m.group('dir'):
+                    # found a directory entry
+                    dirs[int(m.group('no'))] = m.group('path')
+                else:
+                    # found a file entry
+                    dir = int(m.group('dir'))
+                    if dir in dirs:
+                        files[int(m.group('no'))] = os.path.join(
+                            dirs[dir],
+                            m.group('path'))
+                    else:
+                        files[int(m.group('no'))] = m.group('path')
         proc.wait()
         if proc.returncode != 0:
             if not args.get('verbose'):
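The reworked line_pattern now matches both tables in objdump's --dwarf=rawline output: directory entries carry no dir column, file entries do, and file entries are joined against the directory table as soon as they are seen. Exercising the regex against schematic table lines (the lines here are made up; real objdump output is noisier):

    # exercising line_pattern against made-up table lines; real
    # objdump --dwarf=rawline output has more columns and noise
    import os, re

    line_pattern = re.compile(
        '^\s+(?P<no>[0-9]+)\s+'
        '(?:(?P<dir>[0-9]+)\s+)?'
        '.*\s+'
        '(?P<path>[^\s]+)$')

    dirs, files = {}, {}
    for line in [
            '  1    /usr/include',      # a directory table entry
            '  2  1  0  0  stdio.h']:   # a file table entry
        m = line_pattern.match(line)
        if not m.group('dir'):
            dirs[int(m.group('no'))] = m.group('path')
        else:
            files[int(m.group('no'))] = os.path.join(
                dirs[int(m.group('dir'))], m.group('path'))

    assert files[2] == '/usr/include/stdio.h'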
@@ -170,11 +187,11 @@ def collect(paths, *,
                 sys.exit(-1)
 
         # collect structs as we parse dwarf info
-        found = False
-        name = None
-        decl = None
-        size = None
+        results_ = []
+        is_struct = False
+        s_name = None
+        s_file = None
+        s_size = None
         # note objdump-tool may contain extra args
         cmd = objdump_tool + ['--dwarf=info', path]
         if args.get('verbose'):
@@ -183,38 +200,23 @@ def collect(paths, *,
             stdout=sp.PIPE,
             stderr=sp.PIPE if not args.get('verbose') else None,
             universal_newlines=True,
-            errors='replace')
+            errors='replace',
+            close_fds=False)
         for line in proc.stdout:
             # state machine here to find structs
-            m = struct_pattern.match(line)
+            m = info_pattern.match(line)
             if m:
                 if m.group('tag'):
-                    if (name is not None
-                            and decl is not None
-                            and size is not None):
-                        file = decls.get(decl, '?')
-                        # map to source file
-                        file = re.sub('\.o$', '.c', file)
-                        if build_dir:
-                            file = re.sub(
-                                '%s/*' % re.escape(build_dir), '',
-                                file)
-                        # only include structs declared in header files in the
-                        # current directory, ignore internal-only structs (
-                        # these are represented in other measurements)
-                        if everything or file.endswith('.h'):
-                            results.append(StructResult(file, name, size))
-
-                    found = (m.group('tag') == 'structure_type')
-                    name = None
-                    decl = None
-                    size = None
-                elif found and m.group('name'):
-                    name = m.group('name')
-                elif found and name and m.group('decl'):
-                    decl = int(m.group('decl'))
-                elif found and name and m.group('size'):
-                    size = int(m.group('size'))
+                    if is_struct:
+                        file = files.get(s_file, '?')
+                        results_.append(StructResult(file, s_name, s_size))
+                    is_struct = (m.group('tag') == 'DW_TAG_structure_type')
+                elif m.group('name'):
+                    s_name = m.group('name')
+                elif m.group('file'):
+                    s_file = int(m.group('file'))
+                elif m.group('size'):
+                    s_size = int(m.group('size'))
         proc.wait()
         if proc.returncode != 0:
             if not args.get('verbose'):
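The new state machine is flush-on-next-tag: a struct's name, file, and size accumulate until the next DW_TAG_* line arrives, at which point the finished struct is emitted. A schematic walk (the DWARF tag and attribute names are real, the input lines are simplified stand-ins for objdump --dwarf=info output):

    # schematic walk of the emit-on-next-tag state machine; input lines
    # are simplified stand-ins for objdump --dwarf=info output
    import re

    info_pattern = re.compile(
        '^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
        '|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
        '|^.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*'
        '|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')

    lines = [
        ' <1><2d>: Abbrev Number: 5 (DW_TAG_structure_type)',
        '    <2e>   DW_AT_name : lfs_config',
        '    <32>   DW_AT_decl_file : 2',
        '    <33>   DW_AT_byte_size : 92',
        ' <1><40>: Abbrev Number: 8 (DW_TAG_subprogram)']

    results, is_struct = [], False
    s_name = s_file = s_size = None
    for line in lines:
        m = info_pattern.match(line)
        if m and m.group('tag'):
            if is_struct:
                results.append((s_file, s_name, s_size))
            is_struct = (m.group('tag') == 'DW_TAG_structure_type')
        elif m and m.group('name'):
            s_name = m.group('name')
        elif m and m.group('file'):
            s_file = int(m.group('file'))
        elif m and m.group('size'):
            s_size = int(m.group('size'))

    assert results == [(2, 'lfs_config', 92)]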
@@ -222,6 +224,34 @@ def collect(paths, *,
                 sys.stdout.write(line)
             sys.exit(-1)
 
+        for r in results_:
+            # ignore filtered sources
+            if sources is not None:
+                if not any(
+                        os.path.abspath(r.file) == os.path.abspath(s)
+                        for s in sources):
+                    continue
+            else:
+                # default to only cwd
+                if not everything and not os.path.commonpath([
+                        os.getcwd(),
+                        os.path.abspath(r.file)]) == os.getcwd():
+                    continue
+
+            # limit to .h files unless --internal
+            if not internal and not r.file.endswith('.h'):
+                continue
+
+            # simplify path
+            if os.path.commonpath([
+                    os.getcwd(),
+                    os.path.abspath(r.file)]) == os.getcwd():
+                file = os.path.relpath(r.file)
+            else:
+                file = os.path.abspath(r.file)
+
+            results.append(StructResult(file, r.struct, r.size))
+
     return results
 
@@ -479,7 +509,7 @@ def main(obj_paths, *,
             paths.append(path)
 
     if not paths:
-        print("error: no .obj files found in %r?" % obj_paths)
+        print("error: no .o files found in %r?" % obj_paths)
        sys.exit(-1)
 
     results = collect(paths, **args)
@@ -513,12 +543,14 @@ def main(obj_paths, *,
     # write results to CSV
     if args.get('output'):
         with openio(args['output'], 'w') as f:
-            writer = csv.DictWriter(f, StructResult._by
+            writer = csv.DictWriter(f,
+                (by if by is not None else StructResult._by)
                 + ['struct_'+k for k in StructResult._fields])
             writer.writeheader()
             for r in results:
                 writer.writerow(
-                    {k: getattr(r, k) for k in StructResult._by}
+                    {k: getattr(r, k)
+                        for k in (by if by is not None else StructResult._by)}
                     | {'struct_'+k: getattr(r, k)
                         for k in StructResult._fields})
 
@@ -559,7 +591,8 @@ if __name__ == "__main__":
     import argparse
     import sys
     parser = argparse.ArgumentParser(
-        description="Find struct sizes.")
+        description="Find struct sizes.",
+        allow_abbrev=False)
     parser.add_argument(
         'obj_paths',
         nargs='*',
@@ -626,18 +659,24 @@ if __name__ == "__main__":
         action='store_true',
         help="Only show the total.")
     parser.add_argument(
-        '-A', '--everything',
+        '-F', '--source',
+        dest='sources',
+        action='append',
+        help="Only consider definitions in this file. Defaults to anything "
+            "in the current directory.")
+    parser.add_argument(
+        '--everything',
         action='store_true',
         help="Include builtin and libc specific symbols.")
+    parser.add_argument(
+        '--internal',
+        action='store_true',
+        help="Also show structs in .c files.")
     parser.add_argument(
         '--objdump-tool',
         type=lambda x: x.split(),
         default=OBJDUMP_TOOL,
         help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
-    parser.add_argument(
-        '--build-dir',
-        help="Specify the relative build directory. Used to map object files "
-            "to the correct source files.")
     sys.exit(main(**{k: v
         for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))
|||||||
@@ -750,7 +750,8 @@ if __name__ == "__main__":
|
|||||||
import argparse
|
import argparse
|
||||||
import sys
|
import sys
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(
|
||||||
description="Summarize measurements in CSV files.")
|
description="Summarize measurements in CSV files.",
|
||||||
|
allow_abbrev=False)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'csv_paths',
|
'csv_paths',
|
||||||
nargs='*',
|
nargs='*',
|
||||||
|
|||||||
scripts/tailpipe.py
@@ -115,7 +115,8 @@ if __name__ == "__main__":
     import sys
     import argparse
     parser = argparse.ArgumentParser(
-        description="Efficiently displays the last n lines of a file/pipe.")
+        description="Efficiently displays the last n lines of a file/pipe.",
+        allow_abbrev=False)
     parser.add_argument(
         'path',
         nargs='?',
scripts/test.py
@@ -27,9 +27,13 @@ import time
 import toml
 
 
-RUNNER_PATH = 'runners/test_runner'
+RUNNER_PATH = './runners/test_runner'
 HEADER_PATH = 'runners/test_runner.h'
 
+GDB_TOOL = ['gdb']
+VALGRIND_TOOL = ['valgrind']
+PERF_SCRIPT = ['./scripts/perf.py']
+
 
 def openio(path, mode='r', buffering=-1, nb=False):
     if path == '-':
@@ -516,12 +520,25 @@ def find_runner(runner, **args):
 
     # run under valgrind?
     if args.get('valgrind'):
-        cmd[:0] = filter(None, [
-            'valgrind',
+        cmd[:0] = args['valgrind_tool'] + [
             '--leak-check=full',
             '--track-origins=yes',
             '--error-exitcode=4',
-            '-q'])
+            '-q']
+
+    # run under perf?
+    if args.get('perf'):
+        cmd[:0] = args['perf_script'] + list(filter(None, [
+            '-R',
+            '--perf-freq=%s' % args['perf_freq']
+                if args.get('perf_freq') else None,
+            '--perf-period=%s' % args['perf_period']
+                if args.get('perf_period') else None,
+            '--perf-events=%s' % args['perf_events']
+                if args.get('perf_events') else None,
+            '--perf-tool=%s' % args['perf_tool']
+                if args.get('perf_tool') else None,
+            '-o%s' % args['perf']]))
 
     # other context
     if args.get('geometry'):
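So with --perf the composed argv simply gains a perf.py prefix, mirroring the valgrind path above. Roughly, for hypothetical flag values (the perf.py side of this interface lives in the suppressed diff):

    # roughly what find_runner() composes for --perf=lfs.perf
    # --perf-freq=100; the values and the test id are hypothetical
    cmd = ['./scripts/perf.py', '-R', '--perf-freq=100', '-olfs.perf',
        './runners/test_runner', 'test_dirs']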
@@ -799,9 +816,9 @@ def run_stage(name, runner_, ids, output_, **args):
                 try:
                     line = mpty.readline()
                 except OSError as e:
-                    if e.errno == errno.EIO:
-                        break
-                    raise
+                    if e.errno != errno.EIO:
+                        raise
+                    break
                 if not line:
                     break
                 last_stdout.append(line)
@@ -1126,24 +1143,24 @@ def run(runner, test_ids=[], **args):
             cmd = runner_ + [failure.id]
 
             if args.get('gdb_main'):
-                cmd[:0] = ['gdb',
+                cmd[:0] = args['gdb_tool'] + [
                     '-ex', 'break main',
                     '-ex', 'run',
                     '--args']
             elif args.get('gdb_case'):
                 path, lineno = find_path(runner_, failure.id, **args)
-                cmd[:0] = ['gdb',
+                cmd[:0] = args['gdb_tool'] + [
                     '-ex', 'break %s:%d' % (path, lineno),
                     '-ex', 'run',
                     '--args']
             elif failure.assert_ is not None:
-                cmd[:0] = ['gdb',
+                cmd[:0] = args['gdb_tool'] + [
                     '-ex', 'run',
                     '-ex', 'frame function raise',
                     '-ex', 'up 2',
                     '--args']
             else:
-                cmd[:0] = ['gdb',
+                cmd[:0] = args['gdb_tool'] + [
                     '-ex', 'run',
                     '--args']
 
@@ -1188,6 +1205,7 @@ if __name__ == "__main__":
     argparse._ArgumentGroup._handle_conflict_ignore = lambda *_: None
     parser = argparse.ArgumentParser(
         description="Build and run tests.",
+        allow_abbrev=False,
         conflict_handler='ignore')
     parser.add_argument(
         '-v', '--verbose',
@@ -1323,6 +1341,11 @@ if __name__ == "__main__":
         action='store_true',
         help="Drop into gdb on test failure but stop at the beginning "
             "of main.")
+    test_parser.add_argument(
+        '--gdb-tool',
+        type=lambda x: x.split(),
+        default=GDB_TOOL,
+        help="Path to gdb tool to use. Defaults to %r." % GDB_TOOL)
     test_parser.add_argument(
         '--exec',
         type=lambda e: e.split(),
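Because --gdb-tool is split on whitespace, extra arguments can ride along with the tool path, and the same holds for --valgrind-tool and --perf-script below. How the split feeds the cmd[:0] prefixing above (the tool and test id here are hypothetical):

    # how the whitespace-splitting argument type feeds the cmd[:0]
    # prefixing; the tool and test id are hypothetical
    tool = (lambda x: x.split())('gdb-multiarch -q')
    cmd = ['./runners/test_runner', 'test_dirs:0']
    cmd[:0] = tool + ['-ex', 'run', '--args']
    assert cmd == ['gdb-multiarch', '-q', '-ex', 'run', '--args',
        './runners/test_runner', 'test_dirs:0']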
@@ -1332,6 +1355,37 @@ if __name__ == "__main__":
         action='store_true',
         help="Run under Valgrind to find memory errors. Implicitly sets "
             "--isolate.")
+    test_parser.add_argument(
+        '--valgrind-tool',
+        type=lambda x: x.split(),
+        default=VALGRIND_TOOL,
+        help="Path to Valgrind tool to use. Defaults to %r." % VALGRIND_TOOL)
+    test_parser.add_argument(
+        '--perf',
+        help="Run under Linux's perf to sample performance counters, writing "
+            "samples to this file.")
+    test_parser.add_argument(
+        '--perf-freq',
+        help="perf sampling frequency. This is passed directly to the perf "
+            "script.")
+    test_parser.add_argument(
+        '--perf-period',
+        help="perf sampling period. This is passed directly to the perf "
+            "script.")
+    test_parser.add_argument(
+        '--perf-events',
+        help="perf events to record. This is passed directly to the perf "
+            "script.")
+    test_parser.add_argument(
+        '--perf-script',
+        type=lambda x: x.split(),
+        default=PERF_SCRIPT,
+        help="Path to the perf script to use. Defaults to %r." % PERF_SCRIPT)
+    test_parser.add_argument(
+        '--perf-tool',
+        type=lambda x: x.split(),
+        help="Path to the perf tool to use. This is passed directly to the "
+            "perf script.")
 
     # compilation flags
     comp_parser = parser.add_argument_group('compilation options')
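Taken together, the new options let a whole test run be sampled without touching the runner itself; hypothetical invocations, in the same spirit as the scripts' own usage comments (--perf-freq, --perf-period, and --perf-events are forwarded to the perf script untouched):

    # ./scripts/test.py --perf=lfs.perf --perf-freq=100
    # ./scripts/test.py --perf=lfs.perf --perf-events=cycles,branch-misses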
@@ -1356,7 +1410,7 @@ if __name__ == "__main__":
         '-o', '--output',
         help="Output file.")
 
-    # runner + test_ids overlaps test_paths, so we need to do some munging here
+    # runner/test_paths overlap, so need to do some munging here
     args = parser.parse_intermixed_args()
     args.test_paths = [' '.join(args.runner or [])] + args.test_ids
     args.runner = args.runner or [RUNNER_PATH]
scripts/tracebd.py
@@ -830,7 +830,8 @@ if __name__ == "__main__":
     import argparse
     parser = argparse.ArgumentParser(
         description="Display operations on block devices based on "
-            "trace output.")
+            "trace output.",
+        allow_abbrev=False)
     parser.add_argument(
         'path',
         nargs='?',