Dropped csv field prefixes in scripts

The original idea was to allow merging a whole bunch of different csv
results into a single lfs.csv file, but this never really happened. It's
much easier to operate on smaller context-specific csv files, where the
field prefix:

- Doesn't really add much information
- Requires more typing
- Is confusing because it doesn't match the table field names.

We can always use summary.py -fcode_size=size to add prefixes when
necessary anyway.
This commit is contained in:
Christopher Haster
2024-06-02 16:33:05 -05:00
parent dc7e7b9ab1
commit 54d77da2f5
10 changed files with 132 additions and 124 deletions

118
Makefile
View File

@@ -246,15 +246,15 @@ stack-diff: $(CI)
## Find function sizes
.PHONY: funcs
funcs: SUMMARYFLAGS+=-S
funcs: \
$(BUILDDIR)/lfs.code.csv \
$(BUILDDIR)/lfs.data.csv \
$(BUILDDIR)/lfs.stack.csv
$(strip ./scripts/summary.py $^ \
-bfunction \
-fcode=code_size \
-fdata=data_size \
-fstack=stack_limit --max=stack \
funcs: SHELL=/bin/bash
funcs: $(BUILDDIR)/lfs.code.csv $(BUILDDIR)/lfs.stack.csv
$(strip ./scripts/summary.py \
<(./scripts/summary.py $(BUILDDIR)/lfs.code.csv \
-fcode=size -q $(SUMMARYFLAGS) -o-) \
<(./scripts/summary.py $(BUILDDIR)/lfs.stack.csv \
-fstack=limit -q $(SUMMARYFLAGS) -o-) \
-bfunction -fcode -fstack \
--max=stack \
$(SUMMARYFLAGS))
## Compare function sizes
@@ -262,17 +262,20 @@ funcs: \
funcs-diff: SHELL=/bin/bash
funcs-diff: $(OBJ) $(CI)
$(strip ./scripts/summary.py \
<(./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o-) \
<(./scripts/data.py $(OBJ) -q $(DATAFLAGS) -o-) \
<(./scripts/stack.py $(CI) -q $(STACKFLAGS) -o-) \
-bfunction \
-fcode=code_size \
-fdata=data_size \
-fstack=stack_limit --max=stack \
<(./scripts/summary.py \
<(./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o-) \
-fcode=size -q $(SUMMARYFLAGS) -o-) \
<(./scripts/summary.py \
<(./scripts/stack.py $(CI) -q $(STACKFLAGS) -o-) \
-fstack=limit -q $(SUMMARYFLAGS) -o-) \
-bfunction -fcode -fstack \
--max=stack \
$(SUMMARYFLAGS) -d <(./scripts/summary.py \
$(BUILDDIR)/lfs.code.csv \
$(BUILDDIR)/lfs.data.csv \
$(BUILDDIR)/lfs.stack.csv \
<(./scripts/summary.py $(BUILDDIR)/lfs.code.csv \
-fcode=size -q $(SUMMARYFLAGS) -o-) \
<(./scripts/summary.py $(BUILDDIR)/lfs.stack.csv \
-fstack=limit -q $(SUMMARYFLAGS) -o-) \
-fcode -fstack \
-q $(SUMMARYFLAGS) -o-))
## Find struct sizes
@@ -333,37 +336,57 @@ perfbd-diff: $(BENCH_TRACE)
## Find a summary of compile-time sizes
.PHONY: summary sizes
summary sizes: SHELL=/bin/bash
summary sizes: \
$(BUILDDIR)/lfs.code.csv \
$(BUILDDIR)/lfs.data.csv \
$(BUILDDIR)/lfs.stack.csv \
$(BUILDDIR)/lfs.structs.csv
$(strip ./scripts/summary.py $^ \
-fcode=code_size \
-fdata=data_size \
-fstack=stack_limit --max=stack \
-fstructs=struct_size \
-Y $(SUMMARYFLAGS))
$(strip ./scripts/summary.py \
<(./scripts/summary.py $(BUILDDIR)/lfs.code.csv \
-fcode=size -q $(SUMMARYFLAGS) -o-) \
<(./scripts/summary.py $(BUILDDIR)/lfs.data.csv \
-fdata=size -q $(SUMMARYFLAGS) -o-) \
<(./scripts/summary.py $(BUILDDIR)/lfs.stack.csv \
-fstack=limit -q $(SUMMARYFLAGS) -o-) \
<(./scripts/summary.py $(BUILDDIR)/lfs.structs.csv \
-fstructs=size -q $(SUMMARYFLAGS) -o-) \
-bfunction -fcode -fdata -fstack -fstructs \
--max=stack \
-Y $(SUMMARYFLAGS) \
| cut -c 25-)
## Compare compile-time sizes
.PHONY: summary-diff sizes-diff
summary-diff sizes-diff: SHELL=/bin/bash
summary-diff sizes-diff: $(OBJ) $(CI)
$(strip ./scripts/summary.py \
<(./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o-) \
<(./scripts/data.py $(OBJ) -q $(DATAFLAGS) -o-) \
<(./scripts/stack.py $(CI) -q $(STACKFLAGS) -o-) \
<(./scripts/structs.py $(OBJ) -q $(STRUCTSFLAGS) -o-) \
-fcode=code_size \
-fdata=data_size \
-fstack=stack_limit --max=stack \
-fstructs=struct_size \
-Y $(SUMMARYFLAGS) -d <(./scripts/summary.py \
$(BUILDDIR)/lfs.code.csv \
$(BUILDDIR)/lfs.data.csv \
$(BUILDDIR)/lfs.stack.csv \
$(BUILDDIR)/lfs.structs.csv \
-q $(SUMMARYFLAGS) -o-))
<(./scripts/summary.py \
<(./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o-) \
-fcode=size -q $(SUMMARYFLAGS) -o-) \
<(./scripts/summary.py \
<(./scripts/data.py $(OBJ) -q $(DATAFLAGS) -o-) \
-fdata=size -q $(SUMMARYFLAGS) -o-) \
<(./scripts/summary.py \
<(./scripts/stack.py $(CI) -q $(STACKFLAGS) -o-) \
-fstack=limit -q $(SUMMARYFLAGS) -o-) \
<(./scripts/summary.py \
<(./scripts/structs.py $(OBJ) -q $(STRUCTSFLAGS) -o-) \
-fstructs=size -q $(SUMMARYFLAGS) -o-) \
-bfunction -fcode -fdata -fstack -fstructs \
--max=stack \
-Y -p $(SUMMARYFLAGS) -d <(./scripts/summary.py \
<(./scripts/summary.py $(BUILDDIR)/lfs.code.csv \
-fcode=size -q $(SUMMARYFLAGS) -o-) \
<(./scripts/summary.py $(BUILDDIR)/lfs.data.csv \
-fdata=size -q $(SUMMARYFLAGS) -o-) \
<(./scripts/summary.py $(BUILDDIR)/lfs.stack.csv \
-fstack=limit -q $(SUMMARYFLAGS) -o-) \
<(./scripts/summary.py $(BUILDDIR)/lfs.structs.csv \
-fstructs=size -q $(SUMMARYFLAGS) -o-) \
-fcode -fdata -fstack -fstructs \
-q $(SUMMARYFLAGS) -o-) \
| cut -c 25-)
## Build the test-runner
.PHONY: test-runner build-tests
@@ -396,17 +419,14 @@ test-list list-tests: test-runner
testmarks: SUMMARYFLAGS+=-spassed -Stime
testmarks: $(TEST_CSV) $(BUILDDIR)/lfs.test.csv
$(strip ./scripts/summary.py $(TEST_CSV) \
-bsuite \
-fpassed=test_passed \
-ftime=test_time \
-bsuite -fpassed -ftime \
$(SUMMARYFLAGS))
## Compare test results against a previous run
.PHONY: testmarks-diff
testmarks-diff: $(TEST_CSV)
$(strip ./scripts/summary.py $^ \
-bsuite \
-fpassed=test_passed \
-bsuite -fpassed -ftime \
$(SUMMARYFLAGS) -d $(BUILDDIR)/lfs.test.csv)
## Build the bench-runner
@@ -440,20 +460,14 @@ bench-list list-benches: bench-runner
benchmarks: SUMMARYFLAGS+=-Serased -Sproged -Sreaded
benchmarks: $(BENCH_CSV) $(BUILDDIR)/lfs.bench.csv
$(strip ./scripts/summary.py $(BENCH_CSV) \
-bsuite \
-freaded=bench_readed \
-fproged=bench_proged \
-ferased=bench_erased \
-bsuite -freaded -fproged -ferased \
$(SUMMARYFLAGS))
## Compare bench results against a previous run
.PHONY: benchmarks-diff
benchmarks-diff: $(BENCH_CSV)
$(strip ./scripts/summary.py $^ \
-bsuite \
-freaded=bench_readed \
-fproged=bench_proged \
-ferased=bench_erased \
-bsuite -freaded -fproged -ferased \
$(SUMMARYFLAGS) -d $(BUILDDIR)/lfs.bench.csv)

View File

@@ -1058,12 +1058,12 @@ def run_stage(name, runner, bench_ids, stdout_, trace_, output_, **args):
'suite': last_suite,
'case': last_case,
**last_defines,
'bench_meas': meas,
'bench_iter': iter,
'bench_size': size,
'bench_readed': readed_,
'bench_proged': proged_,
'bench_erased': erased_})
'meas': meas,
'iter': iter,
'size': size,
'readed': readed_,
'proged': proged_,
'erased': erased_})
# keep track of total for summary
readed += readed_
proged += proged_
@@ -1239,8 +1239,7 @@ def run(runner, bench_ids=[], **args):
if args.get('output'):
output = BenchOutput(args['output'],
['suite', 'case'],
['bench_meas', 'bench_iter', 'bench_size',
'bench_readed', 'bench_proged', 'bench_erased'])
['meas', 'iter', 'size', 'readed', 'proged', 'erased'])
# measure runtime
start = time.time()

View File

@@ -532,15 +532,15 @@ def main(obj_paths, *,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('code_'+k in r and r['code_'+k].strip()
if not any(k in r and r[k].strip()
for k in CodeResult._fields):
continue
try:
results.append(CodeResult(
**{k: r[k] for k in CodeResult._by
if k in r and r[k].strip()},
**{k: r['code_'+k] for k in CodeResult._fields
if 'code_'+k in r and r['code_'+k].strip()}))
**{k: r[k] for k in CodeResult._fields
if k in r and r[k].strip()}))
except TypeError:
pass
@@ -562,14 +562,14 @@ def main(obj_paths, *,
with openio(args['output'], 'w') as f:
writer = csv.DictWriter(f,
(by if by is not None else CodeResult._by)
+ ['code_'+k for k in (
+ [k for k in (
fields if fields is not None else CodeResult._fields)])
writer.writeheader()
for r in results:
writer.writerow(
{k: getattr(r, k) for k in (
by if by is not None else CodeResult._by)}
| {'code_'+k: getattr(r, k) for k in (
| {k: getattr(r, k) for k in (
fields if fields is not None else CodeResult._fields)})
# find previous results?
@@ -583,15 +583,15 @@ def main(obj_paths, *,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('code_'+k in r and r['code_'+k].strip()
if not any(k in r and r[k].strip()
for k in CodeResult._fields):
continue
try:
diff_results.append(CodeResult(
**{k: r[k] for k in CodeResult._by
if k in r and r[k].strip()},
**{k: r['code_'+k] for k in CodeResult._fields
if 'code_'+k in r and r['code_'+k].strip()}))
**{k: r[k] for k in CodeResult._fields
if k in r and r[k].strip()}))
except TypeError:
pass
except FileNotFoundError:

View File

@@ -604,17 +604,16 @@ def main(gcda_paths, *,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('cov_'+k in r and r['cov_'+k].strip()
if not any(k in r and r[k].strip()
for k in CovResult._fields):
continue
try:
results.append(CovResult(
**{k: r[k] for k in CovResult._by
if k in r and r[k].strip()},
**{k: r['cov_'+k]
**{k: r[k]
for k in CovResult._fields
if 'cov_'+k in r
and r['cov_'+k].strip()}))
if k in r and r[k].strip()}))
except TypeError:
pass
@@ -636,14 +635,14 @@ def main(gcda_paths, *,
with openio(args['output'], 'w') as f:
writer = csv.DictWriter(f,
(by if by is not None else CovResult._by)
+ ['cov_'+k for k in (
+ [k for k in (
fields if fields is not None else CovResult._fields)])
writer.writeheader()
for r in results:
writer.writerow(
{k: getattr(r, k) for k in (
by if by is not None else CovResult._by)}
| {'cov_'+k: getattr(r, k) for k in (
| {k: getattr(r, k) for k in (
fields if fields is not None else CovResult._fields)})
# find previous results?
@@ -657,17 +656,16 @@ def main(gcda_paths, *,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('cov_'+k in r and r['cov_'+k].strip()
if not any(k in r and r[k].strip()
for k in CovResult._fields):
continue
try:
diff_results.append(CovResult(
**{k: r[k] for k in CovResult._by
if k in r and r[k].strip()},
**{k: r['cov_'+k]
**{k: r[k]
for k in CovResult._fields
if 'cov_'+k in r
and r['cov_'+k].strip()}))
if k in r and r[k].strip()}))
except TypeError:
pass
except FileNotFoundError:

View File

@@ -536,8 +536,8 @@ def main(obj_paths, *,
results.append(DataResult(
**{k: r[k] for k in DataResult._by
if k in r and r[k].strip()},
**{k: r['data_'+k] for k in DataResult._fields
if 'data_'+k in r and r['data_'+k].strip()}))
**{k: r[k] for k in DataResult._fields
if k in r and r[k].strip()}))
except TypeError:
pass
@@ -559,14 +559,14 @@ def main(obj_paths, *,
with openio(args['output'], 'w') as f:
writer = csv.DictWriter(f,
(by if by is not None else DataResult._by)
+ ['data_'+k for k in (
+ [k for k in (
fields if fields is not None else DataResult._fields)])
writer.writeheader()
for r in results:
writer.writerow(
{k: getattr(r, k) for k in (
by if by is not None else DataResult._by)}
| {'data_'+k: getattr(r, k) for k in (
| {k: getattr(r, k) for k in (
fields if fields is not None else DataResult._fields)})
# find previous results?
@@ -580,15 +580,15 @@ def main(obj_paths, *,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('data_'+k in r and r['data_'+k].strip()
if not any(k in r and r[k].strip()
for k in DataResult._fields):
continue
try:
diff_results.append(DataResult(
**{k: r[k] for k in DataResult._by
if k in r and r[k].strip()},
**{k: r['data_'+k] for k in DataResult._fields
if 'data_'+k in r and r['data_'+k].strip()}))
**{k: r[k] for k in DataResult._fields
if k in r and r[k].strip()}))
except TypeError:
pass
except FileNotFoundError:

View File

@@ -1064,15 +1064,15 @@ def report(perf_paths, *,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('perf_'+k in r and r['perf_'+k].strip()
if not any(k in r and r[k].strip()
for k in PerfResult._fields):
continue
try:
results.append(PerfResult(
**{k: r[k] for k in PerfResult._by
if k in r and r[k].strip()},
**{k: r['perf_'+k] for k in PerfResult._fields
if 'perf_'+k in r and r['perf_'+k].strip()}))
**{k: r[k] for k in PerfResult._fields
if k in r and r[k].strip()}))
except TypeError:
pass
@@ -1094,14 +1094,14 @@ def report(perf_paths, *,
with openio(args['output'], 'w') as f:
writer = csv.DictWriter(f,
(by if by is not None else PerfResult._by)
+ ['perf_'+k for k in (
+ [k for k in (
fields if fields is not None else PerfResult._fields)])
writer.writeheader()
for r in results:
writer.writerow(
{k: getattr(r, k) for k in (
by if by is not None else PerfResult._by)}
| {'perf_'+k: getattr(r, k) for k in (
| {k: getattr(r, k) for k in (
fields if fields is not None else PerfResult._fields)})
# find previous results?
@@ -1115,15 +1115,15 @@ def report(perf_paths, *,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('perf_'+k in r and r['perf_'+k].strip()
if not any(k in r and r[k].strip()
for k in PerfResult._fields):
continue
try:
diff_results.append(PerfResult(
**{k: r[k] for k in PerfResult._by
if k in r and r[k].strip()},
**{k: r['perf_'+k] for k in PerfResult._fields
if 'perf_'+k in r and r['perf_'+k].strip()}))
**{k: r[k] for k in PerfResult._fields
if k in r and r[k].strip()}))
except TypeError:
pass
except FileNotFoundError:

View File

@@ -1042,15 +1042,15 @@ def report(obj_path='', trace_paths=[], *,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('perfbd_'+k in r and r['perfbd_'+k].strip()
if not any(k in r and r[k].strip()
for k in PerfBdResult._fields):
continue
try:
results.append(PerfBdResult(
**{k: r[k] for k in PerfBdResult._by
if k in r and r[k].strip()},
**{k: r['perfbd_'+k] for k in PerfBdResult._fields
if 'perfbd_'+k in r and r['perfbd_'+k].strip()}))
**{k: r[k] for k in PerfBdResult._fields
if k in r and r[k].strip()}))
except TypeError:
pass
@@ -1072,14 +1072,14 @@ def report(obj_path='', trace_paths=[], *,
with openio(args['output'], 'w') as f:
writer = csv.DictWriter(f,
(by if by is not None else PerfBdResult._by)
+ ['perfbd_'+k for k in (
+ [k for k in (
fields if fields is not None else PerfBdResult._fields)])
writer.writeheader()
for r in results:
writer.writerow(
{k: getattr(r, k) for k in (
by if by is not None else PerfBdResult._by)}
| {'perfbd_'+k: getattr(r, k) for k in (
| {k: getattr(r, k) for k in (
fields if fields is not None else PerfBdResult._fields)})
# find previous results?
@@ -1093,16 +1093,15 @@ def report(obj_path='', trace_paths=[], *,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('perfbd_'+k in r and r['perfbd_'+k].strip()
if not any(k in r and r[k].strip()
for k in PerfBdResult._fields):
continue
try:
diff_results.append(PerfBdResult(
**{k: r[k] for k in PerfBdResult._by
if k in r and r[k].strip()},
**{k: r['perfbd_'+k] for k in PerfBdResult._fields
if 'perfbd_'+k in r
and r['perfbd_'+k].strip()}))
**{k: r[k] for k in PerfBdResult._fields
if k in r and r[k].strip()}))
except TypeError:
pass
except FileNotFoundError:

View File

@@ -558,15 +558,15 @@ def main(ci_paths,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('stack_'+k in r and r['stack_'+k].strip()
if not any(k in r and r[k].strip()
for k in StackResult._fields):
continue
try:
results.append(StackResult(
**{k: r[k] for k in StackResult._by
if k in r and r[k].strip()},
**{k: r['stack_'+k] for k in StackResult._fields
if 'stack_'+k in r and r['stack_'+k].strip()}))
**{k: r[k] for k in StackResult._fields
if k in r and r[k].strip()}))
except TypeError:
pass
@@ -588,14 +588,14 @@ def main(ci_paths,
with openio(args['output'], 'w') as f:
writer = csv.DictWriter(f,
(by if by is not None else StackResult._by)
+ ['stack_'+k for k in (
+ [k for k in (
fields if fields is not None else StackResult._fields)])
writer.writeheader()
for r in results:
writer.writerow(
{k: getattr(r, k) for k in (
by if by is not None else StackResult._by)}
| {'stack_'+k: getattr(r, k) for k in (
| {k: getattr(r, k) for k in (
fields if fields is not None else StackResult._fields)})
# find previous results?
@@ -609,15 +609,15 @@ def main(ci_paths,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('stack_'+k in r and r['stack_'+k].strip()
if not any(k in r and r[k].strip()
for k in StackResult._fields):
continue
try:
diff_results.append(StackResult(
**{k: r[k] for k in StackResult._by
if k in r and r[k].strip()},
**{k: r['stack_'+k] for k in StackResult._fields
if 'stack_'+k in r and r['stack_'+k].strip()}))
**{k: r[k] for k in StackResult._fields
if k in r and r[k].strip()}))
except TypeError:
raise
except FileNotFoundError:

View File

@@ -481,17 +481,16 @@ def main(obj_paths, *,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('struct_'+k in r and r['struct_'+k].strip()
if not any(k in r and r[k].strip()
for k in StructResult._fields):
continue
try:
results.append(StructResult(
**{k: r[k] for k in StructResult._by
if k in r and r[k].strip()},
**{k: r['struct_'+k]
**{k: r[k]
for k in StructResult._fields
if 'struct_'+k in r
and r['struct_'+k].strip()}))
if k in r and r[k].strip()}))
except TypeError:
pass
@@ -513,14 +512,14 @@ def main(obj_paths, *,
with openio(args['output'], 'w') as f:
writer = csv.DictWriter(f,
(by if by is not None else StructResult._by)
+ ['struct_'+k for k in (
+ [k for k in (
fields if fields is not None else StructResult._fields)])
writer.writeheader()
for r in results:
writer.writerow(
{k: getattr(r, k) for k in (
by if by is not None else StructResult._by)}
| {'struct_'+k: getattr(r, k) for k in (
| {k: getattr(r, k) for k in (
fields if fields is not None else StructResult._fields)})
# find previous results?
@@ -534,17 +533,16 @@ def main(obj_paths, *,
if not all(k in r and r[k] in vs for k, vs in defines):
continue
if not any('struct_'+k in r and r['struct_'+k].strip()
if not any(k in r and r[k].strip()
for k in StructResult._fields):
continue
try:
diff_results.append(StructResult(
**{k: r[k] for k in StructResult._by
if k in r and r[k].strip()},
**{k: r['struct_'+k]
**{k: r[k]
for k in StructResult._fields
if 'struct_'+k in r
and r['struct_'+k].strip()}))
if k in r and r[k].strip()}))
except TypeError:
pass
except FileNotFoundError:

View File

@@ -1034,8 +1034,8 @@ def run_stage(name, runner, test_ids, stdout_, trace_, output_, **args):
'suite': suite,
'case': case,
**defines,
'test_passed': '1/1',
'test_time': '%.6f' % (
'passed': '1/1',
'time': '%.6f' % (
time.time() - last_time)})
elif op == 'skipped':
locals.seen_perms += 1
@@ -1096,7 +1096,7 @@ def run_stage(name, runner, test_ids, stdout_, trace_, output_, **args):
output_.writerow({
'suite': suite,
'case': case,
'test_passed': '0/1',
'passed': '0/1',
**defines})
# race condition for multiple failures?
@@ -1230,7 +1230,7 @@ def run(runner, test_ids=[], **args):
if args.get('output'):
output = TestOutput(args['output'],
['suite', 'case'],
['test_passed', 'test_time'])
['passed', 'time'])
# measure runtime
start = time.time()