forked from Imagelibrary/littlefs
Moved post-bench amor/avg analysis out into amor.py and avg.py
1. Being able to inspect results before benchmarks complete is useful for tracking their status. It also allows some analysis even if a benchmark fails.

2. Moving these scripts out of bench.py allows them to be a bit more flexible, at the cost of CSV parsing/structuring overhead.

3. Writing benchmark measurements immediately avoids RAM buildup as we store intermediate measurements for each bench permutation. This may increase the IO bottleneck, but we end up writing the same number of lines, so not sure...

I realize avg.py has quite a bit of overlap with summary.py, but I don't want to entangle them further. summary.py is already trying to do too much as is...
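For intuition, here is a minimal sketch (not part of the commit) of the amortized and per-byte math that amor.py applies to each group of rows; the field names bench_iter/bench_size/bench_proged follow the CSV columns written by bench.py in the diff below, and the toy values are made up:

    # toy rows, roughly as bench.py might write them for one permutation
    rows = [
        {'bench_iter': 0, 'bench_size': 16, 'bench_proged': 32},
        {'bench_iter': 1, 'bench_size': 16, 'bench_proged': 48},
        {'bench_iter': 2, 'bench_size': 16, 'bench_proged': 112},
    ]

    sums, size = 0, 0
    for j, r in enumerate(sorted(rows, key=lambda r: r['bench_iter'])):
        sums += r['bench_proged']
        size += r['bench_size']
        amor = sums / (j+1)             # running mean up to this iteration
        per = r['bench_proged'] / size  # this iteration's cost per byte so far
        print(r['bench_iter'], amor, per)

    # the script itself would be invoked on bench.py's output CSV, e.g.
    # ./scripts/amor.py bench.csv -obench.amor.csv -ibench_iter -nbench_size
    # (flag names taken from the argparse definitions in the diff below)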
scripts/amor.py (new executable file, 248 lines)
@@ -0,0 +1,248 @@
#!/usr/bin/env python3
#
# Amortize benchmark measurements
#

import collections as co
import csv
import itertools as it
import math as m
import os


def openio(path, mode='r', buffering=-1):
    # allow '-' for stdin/stdout
    if path == '-':
        if 'r' in mode:
            return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
        else:
            return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
    else:
        return open(path, mode, buffering)

# parse different data representations
def dat(x):
    # allow the first part of an a/b fraction
    if '/' in x:
        x, _ = x.split('/', 1)

    # first try as int
    try:
        return int(x, 0)
    except ValueError:
        pass

    # then try as float
    try:
        x = float(x)
        # just don't allow infinity or nan
        if m.isinf(x) or m.isnan(x):
            raise ValueError("invalid dat %r" % x)
        return x
    except ValueError:
        pass

    # else give up
    raise ValueError("invalid dat %r" % x)

def collect(csv_paths, renames=[], defines=[]):
    # collect results from CSV files
    results = []
    for path in csv_paths:
        try:
            with openio(path) as f:
                reader = csv.DictReader(f, restval='')
                for r in reader:
                    # apply any renames
                    if renames:
                        # make a copy so renames can overlap
                        r_ = {}
                        for new_k, old_k in renames:
                            if old_k in r:
                                r_[new_k] = r[old_k]
                        r.update(r_)

                    # filter by matching defines
                    if not all(k in r and r[k] in vs for k, vs in defines):
                        continue

                    results.append(r)
        except FileNotFoundError:
            pass

    return results

def main(csv_paths, output, *,
        amor=False,
        per=False,
        meas=None,
        iter=None,
        size=None,
        by=None,
        fields=None,
        defines=[]):
    # default to amortizing and per-byte results if size is present
    if not amor and not per:
        amor = True
        if size is not None:
            per = True

    # separate out renames
    renames = list(it.chain.from_iterable(
        ((k, v) for v in vs)
        for k, vs in it.chain(by or [], fields or [])))
    if by is not None:
        by = [k for k, _ in by]
    if fields is not None:
        fields = [k for k, _ in fields]

    # collect results from csv files
    results = collect(csv_paths, renames, defines)

    # if fields not specified, try to guess from data
    if fields is None:
        fields = co.OrderedDict()
        for r in results:
            for k, v in r.items():
                if k not in (by or []) and k != iter and v.strip():
                    try:
                        dat(v)
                        fields[k] = True
                    except ValueError:
                        fields[k] = False
        fields = list(k for k, v in fields.items() if v)

    # if by not specified, guess it's anything not in iter/fields and not a
    # source of a rename
    if by is None:
        by = co.OrderedDict()
        for r in results:
            # also ignore None keys, these are introduced by csv.DictReader
            # when header + row mismatch
            by.update((k, True) for k in r.keys()
                if k is not None
                    and k != iter
                    and k not in fields
                    and not any(k == old_k for _, old_k in renames))
        by = list(by.keys())

    # convert iter/fields to ints/floats
    for r in results:
        for k in {iter} | set(fields) | ({size} if size is not None else {}):
            if k in r:
                r[k] = dat(r[k]) if r[k].strip() else 0

    # organize by 'by' values
    results_ = co.defaultdict(lambda: [])
    for r in results:
        key = tuple(r.get(k, '') for k in by)
        results_[key].append(r)
    results = results_

    # for each key compute the amortized results
    amors = []
    for key, rs in results.items():
        # keep a running sum for each field
        sums = {f: 0 for f in fields}
        size_ = 0
        for j, (i, r) in enumerate(sorted(
                ((r.get(iter, 0), r) for r in rs),
                key=lambda p: p[0])):
            # update sums
            for f in fields:
                sums[f] += r.get(f, 0)
            size_ += r.get(size, 1)

            # find amortized results
            if amor:
                amors.append(r
                    | {f: sums[f] / (j+1) for f in fields}
                    | ({} if meas is None
                        else {meas: r[meas]+'+amor'} if meas in r
                        else {meas: 'amor'}))

            # also find per-byte results
            if per:
                amors.append(r
                    | {f: r.get(f, 0) / size_ for f in fields}
                    | ({} if meas is None
                        else {meas: r[meas]+'+per'} if meas in r
                        else {meas: 'per'}))

    # write results to CSV
    with openio(output, 'w') as f:
        writer = csv.DictWriter(f,
            by + ([meas] if meas not in by else []) + [iter] + fields)
        writer.writeheader()
        for r in amors:
            writer.writerow(r)


if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Amortize benchmark measurements.",
        allow_abbrev=False)
    parser.add_argument(
        'csv_paths',
        nargs='*',
        help="Input *.csv files.")
    parser.add_argument(
        '-o', '--output',
        required=True,
        help="*.csv file to write amortized measurements to.")
    parser.add_argument(
        '--amor',
        action='store_true',
        help="Compute amortized results.")
    parser.add_argument(
        '--per',
        action='store_true',
        help="Compute per-byte results.")
    parser.add_argument(
        '-m', '--meas',
        help="Optional name of measurement name field. If provided, the name "
            "will be modified with +amor or +per.")
    parser.add_argument(
        '-i', '--iter',
        required=True,
        help="Name of iteration field.")
    parser.add_argument(
        '-n', '--size',
        help="Optional name of size field.")
    parser.add_argument(
        '-b', '--by',
        action='append',
        type=lambda x: (
            lambda k, vs=None: (
                k.strip(),
                tuple(v.strip() for v in vs.split(','))
                    if vs is not None else ())
            )(*x.split('=', 1)),
        help="Group by this field. Can rename fields with new_name=old_name.")
    parser.add_argument(
        '-f', '--field',
        dest='fields',
        action='append',
        type=lambda x: (
            lambda k, vs=None: (
                k.strip(),
                tuple(v.strip() for v in vs.split(','))
                    if vs is not None else ())
            )(*x.split('=', 1)),
        help="Field to amortize. Can rename fields with new_name=old_name.")
    parser.add_argument(
        '-D', '--define',
        dest='defines',
        action='append',
        type=lambda x: (
            lambda k, vs: (
                k.strip(),
                {v.strip() for v in vs.split(',')})
            )(*x.split('=', 1)),
        help="Only include results where this field is this value. May include "
            "comma-separated options.")
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))
scripts/avg.py (new executable file, 308 lines)
@@ -0,0 +1,308 @@
#!/usr/bin/env python3
#
# Compute averages/etc of benchmark measurements
#

import collections as co
import csv
import itertools as it
import math as m
import os


def openio(path, mode='r', buffering=-1):
    # allow '-' for stdin/stdout
    if path == '-':
        if 'r' in mode:
            return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
        else:
            return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
    else:
        return open(path, mode, buffering)

# parse different data representations
def dat(x):
    # allow the first part of an a/b fraction
    if '/' in x:
        x, _ = x.split('/', 1)

    # first try as int
    try:
        return int(x, 0)
    except ValueError:
        pass

    # then try as float
    try:
        x = float(x)
        # just don't allow infinity or nan
        if m.isinf(x) or m.isnan(x):
            raise ValueError("invalid dat %r" % x)
        return x
    except ValueError:
        pass

    # else give up
    raise ValueError("invalid dat %r" % x)

def collect(csv_paths, renames=[], defines=[]):
    # collect results from CSV files
    results = []
    for path in csv_paths:
        try:
            with openio(path) as f:
                reader = csv.DictReader(f, restval='')
                for r in reader:
                    # apply any renames
                    if renames:
                        # make a copy so renames can overlap
                        r_ = {}
                        for new_k, old_k in renames:
                            if old_k in r:
                                r_[new_k] = r[old_k]
                        r.update(r_)

                    # filter by matching defines
                    if not all(k in r and r[k] in vs for k, vs in defines):
                        continue

                    results.append(r)
        except FileNotFoundError:
            pass

    return results

def main(csv_paths, output, *,
        sum=False,
        prod=False,
        min=False,
        max=False,
        bnd=False,
        avg=False,
        stddev=False,
        gmean=False,
        gstddev=False,
        meas=None,
        by=None,
        seeds=None,
        fields=None,
        defines=[]):
    sum_, sum = sum, __builtins__.sum
    min_, min = min, __builtins__.min
    max_, max = max, __builtins__.max

    # default to averaging
    if (not sum_
            and not prod
            and not min_
            and not max_
            and not bnd
            and not avg
            and not stddev
            and not gmean
            and not gstddev):
        avg = True

    # separate out renames
    renames = list(it.chain.from_iterable(
        ((k, v) for v in vs)
        for k, vs in it.chain(by or [], seeds or [], fields or [])))
    if by is not None:
        by = [k for k, _ in by]
    if seeds is not None:
        seeds = [k for k, _ in seeds]
    if fields is not None:
        fields = [k for k, _ in fields]

    # collect results from csv files
    results = collect(csv_paths, renames, defines)

    # if fields not specified, try to guess from data
    if fields is None:
        fields = co.OrderedDict()
        for r in results:
            for k, v in r.items():
                if k not in (by or []) and k not in (seeds or []) and v.strip():
                    try:
                        dat(v)
                        fields[k] = True
                    except ValueError:
                        fields[k] = False
        fields = list(k for k, v in fields.items() if v)

    # if by not specified, guess it's anything not in seeds/fields and not a
    # source of a rename
    if by is None:
        by = co.OrderedDict()
        for r in results:
            # also ignore None keys, these are introduced by csv.DictReader
            # when header + row mismatch
            by.update((k, True) for k in r.keys()
                if k is not None
                    and k not in (seeds or [])
                    and k not in fields
                    and not any(k == old_k for _, old_k in renames))
        by = list(by.keys())

    # convert fields to ints/floats
    for r in results:
        for k in fields:
            if k in r:
                r[k] = dat(r[k]) if r[k].strip() else 0

    # organize by 'by' values
    results_ = co.defaultdict(lambda: [])
    for r in results:
        key = tuple(r.get(k, '') for k in by)
        results_[key].append(r)
    results = results_

    # for each key calculate the avgs/etc
    avgs = []
    for key, rs in results.items():
        vs = {f: [] for f in fields}
        meas__ = None
        for r in rs:
            if all(k in r and r[k] == v for k, v in zip(by, key)):
                for f in fields:
                    vs[f].append(r.get(f, 0))
                if meas is not None and meas in r:
                    meas__ = r[meas]

        def append(meas_, f_):
            avgs.append(
                {k: v for k, v in zip(by, key)}
                | {f: f_(vs_) for f, vs_ in vs.items()}
                | ({} if meas is None
                    else {meas: meas_} if meas__ is None
                    else {meas: meas__+'+'+meas_}))

        if sum_:    append('sum',    lambda vs: sum(vs))
        if prod:    append('prod',   lambda vs: m.prod(vs))
        if min_:    append('min',    lambda vs: min(vs, default=0))
        if max_:    append('max',    lambda vs: max(vs, default=0))
        if bnd:     append('bnd',    lambda vs: min(vs, default=0))
        if bnd:     append('bnd',    lambda vs: max(vs, default=0))
        if avg:     append('avg',    lambda vs: sum(vs) / max(len(vs), 1))
        if stddev:  append('stddev', lambda vs: (
            lambda avg: m.sqrt(
                sum((v - avg)**2 for v in vs) / max(len(vs), 1))
            )(sum(vs) / max(len(vs), 1)))
        if gmean:   append('gmean',  lambda vs:
            m.prod(float(v) for v in vs)**(1 / max(len(vs), 1)))
        if gstddev: append('gstddev', lambda vs: (
            lambda gmean: m.exp(m.sqrt(
                sum(m.log(v/gmean)**2 for v in vs) / max(len(vs), 1)))
                if gmean else m.inf
            )(m.prod(float(v) for v in vs)**(1 / max(len(vs), 1))))

    # write results to CSV
    with openio(output, 'w') as f:
        writer = csv.DictWriter(f,
            by + ([meas] if meas not in by else []) + fields)
        writer.writeheader()
        for r in avgs:
            writer.writerow(r)


if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Compute averages/etc of benchmark measurements.",
        allow_abbrev=False)
    parser.add_argument(
        'csv_paths',
        nargs='*',
        help="Input *.csv files.")
    parser.add_argument(
        '-o', '--output',
        required=True,
        help="*.csv file to write averaged measurements to.")
    parser.add_argument(
        '--sum',
        action='store_true',
        help="Compute the sum.")
    parser.add_argument(
        '--prod',
        action='store_true',
        help="Compute the product.")
    parser.add_argument(
        '--min',
        action='store_true',
        help="Compute the min.")
    parser.add_argument(
        '--max',
        action='store_true',
        help="Compute the max.")
    parser.add_argument(
        '--bnd',
        action='store_true',
        help="Compute the bounds (min+max concatenated).")
    parser.add_argument(
        '--avg', '--mean',
        action='store_true',
        help="Compute the average (the default).")
    parser.add_argument(
        '--stddev',
        action='store_true',
        help="Compute the standard deviation.")
    parser.add_argument(
        '--gmean',
        action='store_true',
        help="Compute the geometric mean.")
    parser.add_argument(
        '--gstddev',
        action='store_true',
        help="Compute the geometric standard deviation.")
    parser.add_argument(
        '-m', '--meas',
        help="Optional name of measurement name field. If provided, the name "
            "will be modified with +avg, +min, etc.")
    parser.add_argument(
        '-b', '--by',
        action='append',
        type=lambda x: (
            lambda k, vs=None: (
                k.strip(),
                tuple(v.strip() for v in vs.split(','))
                    if vs is not None else ())
            )(*x.split('=', 1)),
        help="Group by this field. Can rename fields with new_name=old_name.")
    parser.add_argument(
        '-s', '--seed',
        dest='seeds',
        action='append',
        type=lambda x: (
            lambda k, vs=None: (
                k.strip(),
                tuple(v.strip() for v in vs.split(','))
                    if vs is not None else ())
            )(*x.split('=', 1)),
        help="Field to ignore when averaging. Can rename fields with "
            "new_name=old_name.")
    parser.add_argument(
        '-f', '--field',
        dest='fields',
        action='append',
        type=lambda x: (
            lambda k, vs=None: (
                k.strip(),
                tuple(v.strip() for v in vs.split(','))
                    if vs is not None else ())
            )(*x.split('=', 1)),
        help="Field to average. Can rename fields with new_name=old_name.")
    parser.add_argument(
        '-D', '--define',
        dest='defines',
        action='append',
        type=lambda x: (
            lambda k, vs: (
                k.strip(),
                {v.strip() for v in vs.split(',')})
            )(*x.split('=', 1)),
        help="Only include results where this field is this value. May include "
            "comma-separated options.")
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))
scripts/bench.py (156 lines changed)
@@ -942,54 +942,6 @@ class BenchOutput:
         for row in self.rows:
             self.writer.writerow(row)
 
-    def avg(self):
-        # compute min/max/avg
-        ops = ['bench_readed', 'bench_proged', 'bench_erased']
-        results = co.defaultdict(lambda: {
-            'sums': {op: 0 for op in ops},
-            'mins': {op: +m.inf for op in ops},
-            'maxs': {op: -m.inf for op in ops},
-            'count': 0})
-
-        for row in self.rows:
-            # we only care about results with a BENCH_SEED entry
-            if 'BENCH_SEED' not in row:
-                continue
-
-            # figure out a key for each row, this is everything but the bench
-            # results/seed reencoded as a big tuple-tuple for hashability
-            key = (row['bench_meas'], tuple(sorted(
-                (k, v) for k, v in row.items()
-                if k != 'BENCH_SEED'
-                    and k != 'bench_meas'
-                    and k != 'bench_agg'
-                    and k not in ops)))
-            # find sum/min/max/etc
-            result = results[key]
-            for op in ops:
-                result['sums'][op] += row[op]
-                result['mins'][op] = min(result['mins'][op], row[op])
-                result['maxs'][op] = max(result['maxs'][op], row[op])
-            result['count'] += 1
-
-        # append results to output
-        for (meas, key), result in results.items():
-            self.writerow({
-                'bench_meas': meas+'+avg',
-                'bench_agg': 'avg',
-                **{k: v for k, v in key},
-                **{op: result['sums'][op] / result['count'] for op in ops}})
-            self.writerow({
-                'bench_meas': meas+'+min',
-                'bench_agg': 'bnd',
-                **{k: v for k, v in key},
-                **{op: result['mins'][op] for op in ops}})
-            self.writerow({
-                'bench_meas': meas+'+max',
-                'bench_agg': 'bnd',
-                **{k: v for k, v in key},
-                **{op: result['maxs'][op] for op in ops}})
-
 # A bench failure
 class BenchFailure(Exception):
     def __init__(self, id, returncode, stdout, assert_=None):
@@ -998,34 +950,6 @@ class BenchFailure(Exception):
         self.stdout = stdout
         self.assert_ = assert_
 
-# compute extra result stuff, this includes averages and amortized results
-def bench_results(results):
-    ops = ['readed', 'proged', 'erased']
-
-    # first compute amortized results
-    amors = {}
-    for meas in set(meas for meas, _ in results.keys()):
-        # keep a running sum
-        sums = {op: 0 for op in ops}
-        size = 0
-        for i, (iter, result) in enumerate(sorted(
-                (iter, result) for (meas_, iter), result in results.items()
-                if meas_ == meas)):
-            for op in ops:
-                sums[op] += result.get(op, 0)
-            size += result.get('size', 1)
-
-            # find amortized results
-            amors[meas+'+amor', iter] = {
-                'size': result.get('size', 1),
-                **{op: sums[op] / (i+1) for op in ops}}
-            # also find per-byte results
-            amors[meas+'+div', iter] = {
-                'size': result.get('size', 1),
-                **{op: result.get(op, 0) / size for op in ops}}
-
-    return results | amors
-
 
 def run_stage(name, runner, bench_ids, stdout_, trace_, output_, **args):
     # get expected suite/case/perm counts
@@ -1081,10 +1005,11 @@ def run_stage(name, runner, bench_ids, stdout_, trace_, output_, **args):
             mpty = os.fdopen(mpty, 'r', 1)
 
             last_id = None
+            last_case = None
+            last_suite = None
+            last_defines = None # fetched on demand
             last_stdout = co.deque(maxlen=args.get('context', 5) + 1)
             last_assert = None
-            if output_:
-                last_results = {}
             try:
                 while True:
                     # parse a line for state changes
@@ -1110,35 +1035,17 @@ def run_stage(name, runner, bench_ids, stdout_, trace_, output_, **args):
                        if op == 'running':
                            locals.seen_perms += 1
                            last_id = m.group('id')
+                           last_case = m.group('case')
+                           last_suite = case_suites[last_case]
+                           last_defines = None
                            last_stdout.clear()
                            last_assert = None
-                           if output_:
-                               last_results = {}
                        elif op == 'finished':
                            case = m.group('case')
                            suite = case_suites[case]
                            passed_suite_perms[suite] += 1
                            passed_case_perms[case] += 1
                            passed_perms += 1
-                           if output_:
-                               # get defines and write to csv
-                               defines = find_defines(
-                                   runner, m.group('id'), **args)
-                               # compute extra measurements here
-                               last_results = bench_results(last_results)
-                               for (meas, iter), result in (
-                                       last_results.items()):
-                                   output_.writerow({
-                                       'suite': suite,
-                                       'case': case,
-                                       **defines,
-                                       'bench_meas': meas,
-                                       'bench_agg': 'raw',
-                                       'bench_iter': iter,
-                                       'bench_size': result['size'],
-                                       'bench_readed': result['readed'],
-                                       'bench_proged': result['proged'],
-                                       'bench_erased': result['erased']})
                        elif op == 'skipped':
                            locals.seen_perms += 1
                        elif op == 'assert':
@@ -1153,28 +1060,39 @@ def run_stage(name, runner, bench_ids, stdout_, trace_, output_, **args):
                            meas = m.group('meas')
                            iter = int(m.group('iter'))
                            size = int(m.group('size'))
-                           result = {'size': size}
-                           for op in ['readed', 'proged', 'erased']:
-                               if m.group(op) is None:
-                                   result[op] = 0
-                               elif '.' in m.group(op):
-                                   result[op] = float(m.group(op))
+                           # parse measurements
+                           def dat(v):
+                               if v is None:
+                                   return 0
+                               elif '.' in v:
+                                   return float(v)
                                else:
-                                   result[op] = int(m.group(op))
-                           # keep track of per-perm results
+                                   return int(v)
+                           readed_ = dat(m.group('readed'))
+                           proged_ = dat(m.group('proged'))
+                           erased_ = dat(m.group('erased'))
                            if output_:
-                               # if we've already seen this measurement, sum
-                               result_ = last_results.get((meas, iter))
-                               if result_ is not None:
-                                   result['readed'] += result_['readed']
-                                   result['proged'] += result_['proged']
-                                   result['erased'] += result_['erased']
-                                   result['size'] += result_['size']
-                               last_results[meas, iter] = result
+                               # fetch defines if needed, only do this at most
+                               # once per perm
+                               if last_defines is None:
+                                   last_defines = find_defines(
+                                       runner, last_id, **args)
+                               # write measurements immediately, this allows
+                               # analysis of partial results
+                               output_.writerow({
+                                   'suite': last_suite,
+                                   'case': last_case,
+                                   **last_defines,
+                                   'bench_meas': meas,
+                                   'bench_iter': iter,
+                                   'bench_size': size,
+                                   'bench_readed': readed_,
+                                   'bench_proged': proged_,
+                                   'bench_erased': erased_})
                            # keep track of total for summary
-                           readed += result['readed']
-                           proged += result['proged']
-                           erased += result['erased']
+                           readed += readed_
+                           proged += proged_
+                           erased += erased_
             except KeyboardInterrupt:
                 raise BenchFailure(last_id, 1, list(last_stdout))
             finally:
@@ -1388,8 +1306,6 @@ def run(runner, bench_ids=[], **args):
    except BrokenPipeError:
        pass
    if output:
-       # compute averages?
-       output.avg()
        output.close()
 
    # show summary
scripts/summary.py
@@ -30,10 +30,10 @@ OPS = {
     'prod': lambda xs: m.prod(xs[1:], start=xs[0]),
     'min': min,
     'max': max,
-    'mean': lambda xs: Float(sum(float(x) for x in xs) / len(xs)),
+    'avg': lambda xs: Float(sum(float(x) for x in xs) / len(xs)),
     'stddev': lambda xs: (
-        lambda mean: Float(
-            m.sqrt(sum((float(x) - mean)**2 for x in xs) / len(xs)))
+        lambda avg: Float(
+            m.sqrt(sum((float(x) - avg)**2 for x in xs) / len(xs)))
         )(sum(float(x) for x in xs) / len(xs)),
     'gmean': lambda xs: Float(m.prod(float(x) for x in xs)**(1/len(xs))),
     'gstddev': lambda xs: (
@@ -817,7 +817,7 @@ if __name__ == "__main__":
         action='append',
         help="Take the maximum of these fields.")
     parser.add_argument(
-        '--mean',
+        '--avg', '--mean',
         action='append',
         help="Average these fields.")
     parser.add_argument(