forked from Imagelibrary/littlefs
scripts: Disentangled -r/--hot and -i/--enumerate
This removes most of the special behavior around how -r/--hot and -i/--enumerate interact. This does mean -r/--hot risks folding results if -i/--enumerate is not specified, but this is _technically_ a valid operation. For most of the recursive result scripts, I've replaced the "i" field with separate "z" and "i" fields for depth and field number, which I think is a bit more informative/useful. I've also added a default-hidden "off" field to structs.py/ctx.py, since we have that info available. I considered replacing "i" with this, but decided against it since non-zero offsets for union members would risk being confusing/mistake-prone.
This commit is contained in:
@@ -138,26 +138,25 @@ class RInt(co.namedtuple('RInt', 'x')):
|
||||
|
||||
# perf results
|
||||
class PerfBdResult(co.namedtuple('PerfBdResult', [
|
||||
'i', 'file', 'function', 'line',
|
||||
'z', 'file', 'function', 'line',
|
||||
'readed', 'proged', 'erased',
|
||||
'children'])):
|
||||
_by = ['i', 'file', 'function', 'line']
|
||||
_by = ['z', 'file', 'function', 'line']
|
||||
_fields = ['readed', 'proged', 'erased']
|
||||
_sort = ['erased', 'proged', 'readed']
|
||||
_types = {'readed': RInt, 'proged': RInt, 'erased': RInt}
|
||||
_i = 'i'
|
||||
_children = 'children'
|
||||
|
||||
__slots__ = ()
|
||||
def __new__(cls, i=None, file='', function='', line=0,
|
||||
def __new__(cls, z=0, file='', function='', line=0,
|
||||
readed=0, proged=0, erased=0,
|
||||
children=None):
|
||||
return super().__new__(cls, i, file, function, int(RInt(line)),
|
||||
return super().__new__(cls, z, file, function, int(RInt(line)),
|
||||
RInt(readed), RInt(proged), RInt(erased),
|
||||
children if children is not None else [])
|
||||
|
||||
def __add__(self, other):
|
||||
return PerfBdResult(self.i, self.file, self.function, self.line,
|
||||
return PerfBdResult(self.z, self.file, self.function, self.line,
|
||||
self.readed + other.readed,
|
||||
self.proged + other.proged,
|
||||
self.erased + other.erased,
|
||||
@@ -715,15 +714,15 @@ def collect_job(path, start, stop, syms, lines, *,
|
||||
commit()
|
||||
|
||||
# rearrange results into result type
|
||||
def to_results(results):
|
||||
def to_results(results, z):
|
||||
results_ = []
|
||||
for name, (r, p, e, children) in results.items():
|
||||
results_.append(PerfBdResult(None, *name,
|
||||
results_.append(PerfBdResult(z, *name,
|
||||
r, p, e,
|
||||
children=to_results(children)))
|
||||
children=to_results(children, z+1)))
|
||||
return results_
|
||||
|
||||
return to_results(results)
|
||||
return to_results(results, 0)
|
||||
|
||||
def starapply(args):
|
||||
f, args, kwargs = args
|
||||
@@ -866,17 +865,17 @@ def fold(Result, results, *,
|
||||
return folded
|
||||
|
||||
def hotify(Result, results, *,
|
||||
fields=None,
|
||||
sort=None,
|
||||
enumerate=None,
|
||||
depth=1,
|
||||
hot=None,
|
||||
**_):
|
||||
# hotify only makes sense for recursive results
|
||||
assert hasattr(Result, '_i')
|
||||
assert hasattr(Result, '_children')
|
||||
# note! hotifying risks confusion if you don't enumerate/have a z
|
||||
# field, since it will allow folding across recursive boundaries
|
||||
import builtins
|
||||
enumerate_, enumerate = enumerate, builtins.enumerate
|
||||
|
||||
if fields is None:
|
||||
fields = Result._fields
|
||||
# hotify only makes sense for recursive results
|
||||
assert hasattr(Result, '_children')
|
||||
|
||||
results_ = []
|
||||
for r in results:
|
||||
@@ -897,9 +896,10 @@ def hotify(Result, results, *,
|
||||
for k_ in ([k] if k else Result._sort)))
|
||||
for k, reverse in it.chain(hot, [(None, False)])))
|
||||
|
||||
hot_.append(r._replace(**{
|
||||
Result._i: RInt(len(hot_)),
|
||||
Result._children: []}))
|
||||
hot_.append(r._replace(**(
|
||||
({enumerate_: len(hot_)}
|
||||
if enumerate_ is not None else {})
|
||||
| {Result._children: []})))
|
||||
|
||||
# recurse?
|
||||
if depth_ > 1:
|
||||
@@ -907,8 +907,7 @@ def hotify(Result, results, *,
|
||||
depth_-1)
|
||||
|
||||
recurse(getattr(r, Result._children), depth-1)
|
||||
results_.append(r._replace(**{
|
||||
Result._children: hot_}))
|
||||
results_.append(r._replace(**{Result._children: hot_}))
|
||||
|
||||
return results_
|
||||
|
||||
@@ -1255,11 +1254,13 @@ def write_csv(path, Result, results, *,
|
||||
with openio(path, 'w') as f:
|
||||
# write csv?
|
||||
if not json:
|
||||
writer = csv.DictWriter(f,
|
||||
(by if by is not None else Result._by)
|
||||
+ [k for k in (fields
|
||||
if fields is not None
|
||||
else Result._fields)])
|
||||
writer = csv.DictWriter(f, list(co.OrderedDict.fromkeys(it.chain(
|
||||
by
|
||||
if by is not None
|
||||
else Result._by,
|
||||
fields
|
||||
if fields is not None
|
||||
else Result._fields)).keys()))
|
||||
writer.writeheader()
|
||||
for r in results:
|
||||
# note this allows by/fields to overlap
|
||||
@@ -1490,10 +1491,8 @@ def report(paths, *,
|
||||
# hotify?
|
||||
if hot:
|
||||
results = hotify(PerfBdResult, results,
|
||||
fields=fields,
|
||||
depth=depth,
|
||||
hot=hot,
|
||||
**args)
|
||||
hot=hot)
|
||||
|
||||
# write results to CSV/JSON
|
||||
if args.get('output'):
|
||||
@@ -1541,7 +1540,8 @@ def report(paths, *,
|
||||
# print table
|
||||
table(PerfBdResult, results, diff_results,
|
||||
by=by if by is not None else ['function'],
|
||||
fields=fields,
|
||||
fields=fields if fields is not None
|
||||
else ['readed', 'proged', 'erased'],
|
||||
sort=sort,
|
||||
depth=depth,
|
||||
**args)
|
||||
|
||||
Reference in New Issue
Block a user