scripts: Re-adopted result prefixes
Now that I'm looking into some higher-level scripts, being able to
merge results without first renaming everything is useful. This gives
most scripts an implicit prefix for field fields, but _not_ by fields,
allowing easy merging of results from different scripts:

  $ ./scripts/stack.py lfs.ci -o-
  function,stack_frame,stack_limit
  lfs_alloc,288,1328
  lfs_alloc_discard,8,8
  lfs_alloc_findfree,16,32
  ...

These prefixes also now have better support in scripts with the
addition of the --prefix flag (this was tricky for csv.py), which
allows explicit control over field field prefixes:

  $ ./scripts/stack.py lfs.ci -o- --prefix=
  function,frame,limit
  lfs_alloc,288,1328
  lfs_alloc_discard,8,8
  lfs_alloc_findfree,16,32
  ...

  $ ./scripts/stack.py lfs.ci -o- --prefix=wonky_
  function,wonky_frame,wonky_limit
  lfs_alloc,288,1328
  lfs_alloc_discard,8,8
  lfs_alloc_findfree,16,32
  ...
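To illustrate the motivation, here is a minimal sketch of the kind of
merge this enables. The file names and the code.csv fields are just
illustrative assumptions; only the stack_frame/stack_limit fields come
from the example above:

  import csv

  def read_rows(path):
      with open(path, newline='') as f:
          return list(csv.DictReader(f))

  # assume stack.csv has function,stack_frame,stack_limit
  # and    code.csv  has function,code_size (hypothetical)
  #
  # the by fields (function) intentionally collide across scripts,
  # while the prefixed field fields can't
  merged = {}
  for row in read_rows('stack.csv') + read_rows('code.csv'):
      merged.setdefault(row['function'], {}).update(row)

  for function, fields in merged.items():
      print(function, fields)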
@@ -141,6 +141,7 @@ class PerfBdResult(co.namedtuple('PerfBdResult', [
         'z', 'file', 'function', 'line',
         'readed', 'proged', 'erased',
         'children'])):
+    _prefix = 'perfbd'
     _by = ['z', 'file', 'function', 'line']
     _fields = ['readed', 'proged', 'erased']
     _sort = ['erased', 'proged', 'readed']
@@ -1203,7 +1204,18 @@ def table(Result, results, diff_results=None, *,
 
 def read_csv(path, Result, *,
         depth=1,
+        prefix=None,
         **_):
+    # prefix? this only applies to field fields
+    if prefix is None:
+        if hasattr(Result, '_prefix'):
+            prefix = '%s_' % Result._prefix
+        else:
+            prefix = ''
+
+    by = Result._by
+    fields = Result._fields
+
     with openio(path, 'r') as f:
         # csv or json? assume json starts with [
         json = (f.buffer.peek(1)[:1] == b'[')
@@ -1213,16 +1225,18 @@ def read_csv(path, Result, *,
         results = []
         reader = csv.DictReader(f, restval='')
         for r in reader:
-            if not any(k in r and r[k].strip()
-                    for k in Result._fields):
+            if not any(prefix+k in r and r[prefix+k].strip()
+                    for k in fields):
                 continue
             try:
                 # note this allows by/fields to overlap
                 results.append(Result(**(
-                    {k: r[k] for k in Result._by
-                        if k in r and r[k].strip()}
-                    | {k: r[k] for k in Result._fields
-                        if k in r and r[k].strip()})))
+                    {k: r[k] for k in by
+                        if k in r
+                        and r[k].strip()}
+                    | {k: r[prefix+k] for k in fields
+                        if prefix+k in r
+                        and r[prefix+k].strip()})))
             except TypeError:
                 pass
         return results
@@ -1233,16 +1247,18 @@ def read_csv(path, Result, *,
         def unjsonify(results, depth_):
             results_ = []
             for r in results:
-                if not any(k in r and r[k].strip()
-                        for k in Result._fields):
+                if not any(prefix+k in r and r[prefix+k].strip()
+                        for k in fields):
                     continue
                 try:
                     # note this allows by/fields to overlap
                     results_.append(Result(**(
-                        {k: r[k] for k in Result._by
-                            if k in r and r[k] is not None}
-                        | {k: r[k] for k in Result._fields
-                            if k in r and r[k] is not None}
+                        {k: r[k] for k in by
+                            if k in r
+                            and r[k] is not None}
+                        | {k: r[prefix+k] for k in fields
+                            if prefix+k in r
+                            and r[prefix+k] is not None}
                         | ({Result._children: unjsonify(
                             r[Result._children],
                             depth_-1)}
@@ -1266,30 +1282,36 @@ def write_csv(path, Result, results, *,
         by=None,
         fields=None,
         depth=1,
+        prefix=None,
         **_):
+    # prefix? this only applies to field fields
+    if prefix is None:
+        if hasattr(Result, '_prefix'):
+            prefix = '%s_' % Result._prefix
+        else:
+            prefix = ''
+
+    if by is None:
+        by = Result._by
+    if fields is None:
+        fields = Result._fields
+
     with openio(path, 'w') as f:
         # write csv?
         if not json:
-            writer = csv.DictWriter(f, list(co.OrderedDict.fromkeys(it.chain(
-                by
-                if by is not None
-                else Result._by,
-                fields
-                if fields is not None
-                else Result._fields)).keys()))
+            writer = csv.DictWriter(f, list(
+                co.OrderedDict.fromkeys(it.chain(
+                    by,
+                    (prefix+k for k in fields))).keys()))
             writer.writeheader()
             for r in results:
                 # note this allows by/fields to overlap
                 writer.writerow(
                     {k: getattr(r, k)
-                        for k in (by
-                            if by is not None
-                            else Result._by)
+                        for k in by
                         if getattr(r, k) is not None}
-                    | {k: str(getattr(r, k))
-                        for k in (fields
-                            if fields is not None
-                            else Result._fields)
+                    | {prefix+k: str(getattr(r, k))
+                        for k in fields
                         if getattr(r, k) is not None})
 
         # write json?
@@ -1302,14 +1324,10 @@ def write_csv(path, Result, results, *,
                 # note this allows by/fields to overlap
                 results_.append(
                     {k: getattr(r, k)
-                        for k in (by
-                            if by is not None
-                            else Result._by)
+                        for k in by
                         if getattr(r, k) is not None}
-                    | {k: str(getattr(r, k))
-                        for k in (fields
-                            if fields is not None
-                            else Result._fields)
+                    | {prefix+k: str(getattr(r, k))
+                        for k in fields
                         if getattr(r, k) is not None}
                     | ({Result._children: jsonify(
                         getattr(r, Result._children),
@@ -1731,6 +1749,10 @@ if __name__ == "__main__":
         '-Y', '--summary',
         action='store_true',
         help="Only show the total.")
     parser.add_argument(
+        '--prefix',
+        help="Prefix to use for fields in CSV/JSON output. Defaults "
+            "to %r." % ("%s_" % PerfBdResult._prefix))
+    parser.add_argument(
         '-F', '--source',
         dest='sources',
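For what it's worth, the prefix defaulting that read_csv/write_csv
share above boils down to something like this standalone sketch. The
field_prefix helper name is hypothetical; only the defaulting logic
itself comes from the diff:

  # an explicit prefix wins, then a Result type's _prefix, then nothing
  def field_prefix(Result, prefix=None):
      if prefix is not None:
          return prefix                  # --prefix= or --prefix=wonky_
      if hasattr(Result, '_prefix'):
          return '%s_' % Result._prefix  # implicit, e.g. 'perfbd_'
      return ''

  class PerfBdResult:
      _prefix = 'perfbd'

  assert field_prefix(PerfBdResult) == 'perfbd_'
  assert field_prefix(PerfBdResult, prefix='') == ''
  assert field_prefix(PerfBdResult, prefix='wonky_') == 'wonky_'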