scripts: Re-adopted result prefixes
Now that I'm looking into some higher-level scripts, being able to
merge results without first renaming everything is useful. This gives
most scripts an implicit prefix for field fields, but _not_ by fields,
allowing easy merging of results from different scripts:

    $ ./scripts/stack.py lfs.ci -o-
    function,stack_frame,stack_limit
    lfs_alloc,288,1328
    lfs_alloc_discard,8,8
    lfs_alloc_findfree,16,32
    ...

At least now these have better support in scripts with the addition
of the --prefix flag (this was tricky for csv.py), which allows
explicit control over field field prefixes:

    $ ./scripts/stack.py lfs.ci -o- --prefix=
    function,frame,limit
    lfs_alloc,288,1328
    lfs_alloc_discard,8,8
    lfs_alloc_findfree,16,32
    ...

    $ ./scripts/stack.py lfs.ci -o- --prefix=wonky_
    function,wonky_frame,wonky_limit
    lfs_alloc,288,1328
    lfs_alloc_discard,8,8
    lfs_alloc_findfree,16,32
    ...
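As a sketch of the motivation (not part of this commit), merging two
prefixed result files becomes a plain dict union on the shared by
field. The code.csv input and its code_size field are hypothetical:

    import csv

    def merge(path_a, path_b, by='function'):
        # join two result files on a shared by field; prefixed field
        # fields ("stack_frame", "code_size", ...) can't collide, so
        # a plain dict update merges cleanly
        with open(path_a) as f:
            rows = {r[by]: dict(r) for r in csv.DictReader(f)}
        with open(path_b) as f:
            for r in csv.DictReader(f):
                rows.setdefault(r[by], {by: r[by]}).update(r)
        return list(rows.values())

    # merge('stack.csv', 'code.csv')
    # -> [{'function': 'lfs_alloc', 'stack_frame': '288',
    #      'stack_limit': '1328', 'code_size': ...}, ...]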
@@ -136,6 +136,7 @@ class StructResult(co.namedtuple('StructResult', [
         'z', 'i', 'file', 'struct',
         'off', 'size', 'align',
         'children'])):
+    _prefix = 'struct'
     _by = ['z', 'i', 'file', 'struct']
     _fields = ['off', 'size', 'align']
     _sort = ['size', 'align']
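For context, _prefix/_by/_fields follow a small convention shared by
these result classes: _by names the key fields, _fields the measured
fields, and _prefix the implicit prefix for field fields. A minimal
sketch of the same shape, with an invented ToyResult:

    import collections as co

    class ToyResult(co.namedtuple('ToyResult', ['name', 'size'])):
        _prefix = 'toy'
        _by = ['name']
        _fields = ['size']

    # expected CSV header: name,toy_size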
@@ -949,7 +950,18 @@ def table(Result, results, diff_results=None, *,
 
 def read_csv(path, Result, *,
         depth=1,
+        prefix=None,
         **_):
+    # prefix? this only applies to field fields
+    if prefix is None:
+        if hasattr(Result, '_prefix'):
+            prefix = '%s_' % Result._prefix
+        else:
+            prefix = ''
+
+    by = Result._by
+    fields = Result._fields
+
     with openio(path, 'r') as f:
         # csv or json? assume json starts with [
         json = (f.buffer.peek(1)[:1] == b'[')
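The new default resolution reads in isolation as a three-way choice:
an explicit prefix (including '') always wins, None falls back to the
Result class's implicit _prefix, and classes without one get no
prefix. A standalone sketch of the same logic:

    def resolve_prefix(Result, prefix=None):
        # None means "use the Result's implicit prefix if it has one";
        # an explicit string (including '') always wins
        if prefix is None:
            if hasattr(Result, '_prefix'):
                prefix = '%s_' % Result._prefix
            else:
                prefix = ''
        return prefix

    # resolve_prefix(ToyResult)            -> 'toy_'
    # resolve_prefix(ToyResult, '')        -> ''
    # resolve_prefix(ToyResult, 'wonky_')  -> 'wonky_'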
@@ -959,16 +971,18 @@ def read_csv(path, Result, *,
             results = []
             reader = csv.DictReader(f, restval='')
             for r in reader:
-                if not any(k in r and r[k].strip()
-                        for k in Result._fields):
+                if not any(prefix+k in r and r[prefix+k].strip()
+                        for k in fields):
                     continue
                 try:
                     # note this allows by/fields to overlap
                     results.append(Result(**(
-                        {k: r[k] for k in Result._by
-                            if k in r and r[k].strip()}
-                        | {k: r[k] for k in Result._fields
-                            if k in r and r[k].strip()})))
+                        {k: r[k] for k in by
+                            if k in r
+                                and r[k].strip()}
+                        | {k: r[prefix+k] for k in fields
+                            if prefix+k in r
+                                and r[prefix+k].strip()})))
                 except TypeError:
                     pass
             return results
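The parsing change in effect: by fields are looked up unprefixed
while field fields are looked up with the prefix. A self-contained
sketch using the ToyResult class above:

    import csv, io

    def read_toy(csv_text, Result, prefix):
        results = []
        reader = csv.DictReader(io.StringIO(csv_text), restval='')
        for r in reader:
            # skip rows with no field fields, mirroring the filter
            if not any(prefix+k in r and r[prefix+k].strip()
                    for k in Result._fields):
                continue
            results.append(Result(**(
                {k: r[k] for k in Result._by
                    if r.get(k, '').strip()}
                | {k: r[prefix+k] for k in Result._fields
                    if r.get(prefix+k, '').strip()})))
        return results

    # read_toy("name,toy_size\nlfs_alloc,288\n", ToyResult, 'toy_')
    # -> [ToyResult(name='lfs_alloc', size='288')]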
@@ -979,16 +993,18 @@ def read_csv(path, Result, *,
         def unjsonify(results, depth_):
             results_ = []
             for r in results:
-                if not any(k in r and r[k].strip()
-                        for k in Result._fields):
+                if not any(prefix+k in r and r[prefix+k].strip()
+                        for k in fields):
                     continue
                 try:
                     # note this allows by/fields to overlap
                     results_.append(Result(**(
-                        {k: r[k] for k in Result._by
-                            if k in r and r[k] is not None}
-                        | {k: r[k] for k in Result._fields
-                            if k in r and r[k] is not None}
+                        {k: r[k] for k in by
+                            if k in r
+                                and r[k] is not None}
+                        | {k: r[prefix+k] for k in fields
+                            if prefix+k in r
+                                and r[prefix+k] is not None}
                         | ({Result._children: unjsonify(
                             r[Result._children],
                             depth_-1)}
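The JSON path does the same prefixed lookup plus a bounded recursion
into nested children. A rough sketch of that recursion shape, assuming
a Result type with a 'children' field (the real script stores the
field's name in Result._children):

    def untree(rows, Result, prefix, depth):
        # rebuild Results from plain dicts, recursing into children
        # until the requested depth runs out
        results = []
        for r in rows:
            results.append(Result(**(
                {k: r[k] for k in Result._by
                    if r.get(k) is not None}
                | {k: r[prefix+k] for k in Result._fields
                    if r.get(prefix+k) is not None}
                | ({'children': untree(
                        r['children'], Result, prefix, depth-1)}
                    if depth > 1 and r.get('children') else {}))))
        return results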
@@ -1012,30 +1028,36 @@ def write_csv(path, Result, results, *,
         by=None,
         fields=None,
         depth=1,
+        prefix=None,
         **_):
+    # prefix? this only applies to field fields
+    if prefix is None:
+        if hasattr(Result, '_prefix'):
+            prefix = '%s_' % Result._prefix
+        else:
+            prefix = ''
+
+    if by is None:
+        by = Result._by
+    if fields is None:
+        fields = Result._fields
+
     with openio(path, 'w') as f:
         # write csv?
         if not json:
-            writer = csv.DictWriter(f, list(co.OrderedDict.fromkeys(it.chain(
-                by
-                    if by is not None
-                    else Result._by,
-                fields
-                    if fields is not None
-                    else Result._fields)).keys()))
+            writer = csv.DictWriter(f, list(
+                co.OrderedDict.fromkeys(it.chain(
+                    by,
+                    (prefix+k for k in fields))).keys()))
             writer.writeheader()
             for r in results:
                 # note this allows by/fields to overlap
                 writer.writerow(
                     {k: getattr(r, k)
-                        for k in (by
-                            if by is not None
-                            else Result._by)
+                        for k in by
                         if getattr(r, k) is not None}
-                    | {k: str(getattr(r, k))
-                        for k in (fields
-                            if fields is not None
-                            else Result._fields)
+                    | {prefix+k: str(getattr(r, k))
+                        for k in fields
                         if getattr(r, k) is not None})
 
         # write json?
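On the write side the header is now built from by fields plus
prefixed field fields. A sketch of the resulting header and rows,
again with the ToyResult class above:

    import collections as co
    import csv, io, itertools as it

    def write_toy(results, Result, prefix):
        f = io.StringIO()
        writer = csv.DictWriter(f, list(
            co.OrderedDict.fromkeys(it.chain(
                Result._by,
                (prefix+k for k in Result._fields))).keys()))
        writer.writeheader()
        for r in results:
            writer.writerow(
                {k: getattr(r, k) for k in Result._by}
                | {prefix+k: str(getattr(r, k))
                    for k in Result._fields})
        return f.getvalue()

    # write_toy([ToyResult('lfs_alloc', 288)], ToyResult, 'toy_')
    # -> 'name,toy_size\r\nlfs_alloc,288\r\n'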
@@ -1048,14 +1070,10 @@ def write_csv(path, Result, results, *,
                 # note this allows by/fields to overlap
                 results_.append(
                     {k: getattr(r, k)
-                        for k in (by
-                            if by is not None
-                            else Result._by)
+                        for k in by
                         if getattr(r, k) is not None}
-                    | {k: str(getattr(r, k))
-                        for k in (fields
-                            if fields is not None
-                            else Result._fields)
+                    | {prefix+k: str(getattr(r, k))
+                        for k in fields
                         if getattr(r, k) is not None}
                     | ({Result._children: jsonify(
                         getattr(r, Result._children),
@@ -1301,6 +1319,10 @@ if __name__ == "__main__":
         '-Y', '--summary',
         action='store_true',
         help="Only show the total.")
+    parser.add_argument(
+        '--prefix',
+        help="Prefix to use for fields in CSV/JSON output. Defaults "
+            "to %r." % ("%s_" % StructResult._prefix))
     parser.add_argument(
         '--everything',
         action='store_true',
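Note the three-way behavior this gives the flag: omitted leaves
prefix=None so read_csv/write_csv fall back to Result._prefix,
--prefix= forces no prefix, and --prefix=wonky_ overrides it. A quick
argparse sketch of just that flag:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--prefix')

    assert parser.parse_args([]).prefix is None
    assert parser.parse_args(['--prefix=']).prefix == ''
    assert parser.parse_args(['--prefix=wonky_']).prefix == 'wonky_'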