diff --git a/scripts/bench.py b/scripts/bench.py index 80a52cae..ec85f1d1 100755 --- a/scripts/bench.py +++ b/scripts/bench.py @@ -1130,12 +1130,12 @@ def run_stage(name, runner, bench_ids, stdout_, trace_, output_, **args): **last_defines, 'm': m_, 'n': n_, - 'readed': readed__, - 'proged': proged__, - 'erased': erased__, - 'creaded': readed_, - 'cproged': proged_, - 'cerased': erased_}) + 'bench_readed': readed__, + 'bench_proged': proged__, + 'bench_erased': erased__, + 'bench_creaded': readed_, + 'bench_cproged': proged_, + 'bench_cerased': erased_}) # keep track of total for summary readed += readed__ proged += proged__ @@ -1313,7 +1313,14 @@ def run(runner, bench_ids=[], **args): if args.get('output'): output = BenchOutput(args['output'], ['suite', 'case'], - ['m', 'n', 'readed', 'proged', 'erased']) + # defines go here + ['m', 'n', + 'bench_readed', + 'bench_proged', + 'bench_erased', + 'bench_creaded', + 'bench_cproged', + 'bench_cerased']) # measure runtime start = time.time() diff --git a/scripts/code.py b/scripts/code.py index 6fe07e21..f4a124ee 100755 --- a/scripts/code.py +++ b/scripts/code.py @@ -139,6 +139,7 @@ class RInt(co.namedtuple('RInt', 'x')): class CodeResult(co.namedtuple('CodeResult', [ 'file', 'function', 'size'])): + _prefix = 'code' _by = ['file', 'function'] _fields = ['size'] _sort = ['size'] @@ -871,7 +872,18 @@ def table(Result, results, diff_results=None, *, def read_csv(path, Result, *, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + by = Result._by + fields = Result._fields + with openio(path, 'r') as f: # csv or json? 
assume json starts with [ json = (f.buffer.peek(1)[:1] == b'[') @@ -881,16 +893,18 @@ def read_csv(path, Result, *, results = [] reader = csv.DictReader(f, restval='') for r in reader: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k].strip()} - | {k: r[k] for k in Result._fields - if k in r and r[k].strip()}))) + {k: r[k] for k in by + if k in r + and r[k].strip()} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k].strip()}))) except TypeError: pass return results @@ -901,16 +915,18 @@ def read_csv(path, Result, *, def unjsonify(results, depth_): results_ = [] for r in results: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results_.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k] is not None} - | {k: r[k] for k in Result._fields - if k in r and r[k] is not None} + {k: r[k] for k in by + if k in r + and r[k] is not None} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k] is not None} | ({Result._children: unjsonify( r[Result._children], depth_-1)} @@ -934,30 +950,36 @@ def write_csv(path, Result, results, *, by=None, fields=None, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + if by is None: + by = Result._by + if fields is None: + fields = Result._fields + with openio(path, 'w') as f: # write csv? 
if not json: - writer = csv.DictWriter(f, list(co.OrderedDict.fromkeys(it.chain( - by - if by is not None - else Result._by, - fields - if fields is not None - else Result._fields)).keys())) + writer = csv.DictWriter(f, list( + co.OrderedDict.fromkeys(it.chain( + by, + (prefix+k for k in fields))).keys())) writer.writeheader() for r in results: # note this allows by/fields to overlap writer.writerow( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None}) # write json? @@ -970,14 +992,10 @@ def write_csv(path, Result, results, *, # note this allows by/fields to overlap results_.append( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None} | ({Result._children: jsonify( getattr(r, Result._children), @@ -1162,6 +1180,10 @@ if __name__ == "__main__": '-Y', '--summary', action='store_true', help="Only show the total.") + parser.add_argument( + '--prefix', + help="Prefix to use for fields in CSV/JSON output. Defaults " + "to %r." 
% ("%s_" % CodeResult._prefix)) parser.add_argument( '--everything', action='store_true', diff --git a/scripts/cov.py b/scripts/cov.py index b92e5140..7e636bf4 100755 --- a/scripts/cov.py +++ b/scripts/cov.py @@ -238,6 +238,7 @@ class RFrac(co.namedtuple('RFrac', 'a,b')): class CovResult(co.namedtuple('CovResult', [ 'file', 'function', 'line', 'calls', 'hits', 'funcs', 'lines', 'branches'])): + _prefix = 'cov' _by = ['file', 'function', 'line'] _fields = ['calls', 'hits', 'funcs', 'lines', 'branches'] _sort = ['funcs', 'lines', 'branches', 'hits', 'calls'] @@ -732,7 +733,18 @@ def table(Result, results, diff_results=None, *, def read_csv(path, Result, *, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + by = Result._by + fields = Result._fields + with openio(path, 'r') as f: # csv or json? assume json starts with [ json = (f.buffer.peek(1)[:1] == b'[') @@ -742,16 +754,18 @@ def read_csv(path, Result, *, results = [] reader = csv.DictReader(f, restval='') for r in reader: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k].strip()} - | {k: r[k] for k in Result._fields - if k in r and r[k].strip()}))) + {k: r[k] for k in by + if k in r + and r[k].strip()} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k].strip()}))) except TypeError: pass return results @@ -762,16 +776,18 @@ def read_csv(path, Result, *, def unjsonify(results, depth_): results_ = [] for r in results: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results_.append(Result(**( - {k: r[k] for 
k in Result._by - if k in r and r[k] is not None} - | {k: r[k] for k in Result._fields - if k in r and r[k] is not None} + {k: r[k] for k in by + if k in r + and r[k] is not None} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k] is not None} | ({Result._children: unjsonify( r[Result._children], depth_-1)} @@ -795,30 +811,36 @@ def write_csv(path, Result, results, *, by=None, fields=None, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + if by is None: + by = Result._by + if fields is None: + fields = Result._fields + with openio(path, 'w') as f: # write csv? if not json: - writer = csv.DictWriter(f, list(co.OrderedDict.fromkeys(it.chain( - by - if by is not None - else Result._by, - fields - if fields is not None - else Result._fields)).keys())) + writer = csv.DictWriter(f, list( + co.OrderedDict.fromkeys(it.chain( + by, + (prefix+k for k in fields))).keys())) writer.writeheader() for r in results: # note this allows by/fields to overlap writer.writerow( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None}) # write json? 
@@ -831,14 +853,10 @@ def write_csv(path, Result, results, *, # note this allows by/fields to overlap results_.append( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None} | ({Result._children: jsonify( getattr(r, Result._children), @@ -1134,6 +1152,10 @@ if __name__ == "__main__": '-Y', '--summary', action='store_true', help="Only show the total.") + parser.add_argument( + '--prefix', + help="Prefix to use for fields in CSV/JSON output. Defaults " + "to %r." % ("%s_" % CovResult._prefix)) parser.add_argument( '-F', '--source', dest='sources', diff --git a/scripts/csv.py b/scripts/csv.py index 1884750b..5487c8bf 100755 --- a/scripts/csv.py +++ b/scripts/csv.py @@ -1356,7 +1356,7 @@ def collect_csv(csv_paths, *, if not json: reader = csv.DictReader(f, restval='') # collect fields - fields.update((k, True) for k in reader.fieldnames) + fields.update((k, True) for k in reader.fieldnames or []) for r in reader: # strip and drop empty fields r_ = {k: v.strip() @@ -1415,7 +1415,13 @@ def compile(fields_, results, sort=None, children=None, hot=None, - notes=None): + notes=None, + prefix=None, + **_): + # default to no prefix + if prefix is None: + prefix = '' + by = by.copy() fields = fields.copy() @@ -1440,57 +1446,58 @@ def compile(fields_, results, fields__ = set(it.chain.from_iterable( exprs[k].fields() if k in exprs else [k] for k in fields)) - types = {} + types__ = {} for k in fields__: # check if dependency is in original fields # # it's tempting to also allow enumerate fields here, but this # currently doesn't work when hotifying - if k not in fields_: + if prefix+k not in fields_: print("error: no field %r?" 
% k, file=sys.stderr) sys.exit(2) for t in [RInt, RFloat, RFrac]: for r in results: - if k in r and r[k].strip(): + if prefix+k in r and r[prefix+k].strip(): try: - t(r[k]) + t(r[prefix+k]) except ValueError: break else: - types[k] = t + types__[k] = t break else: print("error: no type matches field %r?" % k, file=sys.stderr) sys.exit(2) - # typecheck exprs, note these may reference input fields - # with the same name - types__ = types.copy() + # typecheck exprs, note these may reference input fields with + # the same name, which is why we only do a single eval pass + types___ = types__.copy() for k, expr in exprs.items(): - types__[k] = expr.type(types) + types___[k] = expr.type(types__) # foldcheck field exprs - folds = {k: (RSum, t) for k, t in types.items()} + folds___ = {k: (RSum, t) for k, t in types__.items()} for k, expr in exprs.items(): - folds[k] = expr.fold(types) - folds = {k: (f(), t) for k, (f, t) in folds.items()} + folds___[k] = expr.fold(types__) + folds___ = {k: (f(), t) for k, (f, t) in folds___.items()} # create result class def __new__(cls, **r): - # evaluate types r_ = r.copy() - for k, t in types.items(): - r_[k] = t(r[k]) if k in r else t() - # evaluate exprs + # evaluate types, strip prefix + for k, t in types__.items(): + r_[k] = t(r[prefix+k]) if prefix+k in r else t() + r__ = r_.copy() + # evaluate exprs for k, expr in exprs.items(): r__[k] = expr.eval(r_) # evaluate mods for k, m in mods.items(): - r__[k] = punescape(m, r) + r__[k] = punescape(m, r_) # return result return cls.__mro__[1].__new__(cls, **( @@ -1529,7 +1536,7 @@ def compile(fields_, results, if k in fields: v = object.__getattribute__(self, k) if v[1]: - return folds[k][0](v[0][:v[1]]) + return folds___[k][0](v[0][:v[1]]) else: return None return object.__getattribute__(self, k) @@ -1549,7 +1556,7 @@ def compile(fields_, results, _by=by, _fields=fields, _sort=fields, - _types={k: t for k, (_, t) in folds.items()}, + _types={k: t for k, (_, t) in folds___.items()}, 
_mods=mods, _exprs=exprs, **{'_children': children} if children is not None else {}, @@ -1558,7 +1565,8 @@ def compile(fields_, results, def homogenize(Result, results, *, enumerates=None, defines=[], - depth=1): + depth=1, + **_): # this just converts all (possibly recursive) results to our # result type results_ = [] @@ -2014,7 +2022,18 @@ def table(Result, results, diff_results=None, *, def read_csv(path, Result, *, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + by = Result._by + fields = Result._fields + with openio(path, 'r') as f: # csv or json? assume json starts with [ json = (f.buffer.peek(1)[:1] == b'[') @@ -2024,16 +2043,18 @@ def read_csv(path, Result, *, results = [] reader = csv.DictReader(f, restval='') for r in reader: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k].strip()} - | {k: r[k] for k in Result._fields - if k in r and r[k].strip()}))) + {k: r[k] for k in by + if k in r + and r[k].strip()} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k].strip()}))) except TypeError: pass return results @@ -2044,16 +2065,18 @@ def read_csv(path, Result, *, def unjsonify(results, depth_): results_ = [] for r in results: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results_.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k] is not None} - | {k: r[k] for k in Result._fields - if k in r and r[k] is not None} + {k: r[k] for k in by + if k in r + and r[k] is not None} + | {k: r[prefix+k] for k in fields + if prefix+k in r + 
and r[prefix+k] is not None} | ({Result._children: unjsonify( r[Result._children], depth_-1)} @@ -2077,30 +2100,36 @@ def write_csv(path, Result, results, *, by=None, fields=None, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + if by is None: + by = Result._by + if fields is None: + fields = Result._fields + with openio(path, 'w') as f: # write csv? if not json: - writer = csv.DictWriter(f, list(co.OrderedDict.fromkeys(it.chain( - by - if by is not None - else Result._by, - fields - if fields is not None - else Result._fields)).keys())) + writer = csv.DictWriter(f, list( + co.OrderedDict.fromkeys(it.chain( + by, + (prefix+k for k in fields))).keys())) writer.writeheader() for r in results: # note this allows by/fields to overlap writer.writerow( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None}) # write json? 
@@ -2113,14 +2142,10 @@ def write_csv(path, Result, results, *, # note this allows by/fields to overlap results_.append( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None} | ({Result._children: jsonify( getattr(r, Result._children), @@ -2277,7 +2302,8 @@ def main(csv_paths, *, sort=sort, children=children, hot=hot, - notes=notes) + notes=notes, + **args) # homogenize results = homogenize(Result, results, @@ -2578,6 +2604,9 @@ if __name__ == "__main__": '-Y', '--summary', action='store_true', help="Only show the total.") + parser.add_argument( + '--prefix', + help="Prefix to use for fields in CSV/JSON output.") sys.exit(main(**{k: v for k, v in vars(parser.parse_intermixed_args()).items() if v is not None})) diff --git a/scripts/ctx.py b/scripts/ctx.py index cbcc78ba..36e62660 100755 --- a/scripts/ctx.py +++ b/scripts/ctx.py @@ -136,6 +136,7 @@ class CtxResult(co.namedtuple('CtxResult', [ 'z', 'i', 'file', 'function', 'off', 'size', 'children', 'notes'])): + _prefix = 'ctx' _by = ['z', 'i', 'file', 'function'] _fields = ['off', 'size'] _sort = ['size'] @@ -1129,7 +1130,18 @@ def table(Result, results, diff_results=None, *, def read_csv(path, Result, *, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + by = Result._by + fields = Result._fields + with openio(path, 'r') as f: # csv or json? 
assume json starts with [ json = (f.buffer.peek(1)[:1] == b'[') @@ -1139,16 +1151,18 @@ def read_csv(path, Result, *, results = [] reader = csv.DictReader(f, restval='') for r in reader: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k].strip()} - | {k: r[k] for k in Result._fields - if k in r and r[k].strip()}))) + {k: r[k] for k in by + if k in r + and r[k].strip()} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k].strip()}))) except TypeError: pass return results @@ -1159,16 +1173,18 @@ def read_csv(path, Result, *, def unjsonify(results, depth_): results_ = [] for r in results: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results_.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k] is not None} - | {k: r[k] for k in Result._fields - if k in r and r[k] is not None} + {k: r[k] for k in by + if k in r + and r[k] is not None} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k] is not None} | ({Result._children: unjsonify( r[Result._children], depth_-1)} @@ -1192,30 +1208,36 @@ def write_csv(path, Result, results, *, by=None, fields=None, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + if by is None: + by = Result._by + if fields is None: + fields = Result._fields + with openio(path, 'w') as f: # write csv? 
if not json: - writer = csv.DictWriter(f, list(co.OrderedDict.fromkeys(it.chain( - by - if by is not None - else Result._by, - fields - if fields is not None - else Result._fields)).keys())) + writer = csv.DictWriter(f, list( + co.OrderedDict.fromkeys(it.chain( + by, + (prefix+k for k in fields))).keys())) writer.writeheader() for r in results: # note this allows by/fields to overlap writer.writerow( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None}) # write json? @@ -1228,14 +1250,10 @@ def write_csv(path, Result, results, *, # note this allows by/fields to overlap results_.append( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None} | ({Result._children: jsonify( getattr(r, Result._children), @@ -1481,6 +1499,10 @@ if __name__ == "__main__": '-Y', '--summary', action='store_true', help="Only show the total.") + parser.add_argument( + '--prefix', + help="Prefix to use for fields in CSV/JSON output. Defaults " + "to %r." 
% ("%s_" % CtxResult._prefix)) parser.add_argument( '--everything', action='store_true', diff --git a/scripts/data.py b/scripts/data.py index e34b2d46..639be0bb 100755 --- a/scripts/data.py +++ b/scripts/data.py @@ -139,6 +139,7 @@ class RInt(co.namedtuple('RInt', 'x')): class DataResult(co.namedtuple('DataResult', [ 'file', 'function', 'size'])): + _prefix = 'data' _by = ['file', 'function'] _fields = ['size'] _sort = ['size'] @@ -492,7 +493,7 @@ def collect_data(obj_paths, *, if not everything and sym.name.startswith('__'): continue - results.append(CodeResult(file, sym.name, sym.size)) + results.append(DataResult(file, sym.name, sym.size)) return results @@ -871,7 +872,18 @@ def table(Result, results, diff_results=None, *, def read_csv(path, Result, *, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + by = Result._by + fields = Result._fields + with openio(path, 'r') as f: # csv or json? 
assume json starts with [ json = (f.buffer.peek(1)[:1] == b'[') @@ -881,16 +893,18 @@ def read_csv(path, Result, *, results = [] reader = csv.DictReader(f, restval='') for r in reader: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k].strip()} - | {k: r[k] for k in Result._fields - if k in r and r[k].strip()}))) + {k: r[k] for k in by + if k in r + and r[k].strip()} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k].strip()}))) except TypeError: pass return results @@ -901,16 +915,18 @@ def read_csv(path, Result, *, def unjsonify(results, depth_): results_ = [] for r in results: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results_.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k] is not None} - | {k: r[k] for k in Result._fields - if k in r and r[k] is not None} + {k: r[k] for k in by + if k in r + and r[k] is not None} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k] is not None} | ({Result._children: unjsonify( r[Result._children], depth_-1)} @@ -934,30 +950,36 @@ def write_csv(path, Result, results, *, by=None, fields=None, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + if by is None: + by = Result._by + if fields is None: + fields = Result._fields + with openio(path, 'w') as f: # write csv? 
if not json: - writer = csv.DictWriter(f, list(co.OrderedDict.fromkeys(it.chain( - by - if by is not None - else Result._by, - fields - if fields is not None - else Result._fields)).keys())) + writer = csv.DictWriter(f, list( + co.OrderedDict.fromkeys(it.chain( + by, + (prefix+k for k in fields))).keys())) writer.writeheader() for r in results: # note this allows by/fields to overlap writer.writerow( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None}) # write json? @@ -970,14 +992,10 @@ def write_csv(path, Result, results, *, # note this allows by/fields to overlap results_.append( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None} | ({Result._children: jsonify( getattr(r, Result._children), @@ -1162,6 +1180,10 @@ if __name__ == "__main__": '-Y', '--summary', action='store_true', help="Only show the total.") + parser.add_argument( + '--prefix', + help="Prefix to use for fields in CSV/JSON output. Defaults " + "to %r." 
% ("%s_" % DataResult._prefix)) parser.add_argument( '--everything', action='store_true', diff --git a/scripts/perf.py b/scripts/perf.py index d6ddc69e..4b54b04f 100755 --- a/scripts/perf.py +++ b/scripts/perf.py @@ -150,6 +150,7 @@ class PerfResult(co.namedtuple('PerfResult', [ 'z', 'file', 'function', 'line', 'cycles', 'bmisses', 'branches', 'cmisses', 'caches', 'children'])): + _prefix = 'perf' _by = ['z', 'file', 'function', 'line'] _fields = ['cycles', 'bmisses', 'branches', 'cmisses', 'caches'] _sort = ['cycles', 'bmisses', 'cmisses', 'branches', 'caches'] @@ -1233,7 +1234,18 @@ def table(Result, results, diff_results=None, *, def read_csv(path, Result, *, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + by = Result._by + fields = Result._fields + with openio(path, 'r') as f: # csv or json? assume json starts with [ json = (f.buffer.peek(1)[:1] == b'[') @@ -1243,16 +1255,18 @@ def read_csv(path, Result, *, results = [] reader = csv.DictReader(f, restval='') for r in reader: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k].strip()} - | {k: r[k] for k in Result._fields - if k in r and r[k].strip()}))) + {k: r[k] for k in by + if k in r + and r[k].strip()} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k].strip()}))) except TypeError: pass return results @@ -1263,16 +1277,18 @@ def read_csv(path, Result, *, def unjsonify(results, depth_): results_ = [] for r in results: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results_.append(Result(**( - 
{k: r[k] for k in Result._by - if k in r and r[k] is not None} - | {k: r[k] for k in Result._fields - if k in r and r[k] is not None} + {k: r[k] for k in by + if k in r + and r[k] is not None} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k] is not None} | ({Result._children: unjsonify( r[Result._children], depth_-1)} @@ -1296,30 +1312,36 @@ def write_csv(path, Result, results, *, by=None, fields=None, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + if by is None: + by = Result._by + if fields is None: + fields = Result._fields + with openio(path, 'w') as f: # write csv? if not json: - writer = csv.DictWriter(f, list(co.OrderedDict.fromkeys(it.chain( - by - if by is not None - else Result._by, - fields - if fields is not None - else Result._fields)).keys())) + writer = csv.DictWriter(f, list( + co.OrderedDict.fromkeys(it.chain( + by, + (prefix+k for k in fields))).keys())) writer.writeheader() for r in results: # note this allows by/fields to overlap writer.writerow( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None}) # write json? 
@@ -1332,14 +1354,10 @@ def write_csv(path, Result, results, *, # note this allows by/fields to overlap results_.append( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None} | ({Result._children: jsonify( getattr(r, Result._children), @@ -1734,6 +1752,10 @@ if __name__ == "__main__": '-Y', '--summary', action='store_true', help="Only show the total.") + parser.add_argument( + '--prefix', + help="Prefix to use for fields in CSV/JSON output. Defaults " + "to %r." % ("%s_" % PerfResult._prefix)) parser.add_argument( '-F', '--source', dest='sources', diff --git a/scripts/perfbd.py b/scripts/perfbd.py index 972d4431..ac7075a7 100755 --- a/scripts/perfbd.py +++ b/scripts/perfbd.py @@ -141,6 +141,7 @@ class PerfBdResult(co.namedtuple('PerfBdResult', [ 'z', 'file', 'function', 'line', 'readed', 'proged', 'erased', 'children'])): + _prefix = 'perfbd' _by = ['z', 'file', 'function', 'line'] _fields = ['readed', 'proged', 'erased'] _sort = ['erased', 'proged', 'readed'] @@ -1203,7 +1204,18 @@ def table(Result, results, diff_results=None, *, def read_csv(path, Result, *, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + by = Result._by + fields = Result._fields + with openio(path, 'r') as f: # csv or json? 
assume json starts with [ json = (f.buffer.peek(1)[:1] == b'[') @@ -1213,16 +1225,18 @@ def read_csv(path, Result, *, results = [] reader = csv.DictReader(f, restval='') for r in reader: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k].strip()} - | {k: r[k] for k in Result._fields - if k in r and r[k].strip()}))) + {k: r[k] for k in by + if k in r + and r[k].strip()} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k].strip()}))) except TypeError: pass return results @@ -1233,16 +1247,18 @@ def read_csv(path, Result, *, def unjsonify(results, depth_): results_ = [] for r in results: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results_.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k] is not None} - | {k: r[k] for k in Result._fields - if k in r and r[k] is not None} + {k: r[k] for k in by + if k in r + and r[k] is not None} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k] is not None} | ({Result._children: unjsonify( r[Result._children], depth_-1)} @@ -1266,30 +1282,36 @@ def write_csv(path, Result, results, *, by=None, fields=None, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + if by is None: + by = Result._by + if fields is None: + fields = Result._fields + with openio(path, 'w') as f: # write csv? 
if not json: - writer = csv.DictWriter(f, list(co.OrderedDict.fromkeys(it.chain( - by - if by is not None - else Result._by, - fields - if fields is not None - else Result._fields)).keys())) + writer = csv.DictWriter(f, list( + co.OrderedDict.fromkeys(it.chain( + by, + (prefix+k for k in fields))).keys())) writer.writeheader() for r in results: # note this allows by/fields to overlap writer.writerow( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None}) # write json? @@ -1302,14 +1324,10 @@ def write_csv(path, Result, results, *, # note this allows by/fields to overlap results_.append( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None} | ({Result._children: jsonify( getattr(r, Result._children), @@ -1731,6 +1749,10 @@ if __name__ == "__main__": '-Y', '--summary', action='store_true', help="Only show the total.") + parser.add_argument( + '--prefix', + help="Prefix to use for fields in CSV/JSON output. Defaults " + "to %r." 
% ("%s_" % PerfBdResult._prefix)) parser.add_argument( '-F', '--source', dest='sources', diff --git a/scripts/stack.py b/scripts/stack.py index 5954665e..27e3c05a 100755 --- a/scripts/stack.py +++ b/scripts/stack.py @@ -136,6 +136,7 @@ class StackResult(co.namedtuple('StackResult', [ 'z', 'file', 'function', 'frame', 'limit', 'children', 'notes'])): + _prefix = 'stack' _by = ['z', 'file', 'function'] _fields = ['frame', 'limit'] _sort = ['limit', 'frame'] @@ -875,7 +876,18 @@ def table(Result, results, diff_results=None, *, def read_csv(path, Result, *, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + by = Result._by + fields = Result._fields + with openio(path, 'r') as f: # csv or json? assume json starts with [ json = (f.buffer.peek(1)[:1] == b'[') @@ -885,16 +897,18 @@ def read_csv(path, Result, *, results = [] reader = csv.DictReader(f, restval='') for r in reader: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k].strip()} - | {k: r[k] for k in Result._fields - if k in r and r[k].strip()}))) + {k: r[k] for k in by + if k in r + and r[k].strip()} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k].strip()}))) except TypeError: pass return results @@ -905,16 +919,18 @@ def read_csv(path, Result, *, def unjsonify(results, depth_): results_ = [] for r in results: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results_.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k] is not None} - | {k: r[k] for k in Result._fields - if k in r and 
r[k] is not None} + {k: r[k] for k in by + if k in r + and r[k] is not None} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k] is not None} | ({Result._children: unjsonify( r[Result._children], depth_-1)} @@ -938,30 +954,36 @@ def write_csv(path, Result, results, *, by=None, fields=None, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + if by is None: + by = Result._by + if fields is None: + fields = Result._fields + with openio(path, 'w') as f: # write csv? if not json: - writer = csv.DictWriter(f, list(co.OrderedDict.fromkeys(it.chain( - by - if by is not None - else Result._by, - fields - if fields is not None - else Result._fields)).keys())) + writer = csv.DictWriter(f, list( + co.OrderedDict.fromkeys(it.chain( + by, + (prefix+k for k in fields))).keys())) writer.writeheader() for r in results: # note this allows by/fields to overlap writer.writerow( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None}) # write json? 
@@ -974,14 +996,10 @@ def write_csv(path, Result, results, *, # note this allows by/fields to overlap results_.append( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None} | ({Result._children: jsonify( getattr(r, Result._children), @@ -1228,6 +1246,10 @@ if __name__ == "__main__": '-Y', '--summary', action='store_true', help="Only show the total.") + parser.add_argument( + '--prefix', + help="Prefix to use for fields in CSV/JSON output. Defaults " + "to %r." % ("%s_" % StackResult._prefix)) parser.add_argument( '--everything', action='store_true', diff --git a/scripts/structs.py b/scripts/structs.py index a4aa15a1..4900e924 100755 --- a/scripts/structs.py +++ b/scripts/structs.py @@ -136,6 +136,7 @@ class StructResult(co.namedtuple('StructResult', [ 'z', 'i', 'file', 'struct', 'off', 'size', 'align', 'children'])): + _prefix = 'struct' _by = ['z', 'i', 'file', 'struct'] _fields = ['off', 'size', 'align'] _sort = ['size', 'align'] @@ -949,7 +950,18 @@ def table(Result, results, diff_results=None, *, def read_csv(path, Result, *, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + by = Result._by + fields = Result._fields + with openio(path, 'r') as f: # csv or json? 
assume json starts with [ json = (f.buffer.peek(1)[:1] == b'[') @@ -959,16 +971,18 @@ def read_csv(path, Result, *, results = [] reader = csv.DictReader(f, restval='') for r in reader: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k].strip()} - | {k: r[k] for k in Result._fields - if k in r and r[k].strip()}))) + {k: r[k] for k in by + if k in r + and r[k].strip()} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k].strip()}))) except TypeError: pass return results @@ -979,16 +993,18 @@ def read_csv(path, Result, *, def unjsonify(results, depth_): results_ = [] for r in results: - if not any(k in r and r[k].strip() - for k in Result._fields): + if not any(prefix+k in r and r[prefix+k].strip() + for k in fields): continue try: # note this allows by/fields to overlap results_.append(Result(**( - {k: r[k] for k in Result._by - if k in r and r[k] is not None} - | {k: r[k] for k in Result._fields - if k in r and r[k] is not None} + {k: r[k] for k in by + if k in r + and r[k] is not None} + | {k: r[prefix+k] for k in fields + if prefix+k in r + and r[prefix+k] is not None} | ({Result._children: unjsonify( r[Result._children], depth_-1)} @@ -1012,30 +1028,36 @@ def write_csv(path, Result, results, *, by=None, fields=None, depth=1, + prefix=None, **_): + # prefix? this only applies to field fields + if prefix is None: + if hasattr(Result, '_prefix'): + prefix = '%s_' % Result._prefix + else: + prefix = '' + + if by is None: + by = Result._by + if fields is None: + fields = Result._fields + with openio(path, 'w') as f: # write csv? 
if not json: - writer = csv.DictWriter(f, list(co.OrderedDict.fromkeys(it.chain( - by - if by is not None - else Result._by, - fields - if fields is not None - else Result._fields)).keys())) + writer = csv.DictWriter(f, list( + co.OrderedDict.fromkeys(it.chain( + by, + (prefix+k for k in fields))).keys())) writer.writeheader() for r in results: # note this allows by/fields to overlap writer.writerow( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None}) # write json? @@ -1048,14 +1070,10 @@ def write_csv(path, Result, results, *, # note this allows by/fields to overlap results_.append( {k: getattr(r, k) - for k in (by - if by is not None - else Result._by) + for k in by if getattr(r, k) is not None} - | {k: str(getattr(r, k)) - for k in (fields - if fields is not None - else Result._fields) + | {prefix+k: str(getattr(r, k)) + for k in fields if getattr(r, k) is not None} | ({Result._children: jsonify( getattr(r, Result._children), @@ -1301,6 +1319,10 @@ if __name__ == "__main__": '-Y', '--summary', action='store_true', help="Only show the total.") + parser.add_argument( + '--prefix', + help="Prefix to use for fields in CSV/JSON output. Defaults " + "to %r." 
% ("%s_" % StructResult._prefix)) parser.add_argument( '--everything', action='store_true', diff --git a/scripts/test.py b/scripts/test.py index c5d79f9b..49d02f8a 100755 --- a/scripts/test.py +++ b/scripts/test.py @@ -1099,8 +1099,8 @@ def run_stage(name, runner, test_ids, stdout_, trace_, output_, **args): 'suite': suite, 'case': case, **defines, - 'passed': '1/1', - 'time': '%.6f' % ( + 'test_passed': '1/1', + 'test_time': '%.6f' % ( time.time() - last_time)}) elif op == 'skipped': locals.seen_perms += 1 @@ -1161,7 +1161,7 @@ def run_stage(name, runner, test_ids, stdout_, trace_, output_, **args): output_.writerow({ 'suite': suite, 'case': case, - 'passed': '0/1', + 'test_passed': '0/1', **defines}) # race condition for multiple failures? @@ -1297,7 +1297,8 @@ def run(runner, test_ids=[], **args): if args.get('output'): output = TestOutput(args['output'], ['suite', 'case'], - ['passed', 'time']) + # defines go here + ['test_passed', 'test_time']) # measure runtime start = time.time()