author     ncteisen <ncteisen@gmail.com>  2017-07-20 14:55:49 -0700
committer  ncteisen <ncteisen@gmail.com>  2017-07-20 14:55:49 -0700
commit     5c92dcde1ee0c25a3c341c88af62fec600645252
tree       689cf73c9becfbe7fd0bb56775362f13e96d0352 /tools/profiling
parent     66cedf645dfdb1c195181d1cff289792a43854b6
Allow passing regex to bm_diff tools
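With this change, bm_run.py and bm_main.py accept -r/--regex (default "", i.e. no filtering) in place of the old -r/--repetitions flag, and forward it to the benchmark binaries as --benchmark_filter when listing the tests to run. A minimal usage sketch, assuming the tools are invoked from the repository root (the regex value is illustrative, and the scripts' other flags are omitted here):

    # compare only benchmarks whose names match the regex, over 4 loops
    tools/profiling/microbenchmarks/bm_diff/bm_main.py -r 'BM_StreamingPingPong.*' -l 4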
Diffstat (limited to 'tools/profiling')
 -rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_diff.py |  5
 -rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_main.py | 14
 -rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_run.py  | 21
 3 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
index 809817a1a8..1ac951f3d8 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -144,7 +144,7 @@ def _read_json(filename, badjson_files, nonexistant_files):
 def fmt_dict(d):
   return ''.join([" " + k + ": " + str(d[k]) + "\n" for k in d])
 
 
-def diff(bms, loops, track, old, new, counters):
+def diff(bms, loops, regex, track, old, new, counters):
   benchmarks = collections.defaultdict(Benchmark)
   badjson_files = {}
@@ -153,7 +153,8 @@ def diff(bms, loops, track, old, new, counters):
     for loop in range(0, loops):
       for line in subprocess.check_output(
         ['bm_diff_%s/opt/%s' % (old, bm),
-         '--benchmark_list_tests']).splitlines():
+         '--benchmark_list_tests',
+         '--benchmark_filter=%s' % regex]).splitlines():
         stripped_line = line.strip().replace("/", "_").replace(
           "<", "_").replace(">", "_").replace(", ", "_")
         js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_main.py b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
index 8b4e0cb69a..5aa11ac391 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_main.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
@@ -63,10 +63,10 @@ def _args():
     help='Name of baseline run to compare to. Ususally just called "old"')
   argp.add_argument(
     '-r',
-    '--repetitions',
-    type=int,
-    default=1,
-    help='Number of repetitions to pass to the benchmarks')
+    '--regex',
+    type=str,
+    default="",
+    help='Regex to filter benchmarks run')
   argp.add_argument(
     '-l',
     '--loops',
@@ -125,10 +125,10 @@ def main(args):
     subprocess.check_call(['git', 'checkout', where_am_i])
     subprocess.check_call(['git', 'submodule', 'update'])
 
-  bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters)
-  bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters)
+  bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.regex, args.counters)
+  bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.regex, args.counters)
 
-  diff, note = bm_diff.diff(args.benchmarks, args.loops, args.track, old,
+  diff, note = bm_diff.diff(args.benchmarks, args.loops, args.regex, args.track, old,
                             'new', args.counters)
   if diff:
     text = '[%s] Performance differences noted:\n%s' % (args.pr_comment_name, diff)
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
index 72b3d3cf10..206f7c5845 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
@@ -56,10 +56,10 @@ def _args():
   )
   argp.add_argument(
     '-r',
-    '--repetitions',
-    type=int,
-    default=1,
-    help='Number of repetitions to pass to the benchmarks')
+    '--regex',
+    type=str,
+    default="",
+    help='Regex to filter benchmarks run')
   argp.add_argument(
     '-l',
     '--loops',
@@ -77,18 +77,17 @@ def _args():
   return args
 
 
-def _collect_bm_data(bm, cfg, name, reps, idx, loops):
+def _collect_bm_data(bm, cfg, name, regex, idx, loops):
   jobs_list = []
   for line in subprocess.check_output(
     ['bm_diff_%s/%s/%s' % (name, cfg, bm),
-     '--benchmark_list_tests']).splitlines():
+     '--benchmark_list_tests', '--benchmark_filter=%s' % regex]).splitlines():
     stripped_line = line.strip().replace("/", "_").replace(
       "<", "_").replace(">", "_").replace(", ", "_")
     cmd = [
       'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_filter=^%s$' % line,
       '--benchmark_out=%s.%s.%s.%s.%d.json' % (bm, stripped_line, cfg, name, idx),
       '--benchmark_out_format=json',
-      '--benchmark_repetitions=%d' % (reps)
     ]
     jobs_list.append(
       jobset.JobSpec(
@@ -100,13 +99,13 @@ def _collect_bm_data(bm, cfg, name, reps, idx, loops):
   return jobs_list
 
 
-def run(name, benchmarks, jobs, loops, reps, counters):
+def run(name, benchmarks, jobs, loops, regex, counters):
   jobs_list = []
   for loop in range(0, loops):
     for bm in benchmarks:
-      jobs_list += _collect_bm_data(bm, 'opt', name, reps, loop, loops)
+      jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
       if counters:
-        jobs_list += _collect_bm_data(bm, 'counters', name, reps, loop,
+        jobs_list += _collect_bm_data(bm, 'counters', name, regex, loop,
                                       loops)
   random.shuffle(jobs_list, random.SystemRandom().random)
   jobset.run(jobs_list, maxjobs=jobs)
@@ -114,4 +113,4 @@ def run(name, benchmarks, jobs, loops, reps, counters):
 
 if __name__ == '__main__':
   args = _args()
-  run(args.name, args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters)
+  run(args.name, args.benchmarks, args.jobs, args.loops, args.regex, args.counters)
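For reference, a minimal standalone sketch of the listing/filtering flow added above (the binary path and regex are illustrative; it is assumed that Google Benchmark applies --benchmark_filter as a partial regex match, so the empty-string default still lists every test and preserves the previous behavior):

    import subprocess

    def list_benchmarks(binary, regex=''):
      # Ask the benchmark binary which tests match the regex.
      # An empty regex is assumed to match everything (the new default).
      output = subprocess.check_output(
          [binary, '--benchmark_list_tests', '--benchmark_filter=%s' % regex])
      return [line.strip() for line in output.splitlines()]

    # e.g. list_benchmarks('bm_diff_old/opt/bm_error', 'BM_Error.*')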