diff options
author | ncteisen <ncteisen@gmail.com> | 2017-06-01 10:33:42 -0700 |
---|---|---|
committer | ncteisen <ncteisen@gmail.com> | 2017-06-01 10:33:42 -0700 |
commit | bea7c1954c922387d405d8ffc9469ae7171753ec (patch) | |
tree | d47d96cf8bcb3e3a8f3b77cd965da6367dcb8106 /tools/profiling/microbenchmarks | |
parent | bb8cc8814b89650fcf310026db557991a165377e (diff) |
Split bm runs by individual bm
Diffstat (limited to 'tools/profiling/microbenchmarks')
-rwxr-xr-x | tools/profiling/microbenchmarks/bm_diff/bm_diff.py | 38 | ||||
-rwxr-xr-x | tools/profiling/microbenchmarks/bm_diff/bm_run.py | 38 |
2 files changed, 41 insertions, 35 deletions
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py index bc02b42bf2..77c0015ba1 100755 --- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py +++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py @@ -42,6 +42,7 @@ import json import tabulate import argparse import collections +import subprocess verbose = False @@ -142,23 +143,26 @@ def diff(bms, loops, track, old, new): for bm in bms: for loop in range(0, loops): - js_new_ctr = _read_json('%s.counters.%s.%d.json' % (bm, new, loop)) - js_new_opt = _read_json('%s.opt.%s.%d.json' % (bm, new, loop)) - js_old_ctr = _read_json('%s.counters.%s.%d.json' % (bm, old, loop)) - js_old_opt = _read_json('%s.opt.%s.%d.json' % (bm, old, loop)) - - if js_new_ctr: - for row in bm_json.expand_json(js_new_ctr, js_new_opt): - name = row['cpp_name'] - if name.endswith('_mean') or name.endswith('_stddev'): - continue - benchmarks[name].add_sample(track, row, True) - if js_old_ctr: - for row in bm_json.expand_json(js_old_ctr, js_old_opt): - name = row['cpp_name'] - if name.endswith('_mean') or name.endswith('_stddev'): - continue - benchmarks[name].add_sample(track, row, False) + for line in subprocess.check_output(['bm_diff_%s/opt/%s' % (old, bm), + '--benchmark_list_tests']).splitlines(): + stripped_line = line.strip().replace("/","_").replace("<","_").replace(">","_") + js_new_ctr = _read_json('%s.%s.counters.%s.%d.json' % (bm, stripped_line, new, loop)) + js_new_opt = _read_json('%s.%s.opt.%s.%d.json' % (bm, stripped_line, new, loop)) + js_old_ctr = _read_json('%s.%s.counters.%s.%d.json' % (bm, stripped_line, old, loop)) + js_old_opt = _read_json('%s.%s.opt.%s.%d.json' % (bm, stripped_line, old, loop)) + + if js_new_ctr: + for row in bm_json.expand_json(js_new_ctr, js_new_opt): + name = row['cpp_name'] + if name.endswith('_mean') or name.endswith('_stddev'): + continue + benchmarks[name].add_sample(track, row, True) + if js_old_ctr: + for row in bm_json.expand_json(js_old_ctr, js_old_opt): + name = row['cpp_name'] + if name.endswith('_mean') or name.endswith('_stddev'): + continue + benchmarks[name].add_sample(track, row, False) really_interesting = set() for name, bm in benchmarks.items(): diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py index d52617ce2f..b382b7b377 100755 --- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py +++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py @@ -33,6 +33,7 @@ import bm_constants import argparse +import subprocess import multiprocessing import random import itertools @@ -88,30 +89,31 @@ def _args(): def _collect_bm_data(bm, cfg, name, reps, idx, loops): - cmd = [ - 'bm_diff_%s/%s/%s' % (name, cfg, bm), - '--benchmark_out=%s.%s.%s.%d.json' % (bm, cfg, name, idx), - '--benchmark_out_format=json', '--benchmark_repetitions=%d' % (reps) - ] - return jobset.JobSpec( - cmd, - shortname='%s %s %s %d/%d' % (bm, cfg, name, idx + 1, loops), - verbose_success=True, - timeout_seconds=None) + jobs_list = [] + for line in subprocess.check_output(['bm_diff_%s/%s/%s' % (name, cfg, bm), + '--benchmark_list_tests']).splitlines(): + stripped_line = line.strip().replace("/","_").replace("<","_").replace(">","_") + cmd = [ + 'bm_diff_%s/%s/%s' % (name, cfg, bm), + '--benchmark_filter=^%s$' % line, + '--benchmark_out=%s.%s.%s.%s.%d.json' % (bm, stripped_line, cfg, name, idx), + '--benchmark_out_format=json', '--benchmark_repetitions=%d' % (reps) + ] + jobs_list.append(jobset.JobSpec( + cmd, + shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1, loops), + verbose_success=True, + timeout_seconds=None)) + return jobs_list def run(name, benchmarks, jobs, loops, reps): jobs_list = [] for loop in range(0, loops): - jobs_list.extend( - x - for x in itertools.chain( - (_collect_bm_data(bm, 'opt', name, reps, loop, loops) - for bm in benchmarks), - (_collect_bm_data(bm, 'counters', name, reps, loop, loops) - for bm in benchmarks),)) + for bm in benchmarks: + jobs_list += _collect_bm_data(bm, 'opt', name, reps, loop, loops) + jobs_list += _collect_bm_data(bm, 'counters', name, reps, loop, loops) random.shuffle(jobs_list, random.SystemRandom().random) - jobset.run(jobs_list, maxjobs=jobs) |