path: root/tools/run_tests/run_microbenchmark.py
author    Craig Tiller <ctiller@google.com>    2017-03-01 14:11:15 -0800
committer Craig Tiller <ctiller@google.com>    2017-03-01 14:11:15 -0800
commit    ff84b3650fd277e866b4367ccd5689729125ac65 (patch)
tree      5b3db4e7a84c866b826ef87407b9d1befcb0071c /tools/run_tests/run_microbenchmark.py
parent    18a948ad2f1b76416211d9aadc07887601e9eac1 (diff)
Experimental version
Diffstat (limited to 'tools/run_tests/run_microbenchmark.py')
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py | 28
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index e045b4aafd..84f0586cdf 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -170,12 +170,12 @@ def collect_perf(bm_name, args):
   jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
   jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
 
-def run_summary(cfg):
+def run_summary(bm_name, cfg, base_json_name):
   subprocess.check_call(
       ['make', bm_name,
        'CONFIG=%s' % cfg, '-j', '%d' % multiprocessing.cpu_count()])
   cmd = ['bins/%s/%s' % (cfg, bm_name),
-         '--benchmark_out=out.%s.json' % cfg,
+         '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
          '--benchmark_out_format=json']
   if args.summary_time is not None:
     cmd += ['--benchmark_min_time=%d' % args.summary_time]
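The new base_json_name parameter only changes how run_summary names the JSON file that the benchmark binary writes. A minimal sketch of the naming scheme, not part of the diff ('bm_fullstack' below is a hypothetical benchmark name used purely for illustration):

# Mirrors the '--benchmark_out=%s.%s.json' % (base_json_name, cfg) argument built above.
def summary_json_path(cfg, base_json_name):
  return '%s.%s.json' % (base_json_name, cfg)

assert summary_json_path('opt', 'out') == 'out.opt.json'
assert summary_json_path('counters', 'out') == 'out.counters.json'
assert summary_json_path('opt', 'bm_fullstack.new') == 'bm_fullstack.new.opt.json'

The existing summary path keeps its old 'out.*.json' names by passing 'out', while the diff_perf path added further down writes per-benchmark '<bm_name>.new' and '<bm_name>.old' prefixes so results from two checkouts can coexist.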
@@ -183,9 +183,9 @@ def run_summary(cfg):
 
 def collect_summary(bm_name, args):
   heading('Summary: %s [no counters]' % bm_name)
-  text(run_summary('opt'))
+  text(run_summary(bm_name, 'opt', 'out'))
   heading('Summary: %s [with counters]' % bm_name)
-  text(run_summary('counters'))
+  text(run_summary(bm_name, 'counters', 'out'))
   if args.bigquery_upload:
     with open('out.csv', 'w') as f:
       f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', 'out.counters.json', 'out.opt.json']))
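collect_summary still produces out.opt.json and out.counters.json (it passes 'out' as base_json_name), so the bigquery_upload branch keeps working unchanged. A standalone sketch of that flattening step, assuming both JSON files already exist and the script runs from the gRPC repo root:

import subprocess

# bm2bq.py reads the counters and opt summaries and emits CSV rows on stdout;
# the script captures them into out.csv for the upload requested via
# args.bigquery_upload.
with open('out.csv', 'w') as f:
  f.write(subprocess.check_output(
      ['tools/profiling/microbenchmarks/bm2bq.py',
       'out.counters.json', 'out.opt.json']))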
@@ -233,8 +233,24 @@ for bm_name in args.benchmarks:
   for collect in args.collect:
     collectors[collect](bm_name, args)
 if args.diff_perf:
-  pass
-
+  for bm_name in args.benchmarks:
+    run_summary(bm_name, 'opt', '%s.new' % bm_name)
+  where_am_i = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+  subprocess.check_call(['git', 'checkout', args.diff_perf])
+  comparables = []
+  try:
+    for bm_name in args.benchmarks:
+      try:
+        run_summary(bm_name, 'opt', '%s.old' % bm_name)
+        comparables.append(bm_name)
+      except subprocess.CalledProcessError, e:
+        pass
+  finally:
+    subprocess.check_call(['git', 'checkout', where_am_i])
+  for bm_name in comparables:
+    subprocess.check_call(['third_party/benchmark/tools/compare_bench.py',
+                           '%s.new.opt.json' % bm_name,
+                           '%s.old.opt.json' % bm_name])
 index_html += "</body>\n</html>\n"
 with open('reports/index.html', 'w') as f:
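The diff_perf branch above builds the '<bm_name>.new' summaries on the current checkout, switches to the args.diff_perf ref to build the '<bm_name>.old' summaries, and always switches back before comparing. A minimal sketch of that save/checkout/restore pattern in isolation, assuming a clean work tree on a named branch; 'baseline-ref' is a hypothetical ref and the real code runs run_summary inside the try block:

import subprocess

# Remember the current branch so it can be restored even if the baseline build fails.
where_am_i = subprocess.check_output(
    ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
subprocess.check_call(['git', 'checkout', 'baseline-ref'])  # hypothetical ref
try:
  pass  # build and run the '<bm_name>.old' benchmarks here
finally:
  subprocess.check_call(['git', 'checkout', where_am_i])

Benchmarks whose baseline build fails are dropped from comparables, so compare_bench.py only runs on pairs where both the .new.opt.json and .old.opt.json files exist.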