author    Craig Tiller <ctiller@google.com>    2017-03-01 09:08:12 -0800
committer GitHub <noreply@github.com>          2017-03-01 09:08:12 -0800
commit    823e87b1d99097b439763f64e101655e7cf8acc8 (patch)
tree      a1a825ab5aaef29364f5562a171260c16a54988a /tools
parent    25c38e761753da95586bca4ab5b1c5c5fa3a4c1b (diff)
parent    541b87e0155ea7de3b0f0123bdb28941fad32c8a (diff)
Merge pull request #9933 from ctiller/better_summary
Report non-counter cpu times to bigquery to get more accurate dashboards
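In outline: the summary step now builds and runs each benchmark twice, once under the plain opt config and once under counters, and bm2bq.py gains an optional second input file whose cpu_time, real_time, and iterations replace the values measured in the instrumented run before upload (see the sketches after each hunk below).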
Diffstat (limited to 'tools')
-rwxr-xr-x  tools/profiling/microbenchmarks/bm2bq.py | 12 ++++++++++++
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py    | 19 ++++++++++++-------
2 files changed, 24 insertions(+), 7 deletions(-)
diff --git a/tools/profiling/microbenchmarks/bm2bq.py b/tools/profiling/microbenchmarks/bm2bq.py
index 280f217e69..76ed0fef0d 100755
--- a/tools/profiling/microbenchmarks/bm2bq.py
+++ b/tools/profiling/microbenchmarks/bm2bq.py
@@ -80,6 +80,12 @@ if sys.argv[1] == '--schema':
 with open(sys.argv[1]) as f:
   js = json.loads(f.read())
 
+if len(sys.argv) > 2:
+  with open(sys.argv[2]) as f:
+    js2 = json.loads(f.read())
+else:
+  js2 = None
+
 writer = csv.DictWriter(sys.stdout, [c for c,t in columns])
 
 bm_specs = {
@@ -215,4 +221,10 @@ for bm in js['benchmarks']:
   row.update(labels)
   if 'label' in row:
     del row['label']
+  if js2:
+    for bm2 in js2['benchmarks']:
+      if bm['name'] == bm2['name']:
+        row['cpu_time'] = bm2['cpu_time']
+        row['real_time'] = bm2['real_time']
+        row['iterations'] = bm2['iterations']
   writer.writerow(row)
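For reference, a minimal standalone sketch of the merge this hunk performs, assuming two Google Benchmark JSON outputs named as in the run_microbenchmark.py change below; the file names and the dict-based index are illustrative, not part of the commit:

import json

# Hypothetical file names, mirroring the ones run_microbenchmark.py
# writes after this change.
with open('out.counters.json') as f:
  counters = json.load(f)
with open('out.opt.json') as f:
  opt = json.load(f)

# Index the opt (non-counter) run by benchmark name; a dict lookup avoids
# the quadratic rescan the inline loop above performs for every row.
by_name = {bm['name']: bm for bm in opt['benchmarks']}

for bm in counters['benchmarks']:
  match = by_name.get(bm['name'])
  if match:
    # Timing fields come from the uninstrumented run; counter columns
    # from the counters run are left untouched.
    for field in ('cpu_time', 'real_time', 'iterations'):
      bm[field] = match[field]

The commit keeps the simple nested loop, which is adequate at microbenchmark counts; the dict variant only matters for very large suites.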
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 4307906a7e..c6cc60715e 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -170,20 +170,25 @@ def collect_perf(bm_name, args):
   jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
   jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
 
-def collect_summary(bm_name, args):
-  heading('Summary: %s' % bm_name)
+def run_summary(cfg):
   subprocess.check_call(
       ['make', bm_name,
-       'CONFIG=counters', '-j', '%d' % multiprocessing.cpu_count()])
-  cmd = ['bins/counters/%s' % bm_name,
-         '--benchmark_out=out.json',
+       'CONFIG=%s' % cfg, '-j', '%d' % multiprocessing.cpu_count()])
+  cmd = ['bins/%s/%s' % (cfg, bm_name),
+         '--benchmark_out=out.%s.json' % cfg,
          '--benchmark_out_format=json']
   if args.summary_time is not None:
     cmd += ['--benchmark_min_time=%d' % args.summary_time]
-  text(subprocess.check_output(cmd))
+  return subprocess.check_output(cmd)
+
+def collect_summary(bm_name, args):
+  heading('Summary: %s [no counters]' % bm_name)
+  text(run_summary('opt'))
+  heading('Summary: %s [with counters]' % bm_name)
+  text(run_summary('counters'))
   if args.bigquery_upload:
     with open('out.csv', 'w') as f:
-      f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', 'out.json']))
+      f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', 'out.counters.json', 'out.opt.json']))
     subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', 'out.csv'])
 
 collectors = {
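Note that run_summary(cfg) reads bm_name and args without taking them as parameters; that appears to work only because the script's main loop binds both as module-level names. Taken together, the collector now drives the flow sketched below; this is a hedged, self-contained rendering of the same sequence, where the benchmark target bm_error and the -j value are placeholders rather than values from the diff:

import subprocess

BM_NAME = 'bm_error'  # placeholder benchmark target, not taken from the diff

# Build and run the benchmark twice: plain 'opt' for accurate timings,
# 'counters' for the instrumented counter columns.
for cfg in ('opt', 'counters'):
  subprocess.check_call(['make', BM_NAME, 'CONFIG=%s' % cfg, '-j', '8'])
  subprocess.check_call(
      ['bins/%s/%s' % (cfg, BM_NAME),
       '--benchmark_out=out.%s.json' % cfg,
       '--benchmark_out_format=json'])

# Counters JSON first; bm2bq.py overwrites its timing columns with the
# values from the opt JSON before emitting CSV (see the first hunk).
csv_data = subprocess.check_output(
    ['tools/profiling/microbenchmarks/bm2bq.py',
     'out.counters.json', 'out.opt.json'])
with open('out.csv', 'wb') as f:
  f.write(csv_data)
subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', 'out.csv'])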