Diffstat (limited to 'tools/run_tests/run_microbenchmark.py')
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py  84
1 file changed, 67 insertions(+), 17 deletions(-)
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 096d7d78ab..92685de5ee 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -32,6 +32,7 @@ import multiprocessing
import os
import subprocess
import sys
+import argparse
import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server
@@ -72,12 +73,16 @@ def link(txt, tgt):
global index_html
index_html += "<p><a href=\"%s\">%s</a></p>\n" % (tgt, txt)
-benchmarks = []
-profile_analysis = []
-cleanup = []
+def text(txt):
+ global index_html
+ index_html += "<p><pre>%s</pre></p>\n" % txt
+
+def collect_latency(bm_name, args):
+ """generate latency profiles"""
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
-for bm_name in sys.argv[1:]:
- # generate latency profiles
heading('Latency Profiles: %s' % bm_name)
subprocess.check_call(
['make', bm_name,
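
For orientation between hunks: the old top-level `for bm_name in sys.argv[1:]` loop is being split into per-collector functions that share a `(bm_name, args)` signature and are dispatched from a dict. A minimal sketch of the shape the refactor converges on (collector bodies elided; the real ones appear in the hunks above and below, and `args` comes from the argparse setup in the final hunk):

  def collect_latency(bm_name, args):
    """generate latency profiles"""
    ...

  def collect_perf(bm_name, args):
    """generate flamegraphs"""
    ...

  def collect_summary(bm_name, args):
    ...

  collectors = {
    'latency': collect_latency,
    'perf': collect_perf,
    'summary': collect_summary,
  }

  # each requested collector runs once per requested benchmark
  for bm_name in args.benchmarks:
    for collect in args.collect:
      collectors[collect](bm_name, args)
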
@@ -115,26 +120,71 @@ for bm_name in sys.argv[1:]:
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
- # generate flamegraphs
+def collect_perf(bm_name, args):
+ """generate flamegraphs"""
heading('Flamegraphs: %s' % bm_name)
subprocess.check_call(
['make', bm_name,
'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
'--benchmark_list_tests']).splitlines():
- subprocess.check_call(['sudo', 'perf', 'record', '-g', '-c', '1000',
+ link(line, '%s.svg' % fnize(line))
+ subprocess.check_call(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
+ '-g', '-c', '1000',
'bins/mutrace/%s' % bm_name,
'--benchmark_filter=^%s$' % line,
- '--benchmark_min_time=20'])
- with open('/tmp/bm.perf', 'w') as f:
- f.write(subprocess.check_output(['sudo', 'perf', 'script']))
- with open('/tmp/bm.folded', 'w') as f:
- f.write(subprocess.check_output([
- '%s/stackcollapse-perf.pl' % flamegraph_dir, '/tmp/bm.perf']))
- link(line, '%s.svg' % fnize(line))
- with open('reports/%s.svg' % fnize(line), 'w') as f:
- f.write(subprocess.check_output([
- '%s/flamegraph.pl' % flamegraph_dir, '/tmp/bm.folded']))
+ '--benchmark_min_time=10'])
+ env = os.environ.copy()
+ env.update({
+ 'PERF_BASE_NAME': fnize(line),
+ 'OUTPUT_DIR': 'reports',
+ 'OUTPUT_FILENAME': fnize(line),
+ })
+ subprocess.check_call(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
+ env=env)
+ subprocess.check_call(['rm', '%s-perf.data' % fnize(line)])
+ subprocess.check_call(['rm', '%s-out.perf' % fnize(line)])
+
+def collect_summary(bm_name, args):
+ heading('Summary: %s' % bm_name)
+ subprocess.check_call(
+ ['make', bm_name,
+ 'CONFIG=counters', '-j', '%d' % multiprocessing.cpu_count()])
+ text(subprocess.check_output(['bins/counters/%s' % bm_name,
+ '--benchmark_out=out.json',
+ '--benchmark_out_format=json']))
+ if args.bigquery_upload:
+ with open('out.csv', 'w') as f:
+ f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', 'out.json']))
+ subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', 'out.csv'])
+
+collectors = {
+ 'latency': collect_latency,
+ 'perf': collect_perf,
+ 'summary': collect_summary,
+}
+
+argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
+argp.add_argument('-c', '--collect',
+ choices=sorted(collectors.keys()),
+ nargs='+',
+ default=sorted(collectors.keys()),
+ help='Which collectors should be run against each benchmark')
+argp.add_argument('-b', '--benchmarks',
+ default=['bm_fullstack'],
+ nargs='+',
+ type=str,
+ help='Which microbenchmarks should be run')
+argp.add_argument('--bigquery_upload',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Upload results from summary collection to bigquery')
+args = argp.parse_args()
+
+for bm_name in args.benchmarks:
+ for collect in args.collect:
+ collectors[collect](bm_name, args)
index_html += "</body>\n</html>\n"
with open('reports/index.html', 'w') as f:
  f.write(index_html)
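
With the argparse interface in place, benchmarks and collectors are selected explicitly rather than taken from bare positional arguments. A few example invocations, as a sketch (run from the gRPC repository root; flag names are taken from the diff above):

  # all three collectors against the default bm_fullstack
  tools/run_tests/run_microbenchmark.py

  # latency and perf collectors only, against a named benchmark
  tools/run_tests/run_microbenchmark.py -c latency perf -b bm_fullstack

  # summary collection, with results loaded into BigQuery via `bq load`
  tools/run_tests/run_microbenchmark.py -c summary --bigquery_upload

Note that --bigquery_upload is declared with action='store_const', const=True, default=False, which behaves identically to argparse's shorthand action='store_true'.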