author    Craig Tiller <ctiller@google.com>  2017-02-16 09:45:17 -0800
committer Craig Tiller <ctiller@google.com>  2017-02-16 09:45:17 -0800
commit    985736358cc96a343edd8b3df651f8eb6bd66025 (patch)
tree      08b1f9c7f8792de13c38673fd1a00f662b097ba9 /tools/run_tests/run_microbenchmark.py
parent    7848b44e960168f70047555cea14530950547192 (diff)
parent    2e0f02b5fbbd098335e38585b2651056c9d7dee8 (diff)
Merge github.com:grpc/grpc into bm_cq
Diffstat (limited to 'tools/run_tests/run_microbenchmark.py')
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py | 52
1 file changed, 37 insertions(+), 15 deletions(-)
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 4da35bc142..ceb832c642 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -126,23 +126,45 @@ def collect_perf(bm_name, args):
subprocess.check_call(
['make', bm_name,
'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
'--benchmark_list_tests']).splitlines():
- subprocess.check_call(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
- '-g', '-c', '1000',
- 'bins/mutrace/%s' % bm_name,
- '--benchmark_filter=^%s$' % line,
- '--benchmark_min_time=10'])
- env = os.environ.copy()
- env.update({
- 'PERF_BASE_NAME': fnize(line),
- 'OUTPUT_DIR': 'reports',
- 'OUTPUT_FILENAME': fnize(line),
- })
- subprocess.check_call(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
- env=env)
- subprocess.check_call(['rm', '%s-perf.data' % fnize(line)])
- subprocess.check_call(['rm', '%s-out.perf' % fnize(line)])
+ link(line, '%s.svg' % fnize(line))
+ benchmarks.append(
+ jobset.JobSpec(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
+ '-g', '-F', '997',
+ 'bins/mutrace/%s' % bm_name,
+ '--benchmark_filter=^%s$' % line,
+ '--benchmark_min_time=10']))
+ profile_analysis.append(
+ jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
+ environ = {
+ 'PERF_BASE_NAME': fnize(line),
+ 'OUTPUT_DIR': 'reports',
+ 'OUTPUT_FILENAME': fnize(line),
+ }))
+ cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
+ cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
+  # periodically flush the accumulated jobs: the temporary perf.data files
+  # produced by this processing are large
+ if len(benchmarks) >= 20:
+      # run benchmarks one at a time: concurrent runs would contend for the
+      # CPU and distort the perf profiles
+ jobset.run(benchmarks, maxjobs=1,
+ add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+ # run the remaining benchmarks that weren't flushed
+  if benchmarks:
+ jobset.run(benchmarks, maxjobs=1,
+ add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
def collect_summary(bm_name, args):
heading('Summary: %s' % bm_name)
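
The heart of this change is a batch-and-flush pattern: perf record, flamegraph analysis, and cleanup commands are queued per benchmark, and every 20 benchmarks the queues are drained so the large temporary perf.data files never accumulate on disk. The sketch below is a minimal standalone rendering of that pattern, not the patch itself: it assumes plain subprocess/ThreadPool in place of gRPC's jobset helpers, and the names benchmark_binary and process_flamegraph.sh are hypothetical placeholders.

# Minimal sketch of the batch-and-flush pattern used in collect_perf above.
# Assumptions (not from the patch): a standalone script driving a hypothetical
# ./benchmark_binary, with subprocess/ThreadPool standing in for jobset.
import subprocess
from multiprocessing.pool import ThreadPool

BATCH_SIZE = 20  # flush queued work after this many benchmarks

def run_serial(cmds):
    # profiling runs execute one at a time so they don't contend for the CPU
    for cmd in cmds:
        subprocess.check_call(cmd)

def run_parallel(cmds):
    # analysis and cleanup jobs are independent, so fan out across cores
    with ThreadPool() as pool:
        pool.map(subprocess.check_call, cmds)

def collect(benchmark_names):
    record, analyze, cleanup = [], [], []
    for name in benchmark_names:
        data = '%s-perf.data' % name
        record.append(['perf', 'record', '-o', data, '-g', '-F', '997',
                       './benchmark_binary',
                       '--benchmark_filter=^%s$' % name,
                       '--benchmark_min_time=10'])
        analyze.append(['./process_flamegraph.sh', data])  # hypothetical script
        cleanup.append(['rm', data])
        if len(record) >= BATCH_SIZE:
            # drain the queues so perf.data files don't pile up on disk
            run_serial(record)
            run_parallel(analyze)
            run_parallel(cleanup)
            record, analyze, cleanup = [], [], []
    if record:  # whatever is left after the last full batch
        run_serial(record)
        run_parallel(analyze)
        run_parallel(cleanup)

Draining the profiling queue serially while parallelizing the analysis mirrors the maxjobs settings in the patch: perf sampling is sensitive to CPU contention, while flamegraph generation and file deletion are not.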