-rwxr-xr-x | tools/run_tests/run_microbenchmark.py | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index bda31f6b3d..a2852546b3 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -134,7 +134,7 @@ def collect_perf(bm_name, args):
     link(line, '%s.svg' % fnize(line))
     benchmarks.append(
         jobset.JobSpec(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
-                        '-g', '-c', '1000',
+                        '-g', '-F', '997',
                         'bins/mutrace/%s' % bm_name,
                         '--benchmark_filter=^%s$' % line,
                         '--benchmark_min_time=10']))
@@ -152,7 +152,7 @@ def collect_perf(bm_name, args):
     if len(benchmarks) >= 20:
       # run up to half the cpu count: each benchmark can use up to two cores
       # (one for the microbenchmark, one for the data flush)
-      jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
+      jobset.run(benchmarks, maxjobs=1,
                  add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
       jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
       jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
@@ -161,7 +161,7 @@ def collect_perf(bm_name, args):
       cleanup = []
   # run the remaining benchmarks that weren't flushed
   if len(benchmarks):
-    jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
+    jobset.run(benchmarks, maxjobs=1,
                add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
     jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
     jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
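The first hunk switches perf record from period-based sampling ('-c 1000': one sample every 1000 events) to frequency-based sampling ('-F 997': roughly 997 samples per second); a prime rate just under 1 kHz is a common choice so samples don't land in lockstep with 1000 Hz timer activity. The second and third hunks drop maxjobs from half the core count to 1, so the perf-instrumented benchmarks run one at a time. As a minimal sketch, the modified JobSpec builds a command line like the following (bm_fullstack and BM_UnaryPingPong are hypothetical stand-ins for the actual bm_name and line values):

    # Sketch only: stand-in names, not taken from the diff.
    cmd = ['perf', 'record', '-o', 'BM_UnaryPingPong-perf.data',
           '-g', '-F', '997',   # record call graphs, sampling at ~997 Hz
           'bins/mutrace/bm_fullstack',
           '--benchmark_filter=^BM_UnaryPingPong$',
           '--benchmark_min_time=10']
    # Roughly equivalent shell invocation:
    #   perf record -o BM_UnaryPingPong-perf.data -g -F 997 \
    #     bins/mutrace/bm_fullstack \
    #     '--benchmark_filter=^BM_UnaryPingPong$' --benchmark_min_time=10

Running the perf jobs serially (maxjobs=1) plausibly trades throughput for cleaner profiles, since concurrent benchmarks would otherwise compete for cores while being sampled.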