author    Craig Tiller <ctiller@google.com>  2017-02-17 10:27:56 -0800
committer Craig Tiller <ctiller@google.com>  2017-02-17 10:27:56 -0800
commit    cba864bfbe7b7d3afb3dbe43289a20872489d172 (patch)
tree      eb60c7341d5ded1edf9510ecfdccd0045551a657 /tools/run_tests/run_microbenchmark.py
parent    13e185419cd177b7fb552601665e43820321a96b (diff)
Eliminate port server env var, force its use always, consolidate code
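The change removes the GRPC_TEST_PORT_SERVER plumbing from every call site: instead of this script hard-coding a port (32766) and exporting the address to each jobset.run invocation, start_port_server.start_port_server() is now called with no arguments and owns the port itself. A minimal sketch of what the consolidated helper could look like, assuming the fixed port and startup logic move into the port-server module (the constant name, launcher path, and polling details below are assumptions, not shown in this diff):

# Hypothetical sketch of the consolidated start_port_server module;
# only the zero-argument call is confirmed by this diff.
import subprocess
import sys
import time
import urllib2  # Python 2, matching the vintage of this script

_PORT_SERVER_PORT = 32766  # assumed: the fixed port moves here from callers

def start_port_server():
  """Ensure the shared port server is running on its fixed, well-known port."""
  try:
    # If a server already answers on the fixed port, there is nothing to do.
    urllib2.urlopen('http://localhost:%d/get' % _PORT_SERVER_PORT,
                    timeout=1).read()
    return
  except Exception:
    pass
  subprocess.Popen([sys.executable, 'tools/run_tests/port_server.py',
                    '-p', str(_PORT_SERVER_PORT)])  # hypothetical launcher
  time.sleep(1)  # crude readiness wait; real code would poll until healthy

With the port fixed in one place, tests can always reach the server at the same address, which is presumably how the env var gets eliminated rather than merely centralized.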
Diffstat (limited to 'tools/run_tests/run_microbenchmark.py')
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 262c05b421..a67c8468d9 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -44,8 +44,7 @@ os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 if not os.path.exists('reports'):
   os.makedirs('reports')

-port_server_port = 32766
-start_port_server.start_port_server(port_server_port)
+start_port_server.start_port_server()

 def fnize(s):
   out = ''
@@ -108,8 +107,7 @@ def collect_latency(bm_name, args):
     if len(benchmarks) >= min(4, multiprocessing.cpu_count()):
       # run up to half the cpu count: each benchmark can use up to two cores
       # (one for the microbenchmark, one for the data flush)
-      jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
-                 add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+      jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
       jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
       jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
       benchmarks = []
@@ -117,8 +115,7 @@ def collect_latency(bm_name, args):
       cleanup = []
   # run the remaining benchmarks that weren't flushed
   if len(benchmarks):
-    jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
-               add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+    jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
     jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
     jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
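Both flush sites in collect_latency now collapse to the same one-line jobset.run call. The maxjobs arithmetic deserves a note: max(1, multiprocessing.cpu_count()/2) budgets two cores per benchmark (one for the microbenchmark, one for the data flush, per the comment) while still guaranteeing one job on a single-core machine. The script is Python 2, where / on integers floors; a sketch of the same guard written to behave identically on Python 3 as well:

import multiprocessing

# Two cores per latency benchmark (benchmark + data flush), so cap
# parallelism at half the available cores, but never drop below one job.
max_jobs = max(1, multiprocessing.cpu_count() // 2)  # '//' floors on Python 3 too

Under Python 3, cpu_count()/2 would produce a float, which code expecting an integer job count may not handle; // keeps the result an int in both versions.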
@@ -154,8 +151,7 @@ def collect_perf(bm_name, args):
     if len(benchmarks) >= 20:
       # run up to half the cpu count: each benchmark can use up to two cores
       # (one for the microbenchmark, one for the data flush)
-      jobset.run(benchmarks, maxjobs=1,
-                 add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+      jobset.run(benchmarks, maxjobs=1)
       jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
       jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
       benchmarks = []
@@ -163,8 +159,7 @@ def collect_perf(bm_name, args):
       cleanup = []
   # run the remaining benchmarks that weren't flushed
   if len(benchmarks):
-    jobset.run(benchmarks, maxjobs=1,
-               add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+    jobset.run(benchmarks, maxjobs=1)
     jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
     jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
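The collect_perf hunks delete the identical add_env plumbing but keep maxjobs=1: the half-the-cpu-count comment looks copied from collect_latency, while perf runs stay serialized, presumably so profiled benchmarks don't contend for the machine (an inference, not stated in this diff). What the deleted argument did is worth spelling out: add_env overlays extra variables onto each spawned job's environment, roughly like this sketch (jobset internals assumed, not shown here):

import os
import subprocess

def run_job(cmdline, add_env=None):
  # Hypothetical reduction of jobset.run's environment handling: each job
  # inherits the parent environment with add_env overlaid on top.
  env = dict(os.environ)
  env.update(add_env or {})
  return subprocess.call(cmdline, env=env)

# Before this commit, every call site repeated the overlay:
#   run_job(cmd, add_env={'GRPC_TEST_PORT_SERVER': 'localhost:32766'})
# After it, the fixed port is owned by start_port_server(), so a plain
#   run_job(cmd)
# suffices.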