path: root/tools/run_tests/run_microbenchmark.py
author     Craig Tiller <ctiller@google.com>          2017-03-01 15:11:37 -0800
committer  GitHub <noreply@github.com>                2017-03-01 15:11:37 -0800
commit     fbedd676f7207fc9ebb2586999fc14908b779471 (patch)
tree       82ce83f0e980281f98e5123e09679725f2342c89 /tools/run_tests/run_microbenchmark.py
parent     58bbbe999b7e7b955bf998ba529a5f317dc8f7d9 (diff)
parent     46ab5f59833a2c443ea0bd6cc147a96ab66a3507 (diff)

Merge pull request #9769 from ctiller/always-use-port-server

Always use port server
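In plain terms (reading only the hunks below): before this change the script hard-coded a port-server port and handed it to every benchmark job through the GRPC_TEST_PORT_SERVER environment variable; afterwards, the port server is started with no explicit port and the per-job add_env override is dropped. Side by side, excerpted from the diff (not a standalone program):

    # Before: pick a fixed port and forward it to each benchmark job.
    port_server_port = 32766
    start_port_server.start_port_server(port_server_port)
    jobset.run(benchmarks,
               maxjobs=max(1, multiprocessing.cpu_count() / 2),
               add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})

    # After: start the port server with its defaults and run the jobs
    # without the explicit environment override.
    start_port_server.start_port_server()
    jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))

The same simplification is applied in both collect_latency and collect_perf; only maxjobs differs (up to half the CPU count for latency profiles, 1 for perf collection).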
Diffstat (limited to 'tools/run_tests/run_microbenchmark.py')
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py  15
1 file changed, 5 insertions, 10 deletions
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 2da52e5d49..3a9461ecd3 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -44,8 +44,7 @@ os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 if not os.path.exists('reports'):
   os.makedirs('reports')
 
-port_server_port = 32766
-start_port_server.start_port_server(port_server_port)
+start_port_server.start_port_server()
 
 def fnize(s):
   out = ''
@@ -110,8 +109,7 @@ def collect_latency(bm_name, args):
     if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
       # run up to half the cpu count: each benchmark can use up to two cores
       # (one for the microbenchmark, one for the data flush)
-      jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
-                 add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+      jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
       jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
       jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
       benchmarks = []
@@ -119,8 +117,7 @@ def collect_latency(bm_name, args):
       cleanup = []
   # run the remaining benchmarks that weren't flushed
   if len(benchmarks):
-    jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
-               add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+    jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
     jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
     jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
@@ -156,8 +153,7 @@ def collect_perf(bm_name, args):
     if len(benchmarks) >= 20:
      # run up to half the cpu count: each benchmark can use up to two cores
      # (one for the microbenchmark, one for the data flush)
-      jobset.run(benchmarks, maxjobs=1,
-                 add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+      jobset.run(benchmarks, maxjobs=1)
       jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
       jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
       benchmarks = []
@@ -165,8 +161,7 @@ def collect_perf(bm_name, args):
       cleanup = []
   # run the remaining benchmarks that weren't flushed
   if len(benchmarks):
-    jobset.run(benchmarks, maxjobs=1,
-               add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+    jobset.run(benchmarks, maxjobs=1)
     jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
     jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
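For context, the lines being edited sit inside the same batching loop in both collection functions: jobs are queued and periodically flushed so that memory stays bounded, then any unflushed remainder is run at the end. A minimal self-contained sketch of that pattern (hypothetical names; run_jobs stands in for jobset.run, and the benchmark names are placeholders):

    import multiprocessing

    def run_jobs(jobs, maxjobs):
        # stand-in for jobset.run: the real script dispatches JobSpecs here
        for job in jobs:
            print('running %s (maxjobs=%d)' % (job, maxjobs))

    def collect(benchmark_names, flush_at=16):
        benchmarks = []
        for name in benchmark_names:
            benchmarks.append(name)
            # periodically flush the queue once it reaches the threshold
            if len(benchmarks) >= min(flush_at, multiprocessing.cpu_count()):
                run_jobs(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() // 2))
                benchmarks = []
        # run the remaining benchmarks that weren't flushed
        if benchmarks:
            run_jobs(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() // 2))

    collect(['bm_example_1', 'bm_example_2', 'bm_example_3'])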