author     Craig Tiller <ctiller@google.com>   2017-03-01 16:00:19 -0800
committer  Craig Tiller <ctiller@google.com>   2017-03-01 16:00:19 -0800
commit     36281f950a30de08fc0b651a0a08af1d84074fe1
tree       84e972f264afa16a6f30d1b3d428163081ac3bee /tools/run_tests
parent     f9abb2921b4e7808af4ab0dc8681d9435705aad9
parent     fbedd676f7207fc9ebb2586999fc14908b779471
Merge github.com:grpc/grpc into lto
Diffstat (limited to 'tools/run_tests')
-rw-r--r--  tools/run_tests/generated/sources_and_headers.json |  4
-rw-r--r--  tools/run_tests/python_utils/start_port_server.py  | 15
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py              | 49
-rwxr-xr-x  tools/run_tests/run_tests.py                       |  4
-rwxr-xr-x  tools/run_tests/start_port_server.py               | 46
5 files changed, 91 insertions, 27 deletions
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index 15bcf5621e..ba48994c8e 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -7952,12 +7952,10 @@
       "test/core/util/parse_hexstring.h",
       "test/core/util/passthru_endpoint.c",
       "test/core/util/passthru_endpoint.h",
+      "test/core/util/port.c",
       "test/core/util/port.h",
-      "test/core/util/port_posix.c",
       "test/core/util/port_server_client.c",
       "test/core/util/port_server_client.h",
-      "test/core/util/port_uv.c",
-      "test/core/util/port_windows.c",
       "test/core/util/slice_splitter.c",
       "test/core/util/slice_splitter.h",
       "test/core/util/trickle_endpoint.c",
diff --git a/tools/run_tests/python_utils/start_port_server.py b/tools/run_tests/python_utils/start_port_server.py
index 4c9f6aac63..deb7354438 100644
--- a/tools/run_tests/python_utils/start_port_server.py
+++ b/tools/run_tests/python_utils/start_port_server.py
@@ -40,7 +40,10 @@ import tempfile
 import time
 
 
-def start_port_server(port_server_port):
+# must be synchronized with test/core/utils/port_server_client.h
+_PORT_SERVER_PORT = 32766
+
+def start_port_server():
   # check if a compatible port server is running
   # if incompatible (version mismatch) ==> start a new one
   # if not running ==> start a new one
@@ -48,7 +51,7 @@
   try:
     version = int(
         urllib.request.urlopen(
-            'http://localhost:%d/version_number' % port_server_port,
+            'http://localhost:%d/version_number' % _PORT_SERVER_PORT,
             timeout=10).read())
     logging.info('detected port server running version %d', version)
     running = True
@@ -67,7 +70,7 @@
   if not running:
     logging.info('port_server version mismatch: killing the old one')
     urllib.request.urlopen('http://localhost:%d/quitquitquit' %
-                           port_server_port).read()
+                           _PORT_SERVER_PORT).read()
     time.sleep(1)
   if not running:
     fd, logfile = tempfile.mkstemp()
@@ -76,7 +79,7 @@
     args = [
         sys.executable,
         os.path.abspath('tools/run_tests/python_utils/port_server.py'),
-        '-p', '%d' % port_server_port, '-l', logfile
+        '-p', '%d' % _PORT_SERVER_PORT, '-l', logfile
     ]
     env = dict(os.environ)
     env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
@@ -107,7 +110,7 @@
       time.sleep(1)
       try:
         urllib.request.urlopen(
-            'http://localhost:%d/get' % port_server_port,
+            'http://localhost:%d/get' % _PORT_SERVER_PORT,
             timeout=1).read()
         logging.info(
             'last ditch attempt to contact port server succeeded')
@@ -119,7 +122,7 @@
         print(port_log)
       sys.exit(1)
     try:
-      port_server_url = 'http://localhost:%d/get' % port_server_port
+      port_server_url = 'http://localhost:%d/get' % _PORT_SERVER_PORT
       urllib.request.urlopen(port_server_url, timeout=1).read()
       logging.info('port server is up and ready')
       break
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 561867d82d..0c773ddd43 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -44,8 +44,7 @@ os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 if not os.path.exists('reports'):
   os.makedirs('reports')
 
-port_server_port = 32766
-start_port_server.start_port_server(port_server_port)
+start_port_server.start_port_server()
 
 def fnize(s):
   out = ''
@@ -110,8 +109,7 @@ def collect_latency(bm_name, args):
     if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
       # run up to half the cpu count: each benchmark can use up to two cores
       # (one for the microbenchmark, one for the data flush)
-      jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
-                 add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+      jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
       jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
       jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
       benchmarks = []
@@ -119,8 +117,7 @@ def collect_latency(bm_name, args):
       cleanup = []
   # run the remaining benchmarks that weren't flushed
   if len(benchmarks):
-    jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
-               add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+    jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
   jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
   jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
 
@@ -156,8 +153,7 @@ def collect_perf(bm_name, args):
     if len(benchmarks) >= 20:
       # run up to half the cpu count: each benchmark can use up to two cores
      # (one for the microbenchmark, one for the data flush)
-      jobset.run(benchmarks, maxjobs=1,
-                 add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+      jobset.run(benchmarks, maxjobs=1)
       jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
       jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
       benchmarks = []
@@ -165,17 +161,16 @@ def collect_perf(bm_name, args):
       cleanup = []
   # run the remaining benchmarks that weren't flushed
   if len(benchmarks):
-    jobset.run(benchmarks, maxjobs=1,
-               add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+    jobset.run(benchmarks, maxjobs=1)
   jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
   jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
 
 
-def run_summary(cfg):
+def run_summary(bm_name, cfg, base_json_name):
   subprocess.check_call(
       ['make', bm_name, 'CONFIG=%s' % cfg, '-j', '%d' % multiprocessing.cpu_count()])
   cmd = ['bins/%s/%s' % (cfg, bm_name),
-         '--benchmark_out=out.%s.json' % cfg,
+         '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
          '--benchmark_out_format=json']
   if args.summary_time is not None:
     cmd += ['--benchmark_min_time=%d' % args.summary_time]
@@ -183,9 +178,9 @@
 
 def collect_summary(bm_name, args):
   heading('Summary: %s [no counters]' % bm_name)
-  text(run_summary('lto'))
+  text(run_summary(bm_name, 'lto', 'out'))
   heading('Summary: %s [with counters]' % bm_name)
-  text(run_summary('counters'))
+  text(run_summary(bm_name, 'counters', 'out'))
   if args.bigquery_upload:
     with open('out.csv', 'w') as f:
       f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', 'out.counters.json', 'out.opt.json']))
@@ -200,7 +195,7 @@ collectors = {
 
 argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
 argp.add_argument('-c', '--collect', choices=sorted(collectors.keys()),
-                  nargs='+',
+                  nargs='*',
                   default=sorted(collectors.keys()),
                   help='Which collectors should be run against each benchmark')
 
@@ -214,6 +209,10 @@ argp.add_argument('-b', '--benchmarks',
                   nargs='+',
                   type=str,
                   help='Which microbenchmarks should be run')
+argp.add_argument('--diff_perf',
+                  default=None,
+                  type=str,
+                  help='Diff microbenchmarks against this git revision')
 argp.add_argument('--bigquery_upload',
                   default=False,
                   action='store_const',
@@ -228,6 +227,26 @@ args = argp.parse_args()
 for bm_name in args.benchmarks:
   for collect in args.collect:
     collectors[collect](bm_name, args)
+if args.diff_perf:
+  for bm_name in args.benchmarks:
+    run_summary(bm_name, 'lto', '%s.new' % bm_name)
+  where_am_i = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+  subprocess.check_call(['git', 'checkout', args.diff_perf])
+  comparables = []
+  subprocess.check_call(['make', 'clean'])
+  try:
+    for bm_name in args.benchmarks:
+      try:
+        run_summary(bm_name, 'lto', '%s.old' % bm_name)
+        comparables.append(bm_name)
+      except subprocess.CalledProcessError, e:
+        pass
+  finally:
+    subprocess.check_call(['git', 'checkout', where_am_i])
+  for bm_name in comparables:
+    subprocess.check_call(['third_party/benchmark/tools/compare_bench.py',
+                           '%s.new.opt.json' % bm_name,
+                           '%s.old.opt.json' % bm_name])
 
 index_html += "</body>\n</html>\n"
 with open('reports/index.html', 'w') as f:
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 9741624c4f..93e414d05f 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -1451,8 +1451,7 @@ def _build_and_run(
   # start antagonists
   antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
                  for _ in range(0, args.antagonists)]
-  port_server_port = 32766
-  start_port_server.start_port_server(port_server_port)
+  start_port_server.start_port_server()
   resultset = None
   num_test_failures = 0
   try:
@@ -1495,7 +1494,6 @@
         all_runs, check_cancelled, newline_on_success=newline_on_success,
         travis=args.travis, maxjobs=args.jobs,
         stop_on_failure=args.stop_on_failure,
-        add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port},
         quiet_success=args.quiet_success)
     if resultset:
       for k, v in sorted(resultset.items()):
diff --git a/tools/run_tests/start_port_server.py b/tools/run_tests/start_port_server.py
new file mode 100755
index 0000000000..e33ac12bd3
--- /dev/null
+++ b/tools/run_tests/start_port_server.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Copyright 2017, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+Wrapper around port server starting code.
+
+Used by developers who wish to run individual C/C++ tests outside of the
+run_tests.py infrastructure.
+
+The path to this file is called out in test/core/util/port.c, and printed as
+an error message to users.
+"""
+
+import python_utils.start_port_server as start_port_server
+
+start_port_server.start_port_server()
+
+print "Port server started successfully"
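
A quick usage note (an inference from the new file's shebang, executable mode, and final print, not text from the commit): developers who want a port server while running individual C/C++ tests by hand can now launch the wrapper directly from the repository root, e.g.

    $ tools/run_tests/start_port_server.py
    Port server started successfully

The wrapper delegates to python_utils/start_port_server.py, which probes localhost:32766 for a compatible server via /version_number, shuts down a version-mismatched one via /quitquitquit, and otherwise spawns a fresh port_server.py.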
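Similarly, a hypothetical invocation of the new A/B mode in run_microbenchmark.py (the benchmark name and revision here are illustrative, not from the commit):

    $ tools/run_tests/run_microbenchmark.py -c summary -b bm_fullstack --diff_perf master

With --diff_perf set, the script writes '<bm>.new.lto.json' summaries for the current checkout, checks out the named revision, runs 'make clean', rebuilds each benchmark as '<bm>.old.lto.json' (skipping any that fail to build), restores the original branch in a finally block, and hands each '<bm>.new.opt.json'/'<bm>.old.opt.json' pair to third_party/benchmark/tools/compare_bench.py. The companion change of '-c' from nargs='+' to nargs='*' makes an empty collector list legal, so the comparison can run on its own.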