author     ncteisen <ncteisen@gmail.com>  2017-12-11 18:00:40 -0800
committer  ncteisen <ncteisen@gmail.com>  2017-12-11 18:08:35 -0800
commit     888093c6ed0d24eed699173b5fb35235fe7a6069 (patch)
tree       ffbbc9fbfaacff17c3b94a2f2d780ece6abbfa36 /tools
parent     a69c6901f9c4e375ad543cb4ee6ce747d953bf18 (diff)
yapf run_tests
Diffstat (limited to 'tools')
-rwxr-xr-x  tools/distrib/yapf_code.sh                  5
-rwxr-xr-x  tools/run_tests/run_build_statistics.py     302
-rwxr-xr-x  tools/run_tests/run_interop_tests.py        1988
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py       346
-rwxr-xr-x  tools/run_tests/run_performance_tests.py    1143
-rwxr-xr-x  tools/run_tests/run_tests.py                2668
-rwxr-xr-x  tools/run_tests/run_tests_matrix.py         922
-rwxr-xr-x  tools/run_tests/start_port_server.py        1
-rwxr-xr-x  tools/run_tests/task_runner.py              100
9 files changed, 4005 insertions, 3470 deletions
diff --git a/tools/distrib/yapf_code.sh b/tools/distrib/yapf_code.sh
index 6d2759ddd1..698c341d88 100755
--- a/tools/distrib/yapf_code.sh
+++ b/tools/distrib/yapf_code.sh
@@ -25,10 +25,7 @@ DIRS=(
'tools/distrib'
'tools/interop_matrix'
'tools/profiling'
- 'tools/run_tests/python_utils'
- 'tools/run_tests/sanity'
- 'tools/run_tests/performance'
- 'tools/run_tests/artifacts'
+ 'tools/run_tests'
)
EXCLUSIONS=(
'grpcio/grpc_*.py'
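
Note on the yapf_code.sh hunk above: it collapses the four run_tests subdirectories into the single 'tools/run_tests' entry, so yapf now reformats the whole run_tests tree, which is what produces the large Python diffs that follow. A minimal sketch of an equivalent manual run, assuming yapf is installed; it uses yapf's standard -i/--in-place and -r/--recursive CLI flags and omits the EXCLUSIONS handling that the real script applies:

    # Sketch only: reformat the consolidated directory the way yapf_code.sh now targets it.
    import subprocess

    for directory in ['tools/run_tests']:  # the single entry replacing the four subdirectories
        subprocess.check_call(['yapf', '--in-place', '--recursive', directory])
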
diff --git a/tools/run_tests/run_build_statistics.py b/tools/run_tests/run_build_statistics.py
index 1e957b6677..4af00a47a6 100755
--- a/tools/run_tests/run_build_statistics.py
+++ b/tools/run_tests/run_build_statistics.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Tool to get build statistics from Jenkins and upload to BigQuery."""
from __future__ import print_function
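
The docstring above states the tool's purpose: pull per-build statistics from Jenkins and push them to BigQuery. A hedged usage sketch based on the argparse options defined near the bottom of this file (-u/--username and -b/--builds, where 'all' is the default and expands to every job in _BUILDS):

    # Example invocations (illustrative; run from the repository root):
    #   python tools/run_tests/run_build_statistics.py --username jenkins --builds gRPC_master_linux
    #   python tools/run_tests/run_build_statistics.py --builds all
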
@@ -27,39 +26,38 @@ import re
import sys
import urllib
-
-gcp_utils_dir = os.path.abspath(os.path.join(
- os.path.dirname(__file__), '../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils
-
_PROJECT_ID = 'grpc-testing'
_HAS_MATRIX = True
-_BUILDS = {'gRPC_interop_master': not _HAS_MATRIX,
- 'gRPC_master_linux': not _HAS_MATRIX,
- 'gRPC_master_macos': not _HAS_MATRIX,
- 'gRPC_master_windows': not _HAS_MATRIX,
- 'gRPC_performance_master': not _HAS_MATRIX,
- 'gRPC_portability_master_linux': not _HAS_MATRIX,
- 'gRPC_portability_master_windows': not _HAS_MATRIX,
- 'gRPC_master_asanitizer_c': not _HAS_MATRIX,
- 'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
- 'gRPC_master_msan_c': not _HAS_MATRIX,
- 'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
- 'gRPC_master_tsan_cpp': not _HAS_MATRIX,
- 'gRPC_interop_pull_requests': not _HAS_MATRIX,
- 'gRPC_performance_pull_requests': not _HAS_MATRIX,
- 'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
- 'gRPC_portability_pr_win': not _HAS_MATRIX,
- 'gRPC_pull_requests_linux': not _HAS_MATRIX,
- 'gRPC_pull_requests_macos': not _HAS_MATRIX,
- 'gRPC_pr_win': not _HAS_MATRIX,
- 'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
- 'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
- 'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
- 'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
- 'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
+_BUILDS = {
+ 'gRPC_interop_master': not _HAS_MATRIX,
+ 'gRPC_master_linux': not _HAS_MATRIX,
+ 'gRPC_master_macos': not _HAS_MATRIX,
+ 'gRPC_master_windows': not _HAS_MATRIX,
+ 'gRPC_performance_master': not _HAS_MATRIX,
+ 'gRPC_portability_master_linux': not _HAS_MATRIX,
+ 'gRPC_portability_master_windows': not _HAS_MATRIX,
+ 'gRPC_master_asanitizer_c': not _HAS_MATRIX,
+ 'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
+ 'gRPC_master_msan_c': not _HAS_MATRIX,
+ 'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
+ 'gRPC_master_tsan_cpp': not _HAS_MATRIX,
+ 'gRPC_interop_pull_requests': not _HAS_MATRIX,
+ 'gRPC_performance_pull_requests': not _HAS_MATRIX,
+ 'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
+ 'gRPC_portability_pr_win': not _HAS_MATRIX,
+ 'gRPC_pull_requests_linux': not _HAS_MATRIX,
+ 'gRPC_pull_requests_macos': not _HAS_MATRIX,
+ 'gRPC_pr_win': not _HAS_MATRIX,
+ 'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
+ 'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
+ 'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
+ 'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
+ 'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
}
_URL_BASE = 'https://grpc-testing.appspot.com/job'
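
_BUILDS maps each Jenkins job name to a flag saying whether it is a matrix build (every value here is not _HAS_MATRIX, i.e. False), and _URL_BASE is the Jenkins job root used to build per-build URLs. A small sketch of how the main loop further down expands the --builds argument against this map and derives the URL; the build number is a placeholder:

    # Mirrors the selection and URL construction in the main loop below.
    args_builds = ['all']                      # argparse default
    selected = _BUILDS.keys() if 'all' in args_builds else args_builds
    for build_name in selected:
        build_number = 1234                    # placeholder; the real loop walks unprocessed builds
        url_base = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
        # Non-matrix builds then read url_base + '/testReport/api/json' and url_base + '/consoleFull'.
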
@@ -99,147 +97,155 @@ _DATASET_ID = 'build_statistics'
def _scrape_for_known_errors(html):
- error_list = []
- for known_error in _KNOWN_ERRORS:
- errors = re.findall(known_error, html)
- this_error_count = len(errors)
- if this_error_count > 0:
- error_list.append({'description': known_error,
- 'count': this_error_count})
- print('====> %d failures due to %s' % (this_error_count, known_error))
- return error_list
+ error_list = []
+ for known_error in _KNOWN_ERRORS:
+ errors = re.findall(known_error, html)
+ this_error_count = len(errors)
+ if this_error_count > 0:
+ error_list.append({
+ 'description': known_error,
+ 'count': this_error_count
+ })
+ print('====> %d failures due to %s' %
+ (this_error_count, known_error))
+ return error_list
def _no_report_files_found(html):
- return _NO_REPORT_FILES_FOUND_ERROR in html
+ return _NO_REPORT_FILES_FOUND_ERROR in html
def _get_last_processed_buildnumber(build_name):
- query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
- _PROJECT_ID, _DATASET_ID, build_name)
- query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
- page = bq.jobs().getQueryResults(
- pageToken=None,
- **query_job['jobReference']).execute(num_retries=3)
- if page['rows'][0]['f'][0]['v']:
- return int(page['rows'][0]['f'][0]['v'])
- return 0
+ query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
+ _PROJECT_ID, _DATASET_ID, build_name)
+ query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
+ page = bq.jobs().getQueryResults(
+ pageToken=None, **query_job['jobReference']).execute(num_retries=3)
+ if page['rows'][0]['f'][0]['v']:
+ return int(page['rows'][0]['f'][0]['v'])
+ return 0
def _process_matrix(build, url_base):
- matrix_list = []
- for matrix in build.get_matrix_runs():
- matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*',
- matrix.name).groups()[0]
- matrix_tuple = matrix_str.split(',')
- json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
- url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
- console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
- url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
- matrix_dict = {'name': matrix_str,
- 'duration': matrix.get_duration().total_seconds()}
- matrix_dict.update(_process_build(json_url, console_url))
- matrix_list.append(matrix_dict)
-
- return matrix_list
+ matrix_list = []
+ for matrix in build.get_matrix_runs():
+ matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*',
+ matrix.name).groups()[0]
+ matrix_tuple = matrix_str.split(',')
+ json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
+ url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
+ console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
+ url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
+ matrix_dict = {
+ 'name': matrix_str,
+ 'duration': matrix.get_duration().total_seconds()
+ }
+ matrix_dict.update(_process_build(json_url, console_url))
+ matrix_list.append(matrix_dict)
+
+ return matrix_list
def _process_build(json_url, console_url):
- build_result = {}
- error_list = []
- try:
- html = urllib.urlopen(json_url).read()
- test_result = json.loads(html)
- print('====> Parsing result from %s' % json_url)
- failure_count = test_result['failCount']
- build_result['pass_count'] = test_result['passCount']
- build_result['failure_count'] = failure_count
- # This means Jenkins failure occurred.
- build_result['no_report_files_found'] = _no_report_files_found(html)
- # Only check errors if Jenkins failure occurred.
- if build_result['no_report_files_found']:
- error_list = _scrape_for_known_errors(html)
- except Exception as e:
- print('====> Got exception for %s: %s.' % (json_url, str(e)))
- print('====> Parsing errors from %s.' % console_url)
- html = urllib.urlopen(console_url).read()
- build_result['pass_count'] = 0
- build_result['failure_count'] = 1
- # In this case, the string doesn't exist in the result html but the fact
- # that we fail to parse the result html indicates Jenkins failure and hence
- # no report files were generated.
- build_result['no_report_files_found'] = True
- error_list = _scrape_for_known_errors(html)
-
- if error_list:
- build_result['error'] = error_list
- elif build_result['no_report_files_found']:
- build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
- else:
- build_result['error'] = [{'description': '', 'count': 0}]
-
- return build_result
+ build_result = {}
+ error_list = []
+ try:
+ html = urllib.urlopen(json_url).read()
+ test_result = json.loads(html)
+ print('====> Parsing result from %s' % json_url)
+ failure_count = test_result['failCount']
+ build_result['pass_count'] = test_result['passCount']
+ build_result['failure_count'] = failure_count
+ # This means Jenkins failure occurred.
+ build_result['no_report_files_found'] = _no_report_files_found(html)
+ # Only check errors if Jenkins failure occurred.
+ if build_result['no_report_files_found']:
+ error_list = _scrape_for_known_errors(html)
+ except Exception as e:
+ print('====> Got exception for %s: %s.' % (json_url, str(e)))
+ print('====> Parsing errors from %s.' % console_url)
+ html = urllib.urlopen(console_url).read()
+ build_result['pass_count'] = 0
+ build_result['failure_count'] = 1
+ # In this case, the string doesn't exist in the result html but the fact
+ # that we fail to parse the result html indicates Jenkins failure and hence
+ # no report files were generated.
+ build_result['no_report_files_found'] = True
+ error_list = _scrape_for_known_errors(html)
+
+ if error_list:
+ build_result['error'] = error_list
+ elif build_result['no_report_files_found']:
+ build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
+ else:
+ build_result['error'] = [{'description': '', 'count': 0}]
+
+ return build_result
# parse command line
argp = argparse.ArgumentParser(description='Get build statistics.')
argp.add_argument('-u', '--username', default='jenkins')
-argp.add_argument('-b', '--builds',
- choices=['all'] + sorted(_BUILDS.keys()),
- nargs='+',
- default=['all'])
+argp.add_argument(
+ '-b',
+ '--builds',
+ choices=['all'] + sorted(_BUILDS.keys()),
+ nargs='+',
+ default=['all'])
args = argp.parse_args()
J = Jenkins('https://grpc-testing.appspot.com', args.username, 'apiToken')
bq = big_query_utils.create_big_query()
for build_name in _BUILDS.keys() if 'all' in args.builds else args.builds:
- print('====> Build: %s' % build_name)
- # Since get_last_completed_build() always fails due to malformatted string
- # error, we use get_build_metadata() instead.
- job = None
- try:
- job = J[build_name]
- except Exception as e:
- print('====> Failed to get build %s: %s.' % (build_name, str(e)))
- continue
- last_processed_build_number = _get_last_processed_buildnumber(build_name)
- last_complete_build_number = job.get_last_completed_buildnumber()
- # To avoid processing all builds for a project never looked at. In this case,
- # only examine 10 latest builds.
- starting_build_number = max(last_processed_build_number+1,
- last_complete_build_number-9)
- for build_number in xrange(starting_build_number,
- last_complete_build_number+1):
- print('====> Processing %s build %d.' % (build_name, build_number))
- build = None
+ print('====> Build: %s' % build_name)
+ # Since get_last_completed_build() always fails due to malformatted string
+ # error, we use get_build_metadata() instead.
+ job = None
try:
- build = job.get_build_metadata(build_number)
- print('====> Build status: %s.' % build.get_status())
- if build.get_status() == 'ABORTED':
+ job = J[build_name]
+ except Exception as e:
+ print('====> Failed to get build %s: %s.' % (build_name, str(e)))
continue
- # If any build is still running, stop processing this job. Next time, we
- # start from where it was left so that all builds are processed
- # sequentially.
- if build.is_running():
- print('====> Build %d is still running.' % build_number)
- break
- except KeyError:
- print('====> Build %s is missing. Skip.' % build_number)
- continue
- build_result = {'build_number': build_number,
- 'timestamp': str(build.get_timestamp())}
- url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
- if _BUILDS[build_name]: # The build has matrix, such as gRPC_master.
- build_result['matrix'] = _process_matrix(build, url_base)
- else:
- json_url = '%s/testReport/api/json' % url_base
- console_url = '%s/consoleFull' % url_base
- build_result['duration'] = build.get_duration().total_seconds()
- build_stat = _process_build(json_url, console_url)
- build_result.update(build_stat)
- rows = [big_query_utils.make_row(build_number, build_result)]
- if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, build_name,
- rows):
- print('====> Error uploading result to bigquery.')
- sys.exit(1)
+ last_processed_build_number = _get_last_processed_buildnumber(build_name)
+ last_complete_build_number = job.get_last_completed_buildnumber()
+ # To avoid processing all builds for a project never looked at. In this case,
+ # only examine 10 latest builds.
+ starting_build_number = max(last_processed_build_number + 1,
+ last_complete_build_number - 9)
+ for build_number in xrange(starting_build_number,
+ last_complete_build_number + 1):
+ print('====> Processing %s build %d.' % (build_name, build_number))
+ build = None
+ try:
+ build = job.get_build_metadata(build_number)
+ print('====> Build status: %s.' % build.get_status())
+ if build.get_status() == 'ABORTED':
+ continue
+ # If any build is still running, stop processing this job. Next time, we
+ # start from where it was left so that all builds are processed
+ # sequentially.
+ if build.is_running():
+ print('====> Build %d is still running.' % build_number)
+ break
+ except KeyError:
+ print('====> Build %s is missing. Skip.' % build_number)
+ continue
+ build_result = {
+ 'build_number': build_number,
+ 'timestamp': str(build.get_timestamp())
+ }
+ url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
+ if _BUILDS[build_name]: # The build has matrix, such as gRPC_master.
+ build_result['matrix'] = _process_matrix(build, url_base)
+ else:
+ json_url = '%s/testReport/api/json' % url_base
+ console_url = '%s/consoleFull' % url_base
+ build_result['duration'] = build.get_duration().total_seconds()
+ build_stat = _process_build(json_url, console_url)
+ build_result.update(build_stat)
+ rows = [big_query_utils.make_row(build_number, build_result)]
+ if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+ build_name, rows):
+ print('====> Error uploading result to bigquery.')
+ sys.exit(1)
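
End of run_build_statistics.py: the main loop assembles one result dict per Jenkins build and uploads it via big_query_utils.insert_rows. An illustrative sketch of the shape of such a row for a non-matrix build, using the field names assigned above (all values invented):

    example_build_result = {
        'build_number': 1234,
        'timestamp': '2017-12-11 18:00:40',
        'duration': 987.0,                  # seconds, from build.get_duration()
        'pass_count': 2500,
        'failure_count': 3,
        'no_report_files_found': False,
        'error': [{'description': '', 'count': 0}],
    }

For matrix builds the dict instead carries a 'matrix' key holding the per-configuration list produced by _process_matrix.
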
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 8f46ea99fd..99f4298813 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Run interop (cross-language) tests in parallel."""
from __future__ import print_function
@@ -37,9 +36,9 @@ import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
# It's ok to not import because this is only necessary to upload results to BQ.
try:
- from python_utils.upload_test_results import upload_interop_results_to_bq
+ from python_utils.upload_test_results import upload_interop_results_to_bq
except ImportError as e:
- print(e)
+ print(e)
# Docker doesn't clean up after itself, so we do it on exit.
atexit.register(lambda: subprocess.call(['stty', 'echo']))
@@ -47,22 +46,24 @@ atexit.register(lambda: subprocess.call(['stty', 'echo']))
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)
-_DEFAULT_SERVER_PORT=8080
+_DEFAULT_SERVER_PORT = 8080
-_SKIP_CLIENT_COMPRESSION = ['client_compressed_unary',
- 'client_compressed_streaming']
+_SKIP_CLIENT_COMPRESSION = [
+ 'client_compressed_unary', 'client_compressed_streaming'
+]
-_SKIP_SERVER_COMPRESSION = ['server_compressed_unary',
- 'server_compressed_streaming']
+_SKIP_SERVER_COMPRESSION = [
+ 'server_compressed_unary', 'server_compressed_streaming'
+]
_SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION
-_SKIP_ADVANCED = ['status_code_and_message',
- 'custom_metadata',
- 'unimplemented_method',
- 'unimplemented_service']
+_SKIP_ADVANCED = [
+ 'status_code_and_message', 'custom_metadata', 'unimplemented_method',
+ 'unimplemented_service'
+]
-_TEST_TIMEOUT = 3*60
+_TEST_TIMEOUT = 3 * 60
# disable this test on core-based languages,
# see https://github.com/grpc/grpc/issues/9779
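
The skip lists above (and _SKIP_DATA_FRAME_PADDING, referenced by the comment) are what the language classes below return from unimplemented_test_cases(); cloud_to_cloud_jobspec later exits if asked to run a case its client does not implement. A hedged sketch of deriving the runnable cases for one language, assuming the scheduling code elsewhere in this file filters the case list in essentially this way:

    # Illustration of the skip-list mechanism, not the actual scheduling code.
    language = _LANGUAGES['csharp']
    runnable_cases = [
        case for case in _TEST_CASES
        if case not in language.unimplemented_test_cases()
    ]
    # For C# this removes the server_compressed_* cases plus whatever
    # _SKIP_DATA_FRAME_PADDING contains.
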
@@ -77,977 +78,1054 @@ _XML_REPORT = 'report.xml'
class CXXLanguage:
- def __init__(self):
- self.client_cwd = None
- self.server_cwd = None
- self.http2_cwd = None
- self.safename = 'cxx'
+ def __init__(self):
+ self.client_cwd = None
+ self.server_cwd = None
+ self.http2_cwd = None
+ self.safename = 'cxx'
- def client_cmd(self, args):
- return ['bins/opt/interop_client'] + args
+ def client_cmd(self, args):
+ return ['bins/opt/interop_client'] + args
- def client_cmd_http2interop(self, args):
- return ['bins/opt/http2_client'] + args
+ def client_cmd_http2interop(self, args):
+ return ['bins/opt/http2_client'] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def server_cmd(self, args):
- return ['bins/opt/interop_server'] + args
+ def server_cmd(self, args):
+ return ['bins/opt/interop_server'] + args
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return []
+ def unimplemented_test_cases_server(self):
+ return []
- def __str__(self):
- return 'c++'
+ def __str__(self):
+ return 'c++'
class CSharpLanguage:
- def __init__(self):
- self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
- self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
+ self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
+ def client_cmd(self, args):
+ return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def server_cmd(self, args):
- return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
+ def server_cmd(self, args):
+ return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
- def __str__(self):
- return 'csharp'
+ def __str__(self):
+ return 'csharp'
class CSharpCoreCLRLanguage:
- def __init__(self):
- self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0'
- self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0'
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0'
+ self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0'
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
+ def client_cmd(self, args):
+ return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def server_cmd(self, args):
- return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
+ def server_cmd(self, args):
+ return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
- def __str__(self):
- return 'csharpcoreclr'
+ def __str__(self):
+ return 'csharpcoreclr'
class JavaLanguage:
- def __init__(self):
- self.client_cwd = '../grpc-java'
- self.server_cwd = '../grpc-java'
- self.http2_cwd = '../grpc-java'
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = '../grpc-java'
+ self.server_cwd = '../grpc-java'
+ self.http2_cwd = '../grpc-java'
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['./run-test-client.sh'] + args
+ def client_cmd(self, args):
+ return ['./run-test-client.sh'] + args
- def client_cmd_http2interop(self, args):
- return ['./interop-testing/build/install/grpc-interop-testing/bin/http2-client'] + args
+ def client_cmd_http2interop(self, args):
+ return [
+ './interop-testing/build/install/grpc-interop-testing/bin/http2-client'
+ ] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def server_cmd(self, args):
- return ['./run-test-server.sh'] + args
+ def server_cmd(self, args):
+ return ['./run-test-server.sh'] + args
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return []
+ def unimplemented_test_cases(self):
+ return []
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
- def __str__(self):
- return 'java'
+ def __str__(self):
+ return 'java'
class JavaOkHttpClient:
- def __init__(self):
- self.client_cwd = '../grpc-java'
- self.safename = 'java'
+ def __init__(self):
+ self.client_cwd = '../grpc-java'
+ self.safename = 'java'
- def client_cmd(self, args):
- return ['./run-test-client.sh', '--use_okhttp=true'] + args
+ def client_cmd(self, args):
+ return ['./run-test-client.sh', '--use_okhttp=true'] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_DATA_FRAME_PADDING
- def __str__(self):
- return 'javaokhttp'
+ def __str__(self):
+ return 'javaokhttp'
class GoLanguage:
- def __init__(self):
- # TODO: this relies on running inside docker
- self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
- self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
- self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
- self.safename = str(self)
+ def __init__(self):
+ # TODO: this relies on running inside docker
+ self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
+ self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
+ self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return ['go', 'run', 'client.go'] + args
- def client_cmd(self, args):
- return ['go', 'run', 'client.go'] + args
+ def client_cmd_http2interop(self, args):
+ return ['go', 'run', 'negative_http2_client.go'] + args
- def client_cmd_http2interop(self, args):
- return ['go', 'run', 'negative_http2_client.go'] + args
+ def cloud_to_prod_env(self):
+ return {}
- def cloud_to_prod_env(self):
- return {}
+ def server_cmd(self, args):
+ return ['go', 'run', 'server.go'] + args
- def server_cmd(self, args):
- return ['go', 'run', 'server.go'] + args
+ def global_env(self):
+ return {}
- def global_env(self):
- return {}
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION
- def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def __str__(self):
+ return 'go'
- def __str__(self):
- return 'go'
class Http2Server:
- """Represents the HTTP/2 Interop Test server
+ """Represents the HTTP/2 Interop Test server
This pretends to be a language in order to be built and run, but really it
isn't.
"""
- def __init__(self):
- self.server_cwd = None
- self.safename = str(self)
- def server_cmd(self, args):
- return ['python test/http2_test/http2_test_server.py']
+ def __init__(self):
+ self.server_cwd = None
+ self.safename = str(self)
+
+ def server_cmd(self, args):
+ return ['python test/http2_test/http2_test_server.py']
+
+ def cloud_to_prod_env(self):
+ return {}
- def cloud_to_prod_env(self):
- return {}
+ def global_env(self):
+ return {}
- def global_env(self):
- return {}
+ def unimplemented_test_cases(self):
+ return _TEST_CASES + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases(self):
- return _TEST_CASES + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases_server(self):
+ return _TEST_CASES
- def unimplemented_test_cases_server(self):
- return _TEST_CASES
+ def __str__(self):
+ return 'http2'
- def __str__(self):
- return 'http2'
class Http2Client:
- """Represents the HTTP/2 Interop Test
+ """Represents the HTTP/2 Interop Test
This pretends to be a language in order to be built and run, but really it
isn't.
"""
- def __init__(self):
- self.client_cwd = None
- self.safename = str(self)
- def client_cmd(self, args):
- return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
+ def __init__(self):
+ self.client_cwd = None
+ self.safename = str(self)
- def cloud_to_prod_env(self):
- return {}
+ def client_cmd(self, args):
+ return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
- def global_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _TEST_CASES
+ def global_env(self):
+ return {}
- def unimplemented_test_cases_server(self):
- return _TEST_CASES
+ def unimplemented_test_cases(self):
+ return _TEST_CASES
+
+ def unimplemented_test_cases_server(self):
+ return _TEST_CASES
+
+ def __str__(self):
+ return 'http2'
- def __str__(self):
- return 'http2'
class NodeLanguage:
- def __init__(self):
- self.client_cwd = '../grpc-node'
- self.server_cwd = '../grpc-node'
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = '../grpc-node'
+ self.server_cwd = '../grpc-node'
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+ def client_cmd(self, args):
+ return [
+ 'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
'node', '--require', './test/fixtures/native_native',
- 'test/interop/interop_client.js'] + args
+ 'test/interop/interop_client.js'
+ ] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def server_cmd(self, args):
- return ['packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+ def server_cmd(self, args):
+ return [
+ 'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
'node', '--require', './test/fixtures/native_native',
- 'test/interop/interop_server.js'] + args
+ 'test/interop/interop_server.js'
+ ] + args
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
- def __str__(self):
- return 'node'
+ def __str__(self):
+ return 'node'
class PHPLanguage:
- def __init__(self):
- self.client_cwd = None
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = None
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['src/php/bin/interop_client.sh'] + args
+ def client_cmd(self, args):
+ return ['src/php/bin/interop_client.sh'] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return []
+ def unimplemented_test_cases_server(self):
+ return []
- def __str__(self):
- return 'php'
+ def __str__(self):
+ return 'php'
class PHP7Language:
- def __init__(self):
- self.client_cwd = None
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = None
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['src/php/bin/interop_client.sh'] + args
+ def client_cmd(self, args):
+ return ['src/php/bin/interop_client.sh'] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return []
+ def unimplemented_test_cases_server(self):
+ return []
+
+ def __str__(self):
+ return 'php7'
- def __str__(self):
- return 'php7'
class ObjcLanguage:
- def __init__(self):
- self.client_cwd = 'src/objective-c/tests'
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = 'src/objective-c/tests'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ # from args, extract the server port and craft xcodebuild command out of it
+ for arg in args:
+ port = re.search('--server_port=(\d+)', arg)
+ if port:
+ portnum = port.group(1)
+ cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL -destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test' % portnum
+ return [cmdline]
- def client_cmd(self, args):
- # from args, extract the server port and craft xcodebuild command out of it
- for arg in args:
- port = re.search('--server_port=(\d+)', arg)
- if port:
- portnum = port.group(1)
- cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL -destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test'%portnum
- return [cmdline]
+ def cloud_to_prod_env(self):
+ return {}
- def cloud_to_prod_env(self):
- return {}
+ def global_env(self):
+ return {}
- def global_env(self):
- return {}
+ def unimplemented_test_cases(self):
+ # ObjC test runs all cases with the same command. It ignores the testcase
+ # cmdline argument. Here we return all but one test cases as unimplemented,
+ # and depend upon ObjC test's behavior that it runs all cases even when
+ # we tell it to run just one.
+ return _TEST_CASES[1:] + _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases(self):
- # ObjC test runs all cases with the same command. It ignores the testcase
- # cmdline argument. Here we return all but one test cases as unimplemented,
- # and depend upon ObjC test's behavior that it runs all cases even when
- # we tell it to run just one.
- return _TEST_CASES[1:] + _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def __str__(self):
+ return 'objc'
- def __str__(self):
- return 'objc'
class RubyLanguage:
- def __init__(self):
- self.client_cwd = None
- self.server_cwd = None
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = None
+ self.server_cwd = None
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['tools/run_tests/interop/with_rvm.sh',
- 'ruby', 'src/ruby/pb/test/client.rb'] + args
+ def client_cmd(self, args):
+ return [
+ 'tools/run_tests/interop/with_rvm.sh', 'ruby',
+ 'src/ruby/pb/test/client.rb'
+ ] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def server_cmd(self, args):
- return ['tools/run_tests/interop/with_rvm.sh',
- 'ruby', 'src/ruby/pb/test/server.rb'] + args
+ def server_cmd(self, args):
+ return [
+ 'tools/run_tests/interop/with_rvm.sh', 'ruby',
+ 'src/ruby/pb/test/server.rb'
+ ] + args
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
+
+ def __str__(self):
+ return 'ruby'
- def __str__(self):
- return 'ruby'
class PythonLanguage:
- def __init__(self):
- self.client_cwd = None
- self.server_cwd = None
- self.http2_cwd = None
- self.safename = str(self)
-
- def client_cmd(self, args):
- return [
- 'py27/bin/python',
- 'src/python/grpcio_tests/setup.py',
- 'run_interop',
- '--client',
- '--args="{}"'.format(' '.join(args))
- ]
+ def __init__(self):
+ self.client_cwd = None
+ self.server_cwd = None
+ self.http2_cwd = None
+ self.safename = str(self)
- def client_cmd_http2interop(self, args):
- return [ 'py27/bin/python',
- 'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
- ] + args
-
- def cloud_to_prod_env(self):
- return {}
-
- def server_cmd(self, args):
- return [
- 'py27/bin/python',
- 'src/python/grpcio_tests/setup.py',
- 'run_interop',
- '--server',
- '--args="{}"'.format(' '.join(args))
- ]
+ def client_cmd(self, args):
+ return [
+ 'py27/bin/python', 'src/python/grpcio_tests/setup.py',
+ 'run_interop', '--client', '--args="{}"'.format(' '.join(args))
+ ]
+
+ def client_cmd_http2interop(self, args):
+ return [
+ 'py27/bin/python',
+ 'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
+ ] + args
- def global_env(self):
- return {'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
- 'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)}
+ def cloud_to_prod_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def server_cmd(self, args):
+ return [
+ 'py27/bin/python', 'src/python/grpcio_tests/setup.py',
+ 'run_interop', '--server', '--args="{}"'.format(' '.join(args))
+ ]
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def global_env(self):
+ return {
+ 'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
+ 'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)
+ }
- def __str__(self):
- return 'python'
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
+
+ def __str__(self):
+ return 'python'
_LANGUAGES = {
- 'c++' : CXXLanguage(),
- 'csharp' : CSharpLanguage(),
- 'csharpcoreclr' : CSharpCoreCLRLanguage(),
- 'go' : GoLanguage(),
- 'java' : JavaLanguage(),
- 'javaokhttp' : JavaOkHttpClient(),
- 'node' : NodeLanguage(),
- 'php' : PHPLanguage(),
- 'php7' : PHP7Language(),
- 'objc' : ObjcLanguage(),
- 'ruby' : RubyLanguage(),
- 'python' : PythonLanguage(),
+ 'c++': CXXLanguage(),
+ 'csharp': CSharpLanguage(),
+ 'csharpcoreclr': CSharpCoreCLRLanguage(),
+ 'go': GoLanguage(),
+ 'java': JavaLanguage(),
+ 'javaokhttp': JavaOkHttpClient(),
+ 'node': NodeLanguage(),
+ 'php': PHPLanguage(),
+ 'php7': PHP7Language(),
+ 'objc': ObjcLanguage(),
+ 'ruby': RubyLanguage(),
+ 'python': PythonLanguage(),
}
# languages supported as cloud_to_cloud servers
-_SERVERS = ['c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python']
-
-_TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
- 'empty_stream', 'client_streaming', 'server_streaming',
- 'cancel_after_begin', 'cancel_after_first_response',
- 'timeout_on_sleeping_server', 'custom_metadata',
- 'status_code_and_message', 'unimplemented_method',
- 'client_compressed_unary', 'server_compressed_unary',
- 'client_compressed_streaming', 'server_compressed_streaming',
- 'unimplemented_service']
-
-_AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds',
- 'oauth2_auth_token', 'per_rpc_creds']
+_SERVERS = [
+ 'c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python'
+]
+
+_TEST_CASES = [
+ 'large_unary', 'empty_unary', 'ping_pong', 'empty_stream',
+ 'client_streaming', 'server_streaming', 'cancel_after_begin',
+ 'cancel_after_first_response', 'timeout_on_sleeping_server',
+ 'custom_metadata', 'status_code_and_message', 'unimplemented_method',
+ 'client_compressed_unary', 'server_compressed_unary',
+ 'client_compressed_streaming', 'server_compressed_streaming',
+ 'unimplemented_service'
+]
+
+_AUTH_TEST_CASES = [
+ 'compute_engine_creds', 'jwt_token_creds', 'oauth2_auth_token',
+ 'per_rpc_creds'
+]
_HTTP2_TEST_CASES = ['tls', 'framing']
-_HTTP2_SERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
- 'goaway', 'ping', 'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test']
+_HTTP2_SERVER_TEST_CASES = [
+ 'rst_after_header', 'rst_after_data', 'rst_during_data', 'goaway', 'ping',
+ 'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test'
+]
-_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = { 'data_frame_padding': 'large_unary', 'no_df_padding_sanity_test': 'large_unary' }
+_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = {
+ 'data_frame_padding': 'large_unary',
+ 'no_df_padding_sanity_test': 'large_unary'
+}
-_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys()
+_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys(
+)
-_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = ['java', 'go', 'python', 'c++']
+_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = [
+ 'java', 'go', 'python', 'c++'
+]
DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
+
def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
- """Wraps given cmdline array to create 'docker run' cmdline from it."""
- docker_cmdline = ['docker', 'run', '-i', '--rm=true']
+ """Wraps given cmdline array to create 'docker run' cmdline from it."""
+ docker_cmdline = ['docker', 'run', '-i', '--rm=true']
- # turn environ into -e docker args
- if environ:
- for k,v in environ.items():
- docker_cmdline += ['-e', '%s=%s' % (k,v)]
+ # turn environ into -e docker args
+ if environ:
+ for k, v in environ.items():
+ docker_cmdline += ['-e', '%s=%s' % (k, v)]
- # set working directory
- workdir = DOCKER_WORKDIR_ROOT
- if cwd:
- workdir = os.path.join(workdir, cwd)
- docker_cmdline += ['-w', workdir]
+ # set working directory
+ workdir = DOCKER_WORKDIR_ROOT
+ if cwd:
+ workdir = os.path.join(workdir, cwd)
+ docker_cmdline += ['-w', workdir]
- docker_cmdline += docker_args + [image] + cmdline
- return docker_cmdline
+ docker_cmdline += docker_args + [image] + cmdline
+ return docker_cmdline
def manual_cmdline(docker_cmdline, docker_image):
- """Returns docker cmdline adjusted for manual invocation."""
- print_cmdline = []
- for item in docker_cmdline:
- if item.startswith('--name='):
- continue
- if item == docker_image:
- item = "$docker_image"
- item = item.replace('"', '\\"')
- # add quotes when necessary
- if any(character.isspace() for character in item):
- item = "\"%s\"" % item
- print_cmdline.append(item)
- return ' '.join(print_cmdline)
+ """Returns docker cmdline adjusted for manual invocation."""
+ print_cmdline = []
+ for item in docker_cmdline:
+ if item.startswith('--name='):
+ continue
+ if item == docker_image:
+ item = "$docker_image"
+ item = item.replace('"', '\\"')
+ # add quotes when necessary
+ if any(character.isspace() for character in item):
+ item = "\"%s\"" % item
+ print_cmdline.append(item)
+ return ' '.join(print_cmdline)
def write_cmdlog_maybe(cmdlog, filename):
- """Returns docker cmdline adjusted for manual invocation."""
- if cmdlog:
- with open(filename, 'w') as logfile:
- logfile.write('#!/bin/bash\n')
- logfile.writelines("%s\n" % line for line in cmdlog)
- print('Command log written to file %s' % filename)
+ """Returns docker cmdline adjusted for manual invocation."""
+ if cmdlog:
+ with open(filename, 'w') as logfile:
+ logfile.write('#!/bin/bash\n')
+ logfile.writelines("%s\n" % line for line in cmdlog)
+ print('Command log written to file %s' % filename)
def bash_cmdline(cmdline):
- """Creates bash -c cmdline from args list."""
- # Use login shell:
- # * makes error messages clearer if executables are missing
- return ['bash', '-c', ' '.join(cmdline)]
+ """Creates bash -c cmdline from args list."""
+ # Use login shell:
+ # * makes error messages clearer if executables are missing
+ return ['bash', '-c', ' '.join(cmdline)]
def auth_options(language, test_case):
- """Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
+ """Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
- language = str(language)
- cmdargs = []
- env = {}
+ language = str(language)
+ cmdargs = []
+ env = {}
- # TODO(jtattermusch): this file path only works inside docker
- key_filepath = '/root/service_account/GrpcTesting-726eb1347f15.json'
- oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
- key_file_arg = '--service_account_key_file=%s' % key_filepath
- default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'
+ # TODO(jtattermusch): this file path only works inside docker
+ key_filepath = '/root/service_account/GrpcTesting-726eb1347f15.json'
+ oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
+ key_file_arg = '--service_account_key_file=%s' % key_filepath
+ default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'
- if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
- if language in ['csharp', 'csharpcoreclr', 'node', 'php', 'php7', 'python', 'ruby']:
- env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
- else:
- cmdargs += [key_file_arg]
+ if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
+ if language in [
+ 'csharp', 'csharpcoreclr', 'node', 'php', 'php7', 'python',
+ 'ruby'
+ ]:
+ env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
+ else:
+ cmdargs += [key_file_arg]
- if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
- cmdargs += [oauth_scope_arg]
+ if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
+ cmdargs += [oauth_scope_arg]
- if test_case == 'oauth2_auth_token' and language == 'c++':
- # C++ oauth2 test uses GCE creds and thus needs to know the default account
- cmdargs += [default_account_arg]
+ if test_case == 'oauth2_auth_token' and language == 'c++':
+ # C++ oauth2 test uses GCE creds and thus needs to know the default account
+ cmdargs += [default_account_arg]
- if test_case == 'compute_engine_creds':
- cmdargs += [oauth_scope_arg, default_account_arg]
+ if test_case == 'compute_engine_creds':
+ cmdargs += [oauth_scope_arg, default_account_arg]
- return (cmdargs, env)
+ return (cmdargs, env)
def _job_kill_handler(job):
- if job._spec.container_name:
- dockerjob.docker_kill(job._spec.container_name)
- # When the job times out and we decide to kill it,
- # we need to wait a before restarting the job
- # to prevent "container name already in use" error.
- # TODO(jtattermusch): figure out a cleaner way to to this.
- time.sleep(2)
-
-
-def cloud_to_prod_jobspec(language, test_case, server_host_name,
- server_host_detail, docker_image=None, auth=False,
+ if job._spec.container_name:
+ dockerjob.docker_kill(job._spec.container_name)
+ # When the job times out and we decide to kill it,
+ # we need to wait a before restarting the job
+ # to prevent "container name already in use" error.
+ # TODO(jtattermusch): figure out a cleaner way to to this.
+ time.sleep(2)
+
+
+def cloud_to_prod_jobspec(language,
+ test_case,
+ server_host_name,
+ server_host_detail,
+ docker_image=None,
+ auth=False,
manual_cmd_log=None):
- """Creates jobspec for cloud-to-prod interop test"""
- container_name = None
- cmdargs = [
- '--server_host=%s' % server_host_detail[0],
- '--server_host_override=%s' % server_host_detail[1],
- '--server_port=443',
- '--use_tls=true',
- '--test_case=%s' % test_case]
- environ = dict(language.cloud_to_prod_env(), **language.global_env())
- if auth:
- auth_cmdargs, auth_env = auth_options(language, test_case)
- cmdargs += auth_cmdargs
- environ.update(auth_env)
- cmdline = bash_cmdline(language.client_cmd(cmdargs))
- cwd = language.client_cwd
-
- if docker_image:
- container_name = dockerjob.random_name('interop_client_%s' %
- language.safename)
- cmdline = docker_run_cmdline(cmdline,
- image=docker_image,
- cwd=cwd,
- environ=environ,
- docker_args=['--net=host',
- '--name=%s' % container_name])
- if manual_cmd_log is not None:
- if manual_cmd_log == []:
- manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
- manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
- cwd = None
- environ = None
-
- suite_name='cloud_to_prod_auth' if auth else 'cloud_to_prod'
- test_job = jobset.JobSpec(
- cmdline=cmdline,
- cwd=cwd,
- environ=environ,
- shortname='%s:%s:%s:%s' % (suite_name, language, server_host_name,
- test_case),
- timeout_seconds=_TEST_TIMEOUT,
- flake_retries=4 if args.allow_flakes else 0,
- timeout_retries=2 if args.allow_flakes else 0,
- kill_handler=_job_kill_handler)
- if docker_image:
- test_job.container_name = container_name
- return test_job
-
-
-def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
- server_port, docker_image=None, insecure=False,
- manual_cmd_log=None):
- """Creates jobspec for cloud-to-cloud interop test"""
- interop_only_options = [
- '--server_host_override=foo.test.google.fr',
- '--use_tls=%s' % ('false' if insecure else 'true'),
- '--use_test_ca=true',
- ]
-
- client_test_case = test_case
- if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
- client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[test_case]
- if client_test_case in language.unimplemented_test_cases():
- print('asking client %s to run unimplemented test case %s' % (repr(language), client_test_case))
- sys.exit(1)
+ """Creates jobspec for cloud-to-prod interop test"""
+ container_name = None
+ cmdargs = [
+ '--server_host=%s' % server_host_detail[0],
+ '--server_host_override=%s' % server_host_detail[1],
+ '--server_port=443', '--use_tls=true', '--test_case=%s' % test_case
+ ]
+ environ = dict(language.cloud_to_prod_env(), **language.global_env())
+ if auth:
+ auth_cmdargs, auth_env = auth_options(language, test_case)
+ cmdargs += auth_cmdargs
+ environ.update(auth_env)
+ cmdline = bash_cmdline(language.client_cmd(cmdargs))
+ cwd = language.client_cwd
- common_options = [
- '--test_case=%s' % client_test_case,
- '--server_host=%s' % server_host,
- '--server_port=%s' % server_port,
- ]
+ if docker_image:
+ container_name = dockerjob.random_name('interop_client_%s' %
+ language.safename)
+ cmdline = docker_run_cmdline(
+ cmdline,
+ image=docker_image,
+ cwd=cwd,
+ environ=environ,
+ docker_args=['--net=host', '--name=%s' % container_name])
+ if manual_cmd_log is not None:
+ if manual_cmd_log == []:
+ manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+ docker_image)
+ manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
+ cwd = None
+ environ = None
+
+ suite_name = 'cloud_to_prod_auth' if auth else 'cloud_to_prod'
+ test_job = jobset.JobSpec(
+ cmdline=cmdline,
+ cwd=cwd,
+ environ=environ,
+ shortname='%s:%s:%s:%s' % (suite_name, language, server_host_name,
+ test_case),
+ timeout_seconds=_TEST_TIMEOUT,
+ flake_retries=4 if args.allow_flakes else 0,
+ timeout_retries=2 if args.allow_flakes else 0,
+ kill_handler=_job_kill_handler)
+ if docker_image:
+ test_job.container_name = container_name
+ return test_job
+
+
+def cloud_to_cloud_jobspec(language,
+ test_case,
+ server_name,
+ server_host,
+ server_port,
+ docker_image=None,
+ insecure=False,
+ manual_cmd_log=None):
+ """Creates jobspec for cloud-to-cloud interop test"""
+ interop_only_options = [
+ '--server_host_override=foo.test.google.fr',
+ '--use_tls=%s' % ('false' if insecure else 'true'),
+ '--use_test_ca=true',
+ ]
- if test_case in _HTTP2_SERVER_TEST_CASES:
+ client_test_case = test_case
if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
- client_options = interop_only_options + common_options
- cmdline = bash_cmdline(language.client_cmd(client_options))
- cwd = language.client_cwd
- else:
- cmdline = bash_cmdline(language.client_cmd_http2interop(common_options))
- cwd = language.http2_cwd
- else:
- cmdline = bash_cmdline(language.client_cmd(common_options+interop_only_options))
- cwd = language.client_cwd
+ client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[
+ test_case]
+ if client_test_case in language.unimplemented_test_cases():
+ print('asking client %s to run unimplemented test case %s' %
+ (repr(language), client_test_case))
+ sys.exit(1)
+
+ common_options = [
+ '--test_case=%s' % client_test_case,
+ '--server_host=%s' % server_host,
+ '--server_port=%s' % server_port,
+ ]
- environ = language.global_env()
- if docker_image and language.safename != 'objc':
- # we can't run client in docker for objc.
- container_name = dockerjob.random_name('interop_client_%s' % language.safename)
- cmdline = docker_run_cmdline(cmdline,
- image=docker_image,
- environ=environ,
- cwd=cwd,
- docker_args=['--net=host',
- '--name=%s' % container_name])
- if manual_cmd_log is not None:
- if manual_cmd_log == []:
- manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
- manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
- cwd = None
-
- test_job = jobset.JobSpec(
- cmdline=cmdline,
- cwd=cwd,
- environ=environ,
- shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
- test_case),
- timeout_seconds=_TEST_TIMEOUT,
- flake_retries=4 if args.allow_flakes else 0,
- timeout_retries=2 if args.allow_flakes else 0,
- kill_handler=_job_kill_handler)
- if docker_image:
- test_job.container_name = container_name
- return test_job
+ if test_case in _HTTP2_SERVER_TEST_CASES:
+ if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+ client_options = interop_only_options + common_options
+ cmdline = bash_cmdline(language.client_cmd(client_options))
+ cwd = language.client_cwd
+ else:
+ cmdline = bash_cmdline(
+ language.client_cmd_http2interop(common_options))
+ cwd = language.http2_cwd
+ else:
+ cmdline = bash_cmdline(
+ language.client_cmd(common_options + interop_only_options))
+ cwd = language.client_cwd
+
+ environ = language.global_env()
+ if docker_image and language.safename != 'objc':
+ # we can't run client in docker for objc.
+ container_name = dockerjob.random_name('interop_client_%s' %
+ language.safename)
+ cmdline = docker_run_cmdline(
+ cmdline,
+ image=docker_image,
+ environ=environ,
+ cwd=cwd,
+ docker_args=['--net=host', '--name=%s' % container_name])
+ if manual_cmd_log is not None:
+ if manual_cmd_log == []:
+ manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+ docker_image)
+ manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
+ cwd = None
+
+ test_job = jobset.JobSpec(
+ cmdline=cmdline,
+ cwd=cwd,
+ environ=environ,
+ shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
+ test_case),
+ timeout_seconds=_TEST_TIMEOUT,
+ flake_retries=4 if args.allow_flakes else 0,
+ timeout_retries=2 if args.allow_flakes else 0,
+ kill_handler=_job_kill_handler)
+ if docker_image:
+ test_job.container_name = container_name
+ return test_job
def server_jobspec(language, docker_image, insecure=False, manual_cmd_log=None):
- """Create jobspec for running a server"""
- container_name = dockerjob.random_name('interop_server_%s' % language.safename)
- cmdline = bash_cmdline(
- language.server_cmd(['--port=%s' % _DEFAULT_SERVER_PORT,
- '--use_tls=%s' % ('false' if insecure else 'true')]))
- environ = language.global_env()
- docker_args = ['--name=%s' % container_name]
- if language.safename == 'http2':
- # we are running the http2 interop server. Open next N ports beginning
- # with the server port. These ports are used for http2 interop test
- # (one test case per port).
- docker_args += list(
- itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
- for i in range(
- len(_HTTP2_SERVER_TEST_CASES))))
- # Enable docker's healthcheck mechanism.
- # This runs a Python script inside the container every second. The script
- # pings the http2 server to verify it is ready. The 'health-retries' flag
- # specifies the number of consecutive failures before docker will report
- # the container's status as 'unhealthy'. Prior to the first 'health_retries'
- # failures or the first success, the status will be 'starting'. 'docker ps'
- # or 'docker inspect' can be used to see the health of the container on the
- # command line.
- docker_args += [
- '--health-cmd=python test/http2_test/http2_server_health_check.py '
- '--server_host=%s --server_port=%d'
- % ('localhost', _DEFAULT_SERVER_PORT),
- '--health-interval=1s',
- '--health-retries=5',
- '--health-timeout=10s',
- ]
+ """Create jobspec for running a server"""
+ container_name = dockerjob.random_name('interop_server_%s' %
+ language.safename)
+ cmdline = bash_cmdline(
+ language.server_cmd([
+ '--port=%s' % _DEFAULT_SERVER_PORT, '--use_tls=%s' % (
+ 'false' if insecure else 'true')
+ ]))
+ environ = language.global_env()
+ docker_args = ['--name=%s' % container_name]
+ if language.safename == 'http2':
+ # we are running the http2 interop server. Open next N ports beginning
+ # with the server port. These ports are used for http2 interop test
+ # (one test case per port).
+ docker_args += list(
+ itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
+ for i in range(
+ len(_HTTP2_SERVER_TEST_CASES))))
+ # Enable docker's healthcheck mechanism.
+ # This runs a Python script inside the container every second. The script
+ # pings the http2 server to verify it is ready. The 'health-retries' flag
+ # specifies the number of consecutive failures before docker will report
+ # the container's status as 'unhealthy'. Prior to the first 'health_retries'
+ # failures or the first success, the status will be 'starting'. 'docker ps'
+ # or 'docker inspect' can be used to see the health of the container on the
+ # command line.
+ docker_args += [
+ '--health-cmd=python test/http2_test/http2_server_health_check.py '
+ '--server_host=%s --server_port=%d' %
+ ('localhost', _DEFAULT_SERVER_PORT),
+ '--health-interval=1s',
+ '--health-retries=5',
+ '--health-timeout=10s',
+ ]
- else:
- docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
-
- docker_cmdline = docker_run_cmdline(cmdline,
- image=docker_image,
- cwd=language.server_cwd,
- environ=environ,
- docker_args=docker_args)
- if manual_cmd_log is not None:
- if manual_cmd_log == []:
- manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
- manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
- server_job = jobset.JobSpec(
- cmdline=docker_cmdline,
- environ=environ,
- shortname='interop_server_%s' % language,
- timeout_seconds=30*60)
- server_job.container_name = container_name
- return server_job
+ else:
+ docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
+
+ docker_cmdline = docker_run_cmdline(
+ cmdline,
+ image=docker_image,
+ cwd=language.server_cwd,
+ environ=environ,
+ docker_args=docker_args)
+ if manual_cmd_log is not None:
+ if manual_cmd_log == []:
+ manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+ docker_image)
+ manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
+ server_job = jobset.JobSpec(
+ cmdline=docker_cmdline,
+ environ=environ,
+ shortname='interop_server_%s' % language,
+ timeout_seconds=30 * 60)
+ server_job.container_name = container_name
+ return server_job
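A rough illustration of how the health status exposed by the --health-* flags above can be polled from outside the container, roughly what wait_for_healthy() on the DockerJob (used further down in this file) has to do. This is a hedged sketch, not the dockerjob implementation; the function name and default timeout are invented for the example:

    import subprocess
    import time

    def wait_until_healthy(container_name, timeout_seconds=600):
        """Polls 'docker inspect' until the container reports 'healthy'."""
        deadline = time.time() + timeout_seconds
        while time.time() < deadline:
            status = subprocess.check_output([
                'docker', 'inspect', '--format={{.State.Health.Status}}',
                container_name
            ]).decode().strip()
            if status == 'healthy':
                return True
            # 'starting' means fewer than --health-retries consecutive failures yet.
            time.sleep(1)
        return False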
def build_interop_image_jobspec(language, tag=None):
- """Creates jobspec for building interop docker image for a language"""
- if not tag:
- tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
- env = {'INTEROP_IMAGE': tag,
- 'BASE_NAME': 'grpc_interop_%s' % language.safename}
- if not args.travis:
- env['TTY_FLAG'] = '-t'
- # This env variable is used to get around the github rate limit
- # error when running the PHP `composer install` command
- host_file = '%s/.composer/auth.json' % os.environ['HOME']
- if language.safename == 'php' and os.path.exists(host_file):
- env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
- '-v %s:/root/.composer/auth.json:ro' % host_file
- build_job = jobset.JobSpec(
- cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
- environ=env,
- shortname='build_docker_%s' % (language),
- timeout_seconds=30*60)
- build_job.tag = tag
- return build_job
+ """Creates jobspec for building interop docker image for a language"""
+ if not tag:
+ tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
+ env = {
+ 'INTEROP_IMAGE': tag,
+ 'BASE_NAME': 'grpc_interop_%s' % language.safename
+ }
+ if not args.travis:
+ env['TTY_FLAG'] = '-t'
+ # This env variable is used to get around the github rate limit
+ # error when running the PHP `composer install` command
+ host_file = '%s/.composer/auth.json' % os.environ['HOME']
+ if language.safename == 'php' and os.path.exists(host_file):
+ env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
+ '-v %s:/root/.composer/auth.json:ro' % host_file
+ build_job = jobset.JobSpec(
+ cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
+ environ=env,
+ shortname='build_docker_%s' % (language),
+ timeout_seconds=30 * 60)
+ build_job.tag = tag
+ return build_job
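For reference, the jobspec above amounts to running the build script with a few environment variables set. A hedged sketch of an equivalent direct invocation (the image tag is a made-up example, and TTY_FLAG would be omitted under --travis):

    import os
    import subprocess

    env = dict(os.environ)
    env.update({
        'INTEROP_IMAGE': 'grpc_interop_go:example-tag',  # hypothetical tag
        'BASE_NAME': 'grpc_interop_go',
        'TTY_FLAG': '-t',
    })
    subprocess.check_call(
        ['tools/run_tests/dockerize/build_interop_image.sh'], env=env)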
def aggregate_http2_results(stdout):
- match = re.search(r'\{"cases[^\]]*\]\}', stdout)
- if not match:
- return None
-
- results = json.loads(match.group(0))
- skipped = 0
- passed = 0
- failed = 0
- failed_cases = []
- for case in results['cases']:
- if case.get('skipped', False):
- skipped += 1
- else:
- if case.get('passed', False):
- passed += 1
- else:
- failed += 1
- failed_cases.append(case.get('name', "NONAME"))
- return {
- 'passed': passed,
- 'failed': failed,
- 'skipped': skipped,
- 'failed_cases': ', '.join(failed_cases),
- 'percent': 1.0 * passed / (passed + failed)
- }
+ match = re.search(r'\{"cases[^\]]*\]\}', stdout)
+ if not match:
+ return None
+
+ results = json.loads(match.group(0))
+ skipped = 0
+ passed = 0
+ failed = 0
+ failed_cases = []
+ for case in results['cases']:
+ if case.get('skipped', False):
+ skipped += 1
+ else:
+ if case.get('passed', False):
+ passed += 1
+ else:
+ failed += 1
+ failed_cases.append(case.get('name', "NONAME"))
+ return {
+ 'passed': passed,
+ 'failed': failed,
+ 'skipped': skipped,
+ 'failed_cases': ', '.join(failed_cases),
+ 'percent': 1.0 * passed / (passed + failed)
+ }
+
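To illustrate what aggregate_http2_results() returns, here is a fabricated stdout fragment (the case names are invented) and the summary it produces:

    stdout = ('log noise {"cases": [{"name": "goaway", "passed": true}, '
              '{"name": "rst_after_data", "passed": false}, {"skipped": true}]}')
    aggregate_http2_results(stdout)
    # => {'passed': 1, 'failed': 1, 'skipped': 1,
    #     'failed_cases': 'rst_after_data', 'percent': 0.5}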
# A dictionary of prod servers to test.
# Format: server_name: (server_host, server_host_override, errors_allowed)
# TODO(adelez): implement logic for errors_allowed where if the indicated tests
# fail, they don't impact the overall test result.
prod_servers = {
- 'default': ('216.239.32.254',
- 'grpc-test.sandbox.googleapis.com', False),
- 'gateway_v2': ('216.239.32.254',
- 'grpc-test2.sandbox.googleapis.com', True),
+ 'default': ('216.239.32.254', 'grpc-test.sandbox.googleapis.com', False),
+ 'gateway_v2': ('216.239.32.254', 'grpc-test2.sandbox.googleapis.com', True),
'cloud_gateway': ('216.239.32.255', 'grpc-test.sandbox.googleapis.com',
False),
'cloud_gateway_v2': ('216.239.32.255', 'grpc-test2.sandbox.googleapis.com',
True),
- 'gateway_v4': ('216.239.32.254',
- 'grpc-test4.sandbox.googleapis.com', True),
+ 'gateway_v4': ('216.239.32.254', 'grpc-test4.sandbox.googleapis.com', True),
'cloud_gateway_v4': ('216.239.32.255', 'grpc-test4.sandbox.googleapis.com',
True),
}
argp = argparse.ArgumentParser(description='Run interop tests.')
-argp.add_argument('-l', '--language',
- choices=['all'] + sorted(_LANGUAGES),
- nargs='+',
- default=['all'],
-                  help='Clients to run. The Objc client can only be run on OSX.')
+argp.add_argument(
+ '-l',
+ '--language',
+ choices=['all'] + sorted(_LANGUAGES),
+ nargs='+',
+ default=['all'],
+    help='Clients to run. The Objc client can only be run on OSX.')
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
-argp.add_argument('--cloud_to_prod',
- default=False,
- action='store_const',
- const=True,
- help='Run cloud_to_prod tests.')
-argp.add_argument('--cloud_to_prod_auth',
- default=False,
- action='store_const',
- const=True,
- help='Run cloud_to_prod_auth tests.')
-argp.add_argument('--prod_servers',
- choices=prod_servers.keys(),
- default=['default'],
- nargs='+',
- help=('The servers to run cloud_to_prod and '
- 'cloud_to_prod_auth tests against.'))
-argp.add_argument('-s', '--server',
- choices=['all'] + sorted(_SERVERS),
- nargs='+',
- help='Run cloud_to_cloud servers in a separate docker ' +
- 'image. Servers can only be started automatically if ' +
- '--use_docker option is enabled.',
- default=[])
-argp.add_argument('--override_server',
- action='append',
- type=lambda kv: kv.split('='),
- help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
- default=[])
-argp.add_argument('-t', '--travis',
- default=False,
- action='store_const',
- const=True)
-argp.add_argument('-v', '--verbose',
- default=False,
- action='store_const',
- const=True)
-argp.add_argument('--use_docker',
- default=False,
- action='store_const',
- const=True,
- help='Run all the interop tests under docker. That provides ' +
-                  'additional isolation and removes the need to install ' +
-                  'language-specific prerequisites. Only available on Linux.')
-argp.add_argument('--allow_flakes',
- default=False,
- action='store_const',
- const=True,
- help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
-argp.add_argument('--manual_run',
- default=False,
- action='store_const',
- const=True,
- help='Prepare things for running interop tests manually. ' +
- 'Preserve docker images after building them and skip '
- 'actually running the tests. Only print commands to run by ' +
- 'hand.')
-argp.add_argument('--http2_interop',
- default=False,
- action='store_const',
- const=True,
- help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
-argp.add_argument('--http2_server_interop',
- default=False,
- action='store_const',
- const=True,
-                  help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests)')
-argp.add_argument('--insecure',
- default=False,
- action='store_const',
- const=True,
-                  help='Whether to use an insecure (non-TLS) channel.')
-argp.add_argument('--internal_ci',
- default=False,
- action='store_const',
- const=True,
- help=('Put reports into subdirectories to improve '
- 'presentation of results by Internal CI.'))
-argp.add_argument('--bq_result_table',
- default='',
- type=str,
- nargs='?',
- help='Upload test results to a specified BQ table.')
+argp.add_argument(
+ '--cloud_to_prod',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run cloud_to_prod tests.')
+argp.add_argument(
+ '--cloud_to_prod_auth',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run cloud_to_prod_auth tests.')
+argp.add_argument(
+ '--prod_servers',
+ choices=prod_servers.keys(),
+ default=['default'],
+ nargs='+',
+ help=('The servers to run cloud_to_prod and '
+ 'cloud_to_prod_auth tests against.'))
+argp.add_argument(
+ '-s',
+ '--server',
+ choices=['all'] + sorted(_SERVERS),
+ nargs='+',
+ help='Run cloud_to_cloud servers in a separate docker ' +
+ 'image. Servers can only be started automatically if ' +
+ '--use_docker option is enabled.',
+ default=[])
+argp.add_argument(
+ '--override_server',
+ action='append',
+ type=lambda kv: kv.split('='),
+ help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
+ default=[])
+argp.add_argument(
+ '-t', '--travis', default=False, action='store_const', const=True)
+argp.add_argument(
+ '-v', '--verbose', default=False, action='store_const', const=True)
+argp.add_argument(
+ '--use_docker',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run all the interop tests under docker. That provides ' +
+    'additional isolation and removes the need to install ' +
+    'language-specific prerequisites. Only available on Linux.')
+argp.add_argument(
+ '--allow_flakes',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+)
+argp.add_argument(
+ '--manual_run',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Prepare things for running interop tests manually. ' +
+ 'Preserve docker images after building them and skip '
+ 'actually running the tests. Only print commands to run by ' + 'hand.')
+argp.add_argument(
+ '--http2_interop',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
+argp.add_argument(
+ '--http2_server_interop',
+ default=False,
+ action='store_const',
+ const=True,
+    help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests)'
+)
+argp.add_argument(
+ '--insecure',
+ default=False,
+ action='store_const',
+ const=True,
+    help='Whether to use an insecure (non-TLS) channel.')
+argp.add_argument(
+ '--internal_ci',
+ default=False,
+ action='store_const',
+ const=True,
+ help=('Put reports into subdirectories to improve '
+ 'presentation of results by Internal CI.'))
+argp.add_argument(
+ '--bq_result_table',
+ default='',
+ type=str,
+ nargs='?',
+ help='Upload test results to a specified BQ table.')
args = argp.parse_args()
-servers = set(s for s in itertools.chain.from_iterable(_SERVERS
- if x == 'all' else [x]
- for x in args.server))
+servers = set(
+ s
+ for s in itertools.chain.from_iterable(_SERVERS if x == 'all' else [x]
+ for x in args.server))
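The comprehension above (and the analogous one for languages further down) expands the literal 'all' into every known name while passing explicit selections through unchanged. A self-contained sketch with an illustrative _SERVERS list:

    import itertools

    _SERVERS = ['c++', 'go', 'java']  # illustrative subset
    for selection in (['all'], ['go', 'java'], []):
        expanded = set(
            s for s in itertools.chain.from_iterable(
                _SERVERS if x == 'all' else [x] for x in selection))
        print('%s -> %s' % (selection, sorted(expanded)))
    # ['all'] -> ['c++', 'go', 'java']
    # ['go', 'java'] -> ['go', 'java']
    # [] -> []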
if args.use_docker:
- if not args.travis:
- print('Seen --use_docker flag, will run interop tests under docker.')
- print('')
- print('IMPORTANT: The changes you are testing need to be locally committed')
- print('because only the committed changes in the current branch will be')
- print('copied to the docker environment.')
- time.sleep(5)
+ if not args.travis:
+ print('Seen --use_docker flag, will run interop tests under docker.')
+ print('')
+ print(
+ 'IMPORTANT: The changes you are testing need to be locally committed'
+ )
+ print(
+ 'because only the committed changes in the current branch will be')
+ print('copied to the docker environment.')
+ time.sleep(5)
if args.manual_run and not args.use_docker:
-  print('--manual_run is only supported with the --use_docker option enabled.')
- sys.exit(1)
+    print('--manual_run is only supported with the --use_docker option enabled.')
+ sys.exit(1)
if not args.use_docker and servers:
-  print('Running interop servers is only supported with the --use_docker option enabled.')
- sys.exit(1)
-
+ print(
+        'Running interop servers is only supported with the --use_docker option enabled.'
+ )
+ sys.exit(1)
# we want to include everything but objc in 'all'
# because objc won't run on non-mac platforms
all_but_objc = set(six.iterkeys(_LANGUAGES)) - set(['objc'])
-languages = set(_LANGUAGES[l]
- for l in itertools.chain.from_iterable(
- all_but_objc if x == 'all' else [x]
- for x in args.language))
+languages = set(
+ _LANGUAGES[l]
+ for l in itertools.chain.from_iterable(all_but_objc if x == 'all' else [x]
+ for x in args.language))
languages_http2_clients_for_http2_server_interop = set()
if args.http2_server_interop:
- languages_http2_clients_for_http2_server_interop = set(
- _LANGUAGES[l] for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
- if 'all' in args.language or l in args.language)
+ languages_http2_clients_for_http2_server_interop = set(
+ _LANGUAGES[l]
+ for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
+ if 'all' in args.language or l in args.language)
http2Interop = Http2Client() if args.http2_interop else None
http2InteropServer = Http2Server() if args.http2_server_interop else None
-docker_images={}
+docker_images = {}
if args.use_docker:
- # languages for which to build docker images
- languages_to_build = set(
- _LANGUAGES[k] for k in set([str(l) for l in languages] + [s for s in servers]))
- languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop
-
- if args.http2_interop:
- languages_to_build.add(http2Interop)
-
- if args.http2_server_interop:
- languages_to_build.add(http2InteropServer)
-
- build_jobs = []
- for l in languages_to_build:
- if str(l) == 'objc':
- # we don't need to build a docker image for objc
- continue
- job = build_interop_image_jobspec(l)
- docker_images[str(l)] = job.tag
- build_jobs.append(job)
-
- if build_jobs:
- jobset.message('START', 'Building interop docker images.', do_newline=True)
- if args.verbose:
- print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
+ # languages for which to build docker images
+ languages_to_build = set(
+ _LANGUAGES[k]
+ for k in set([str(l) for l in languages] + [s for s in servers]))
+ languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop
- num_failures, _ = jobset.run(
- build_jobs, newline_on_success=True, maxjobs=args.jobs)
- if num_failures == 0:
- jobset.message('SUCCESS', 'All docker images built successfully.',
- do_newline=True)
- else:
- jobset.message('FAILED', 'Failed to build interop docker images.',
- do_newline=True)
- for image in six.itervalues(docker_images):
- dockerjob.remove_image(image, skip_nonexistent=True)
- sys.exit(1)
+ if args.http2_interop:
+ languages_to_build.add(http2Interop)
+
+ if args.http2_server_interop:
+ languages_to_build.add(http2InteropServer)
+
+ build_jobs = []
+ for l in languages_to_build:
+ if str(l) == 'objc':
+ # we don't need to build a docker image for objc
+ continue
+ job = build_interop_image_jobspec(l)
+ docker_images[str(l)] = job.tag
+ build_jobs.append(job)
+
+ if build_jobs:
+ jobset.message(
+ 'START', 'Building interop docker images.', do_newline=True)
+ if args.verbose:
+ print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
+
+ num_failures, _ = jobset.run(
+ build_jobs, newline_on_success=True, maxjobs=args.jobs)
+ if num_failures == 0:
+ jobset.message(
+ 'SUCCESS',
+ 'All docker images built successfully.',
+ do_newline=True)
+ else:
+ jobset.message(
+ 'FAILED',
+ 'Failed to build interop docker images.',
+ do_newline=True)
+ for image in six.itervalues(docker_images):
+ dockerjob.remove_image(image, skip_nonexistent=True)
+ sys.exit(1)
server_manual_cmd_log = [] if args.manual_run else None
client_manual_cmd_log = [] if args.manual_run else None
@@ -1056,214 +1134,236 @@ client_manual_cmd_log = [] if args.manual_run else None
server_jobs = {}
server_addresses = {}
try:
- for s in servers:
- lang = str(s)
- spec = server_jobspec(_LANGUAGES[lang], docker_images.get(lang),
- args.insecure, manual_cmd_log=server_manual_cmd_log)
- if not args.manual_run:
- job = dockerjob.DockerJob(spec)
- server_jobs[lang] = job
- server_addresses[lang] = ('localhost', job.mapped_port(_DEFAULT_SERVER_PORT))
- else:
- # don't run the server, set server port to a placeholder value
- server_addresses[lang] = ('localhost', '${SERVER_PORT}')
-
- http2_server_job = None
- if args.http2_server_interop:
-    # launch an HTTP2 server emulator that creates edge cases
- lang = str(http2InteropServer)
- spec = server_jobspec(http2InteropServer, docker_images.get(lang),
- manual_cmd_log=server_manual_cmd_log)
- if not args.manual_run:
- http2_server_job = dockerjob.DockerJob(spec)
- server_jobs[lang] = http2_server_job
- else:
- # don't run the server, set server port to a placeholder value
- server_addresses[lang] = ('localhost', '${SERVER_PORT}')
-
- jobs = []
- if args.cloud_to_prod:
- if args.insecure:
- print('TLS is always enabled for cloud_to_prod scenarios.')
- for server_host_name in args.prod_servers:
- for language in languages:
- for test_case in _TEST_CASES:
- if not test_case in language.unimplemented_test_cases():
- if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION:
- test_job = cloud_to_prod_jobspec(
- language, test_case, server_host_name,
- prod_servers[server_host_name],
- docker_image=docker_images.get(str(language)),
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
-
- if args.http2_interop:
- for test_case in _HTTP2_TEST_CASES:
- test_job = cloud_to_prod_jobspec(
- http2Interop, test_case, server_host_name,
- prod_servers[server_host_name],
- docker_image=docker_images.get(str(http2Interop)),
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
-
- if args.cloud_to_prod_auth:
- if args.insecure:
- print('TLS is always enabled for cloud_to_prod scenarios.')
- for server_host_name in args.prod_servers:
- for language in languages:
- for test_case in _AUTH_TEST_CASES:
- if not test_case in language.unimplemented_test_cases():
- test_job = cloud_to_prod_jobspec(
- language, test_case, server_host_name,
- prod_servers[server_host_name],
- docker_image=docker_images.get(str(language)), auth=True,
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
-
- for server in args.override_server:
- server_name = server[0]
- (server_host, server_port) = server[1].split(':')
- server_addresses[server_name] = (server_host, server_port)
-
- for server_name, server_address in server_addresses.items():
- (server_host, server_port) = server_address
- server_language = _LANGUAGES.get(server_name, None)
- skip_server = [] # test cases unimplemented by server
- if server_language:
- skip_server = server_language.unimplemented_test_cases_server()
- for language in languages:
- for test_case in _TEST_CASES:
- if not test_case in language.unimplemented_test_cases():
- if not test_case in skip_server:
- test_job = cloud_to_cloud_jobspec(language,
- test_case,
- server_name,
- server_host,
- server_port,
- docker_image=docker_images.get(str(language)),
- insecure=args.insecure,
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
-
- if args.http2_interop:
- for test_case in _HTTP2_TEST_CASES:
- if server_name == "go":
- # TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
- continue
- test_job = cloud_to_cloud_jobspec(http2Interop,
- test_case,
- server_name,
- server_host,
- server_port,
- docker_image=docker_images.get(str(http2Interop)),
- insecure=args.insecure,
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
-
- if args.http2_server_interop:
- if not args.manual_run:
- http2_server_job.wait_for_healthy(timeout_seconds=600)
- for language in languages_http2_clients_for_http2_server_interop:
- for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
- offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
- server_port = _DEFAULT_SERVER_PORT+offset
+ for s in servers:
+ lang = str(s)
+ spec = server_jobspec(
+ _LANGUAGES[lang],
+ docker_images.get(lang),
+ args.insecure,
+ manual_cmd_log=server_manual_cmd_log)
if not args.manual_run:
- server_port = http2_server_job.mapped_port(server_port)
- test_job = cloud_to_cloud_jobspec(language,
- test_case,
- str(http2InteropServer),
- 'localhost',
- server_port,
- docker_image=docker_images.get(str(language)),
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
- for language in languages:
- # HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
-      # HTTP2_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather
-      # than specialized http2 clients, reusing existing test implementations.
-      # For example, in the "data_frame_padding" test, use the language's gRPC
-      # interop clients and make them think that they're running the "large_unary"
- # test case. This avoids implementing a new test case in each language.
- for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
- if test_case not in language.unimplemented_test_cases():
- offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
- server_port = _DEFAULT_SERVER_PORT+offset
- if not args.manual_run:
- server_port = http2_server_job.mapped_port(server_port)
- if not args.insecure:
-            print(('Creating gRPC client for http2 server test case with an insecure connection, even though'
-                   ' args.insecure is False. The http2 test server only supports insecure connections.'))
- test_job = cloud_to_cloud_jobspec(language,
- test_case,
- str(http2InteropServer),
- 'localhost',
- server_port,
- docker_image=docker_images.get(str(language)),
- insecure=True,
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
-
- if not jobs:
- print('No jobs to run.')
- for image in six.itervalues(docker_images):
- dockerjob.remove_image(image, skip_nonexistent=True)
- sys.exit(1)
-
- if args.manual_run:
-    print('All tests will be skipped because the --manual_run option is active.')
-
- if args.verbose:
- print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
+ job = dockerjob.DockerJob(spec)
+ server_jobs[lang] = job
+ server_addresses[lang] = ('localhost',
+ job.mapped_port(_DEFAULT_SERVER_PORT))
+ else:
+ # don't run the server, set server port to a placeholder value
+ server_addresses[lang] = ('localhost', '${SERVER_PORT}')
+
+ http2_server_job = None
+ if args.http2_server_interop:
+        # launch an HTTP2 server emulator that creates edge cases
+ lang = str(http2InteropServer)
+ spec = server_jobspec(
+ http2InteropServer,
+ docker_images.get(lang),
+ manual_cmd_log=server_manual_cmd_log)
+ if not args.manual_run:
+ http2_server_job = dockerjob.DockerJob(spec)
+ server_jobs[lang] = http2_server_job
+ else:
+ # don't run the server, set server port to a placeholder value
+ server_addresses[lang] = ('localhost', '${SERVER_PORT}')
+
+ jobs = []
+ if args.cloud_to_prod:
+ if args.insecure:
+ print('TLS is always enabled for cloud_to_prod scenarios.')
+ for server_host_name in args.prod_servers:
+ for language in languages:
+ for test_case in _TEST_CASES:
+ if not test_case in language.unimplemented_test_cases():
+ if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION:
+ test_job = cloud_to_prod_jobspec(
+ language,
+ test_case,
+ server_host_name,
+ prod_servers[server_host_name],
+ docker_image=docker_images.get(str(language)),
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if args.http2_interop:
+ for test_case in _HTTP2_TEST_CASES:
+ test_job = cloud_to_prod_jobspec(
+ http2Interop,
+ test_case,
+ server_host_name,
+ prod_servers[server_host_name],
+ docker_image=docker_images.get(str(http2Interop)),
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if args.cloud_to_prod_auth:
+ if args.insecure:
+ print('TLS is always enabled for cloud_to_prod scenarios.')
+ for server_host_name in args.prod_servers:
+ for language in languages:
+ for test_case in _AUTH_TEST_CASES:
+ if not test_case in language.unimplemented_test_cases():
+ test_job = cloud_to_prod_jobspec(
+ language,
+ test_case,
+ server_host_name,
+ prod_servers[server_host_name],
+ docker_image=docker_images.get(str(language)),
+ auth=True,
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ for server in args.override_server:
+ server_name = server[0]
+ (server_host, server_port) = server[1].split(':')
+ server_addresses[server_name] = (server_host, server_port)
+
+ for server_name, server_address in server_addresses.items():
+ (server_host, server_port) = server_address
+ server_language = _LANGUAGES.get(server_name, None)
+ skip_server = [] # test cases unimplemented by server
+ if server_language:
+ skip_server = server_language.unimplemented_test_cases_server()
+ for language in languages:
+ for test_case in _TEST_CASES:
+ if not test_case in language.unimplemented_test_cases():
+ if not test_case in skip_server:
+ test_job = cloud_to_cloud_jobspec(
+ language,
+ test_case,
+ server_name,
+ server_host,
+ server_port,
+ docker_image=docker_images.get(str(language)),
+ insecure=args.insecure,
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if args.http2_interop:
+ for test_case in _HTTP2_TEST_CASES:
+ if server_name == "go":
+ # TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
+ continue
+ test_job = cloud_to_cloud_jobspec(
+ http2Interop,
+ test_case,
+ server_name,
+ server_host,
+ server_port,
+ docker_image=docker_images.get(str(http2Interop)),
+ insecure=args.insecure,
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if args.http2_server_interop:
+ if not args.manual_run:
+ http2_server_job.wait_for_healthy(timeout_seconds=600)
+ for language in languages_http2_clients_for_http2_server_interop:
+ for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(
+ _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
+ offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+ server_port = _DEFAULT_SERVER_PORT + offset
+ if not args.manual_run:
+ server_port = http2_server_job.mapped_port(server_port)
+ test_job = cloud_to_cloud_jobspec(
+ language,
+ test_case,
+ str(http2InteropServer),
+ 'localhost',
+ server_port,
+ docker_image=docker_images.get(str(language)),
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+ for language in languages:
+ # HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
+            # HTTP2_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather
+            # than specialized http2 clients, reusing existing test implementations.
+            # For example, in the "data_frame_padding" test, use the language's gRPC
+            # interop clients and make them think that they're running the "large_unary"
+ # test case. This avoids implementing a new test case in each language.
+ for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+ if test_case not in language.unimplemented_test_cases():
+ offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+ server_port = _DEFAULT_SERVER_PORT + offset
+ if not args.manual_run:
+ server_port = http2_server_job.mapped_port(server_port)
+ if not args.insecure:
+ print((
+                            'Creating gRPC client for http2 server test case with an insecure connection, even though'
+                            ' args.insecure is False. The http2 test server only supports insecure connections.'
+ ))
+ test_job = cloud_to_cloud_jobspec(
+ language,
+ test_case,
+ str(http2InteropServer),
+ 'localhost',
+ server_port,
+ docker_image=docker_images.get(str(language)),
+ insecure=True,
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if not jobs:
+ print('No jobs to run.')
+ for image in six.itervalues(docker_images):
+ dockerjob.remove_image(image, skip_nonexistent=True)
+ sys.exit(1)
+
+ if args.manual_run:
+        print('All tests will be skipped because the --manual_run option is active.')
- num_failures, resultset = jobset.run(jobs, newline_on_success=True,
- maxjobs=args.jobs,
- skip_jobs=args.manual_run)
- if args.bq_result_table and resultset:
- upload_interop_results_to_bq(resultset, args.bq_result_table, args)
- if num_failures:
- jobset.message('FAILED', 'Some tests failed', do_newline=True)
- else:
- jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+ if args.verbose:
+ print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
+
+ num_failures, resultset = jobset.run(
+ jobs,
+ newline_on_success=True,
+ maxjobs=args.jobs,
+ skip_jobs=args.manual_run)
+ if args.bq_result_table and resultset:
+ upload_interop_results_to_bq(resultset, args.bq_result_table, args)
+ if num_failures:
+ jobset.message('FAILED', 'Some tests failed', do_newline=True)
+ else:
+ jobset.message('SUCCESS', 'All tests passed', do_newline=True)
- write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
- write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')
+ write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
+ write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')
- xml_report_name = _XML_REPORT
- if args.internal_ci:
- xml_report_name = _INTERNAL_CL_XML_REPORT
- report_utils.render_junit_xml_report(resultset, xml_report_name)
+ xml_report_name = _XML_REPORT
+ if args.internal_ci:
+ xml_report_name = _INTERNAL_CL_XML_REPORT
+ report_utils.render_junit_xml_report(resultset, xml_report_name)
- for name, job in resultset.items():
- if "http2" in name:
- job[0].http2results = aggregate_http2_results(job[0].message)
+ for name, job in resultset.items():
+ if "http2" in name:
+ job[0].http2results = aggregate_http2_results(job[0].message)
- http2_server_test_cases = (
- _HTTP2_SERVER_TEST_CASES if args.http2_server_interop else [])
+ http2_server_test_cases = (_HTTP2_SERVER_TEST_CASES
+ if args.http2_server_interop else [])
- report_utils.render_interop_html_report(
- set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES,
- _HTTP2_TEST_CASES, http2_server_test_cases, resultset, num_failures,
- args.cloud_to_prod_auth or args.cloud_to_prod, args.prod_servers,
- args.http2_interop)
+ report_utils.render_interop_html_report(
+ set([str(l) for l in languages]), servers, _TEST_CASES,
+ _AUTH_TEST_CASES, _HTTP2_TEST_CASES, http2_server_test_cases, resultset,
+ num_failures, args.cloud_to_prod_auth or args.cloud_to_prod,
+ args.prod_servers, args.http2_interop)
- if num_failures:
- sys.exit(1)
- else:
- sys.exit(0)
+ if num_failures:
+ sys.exit(1)
+ else:
+ sys.exit(0)
except Exception as e:
- print('exception occurred:')
- traceback.print_exc(file=sys.stdout)
+ print('exception occurred:')
+ traceback.print_exc(file=sys.stdout)
finally:
- # Check if servers are still running.
- for server, job in server_jobs.items():
- if not job.is_running():
- print('Server "%s" has exited prematurely.' % server)
+ # Check if servers are still running.
+ for server, job in server_jobs.items():
+ if not job.is_running():
+ print('Server "%s" has exited prematurely.' % server)
- dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
+ dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
- for image in six.itervalues(docker_images):
- if not args.manual_run:
- print('Removing docker image %s' % image)
- dockerjob.remove_image(image)
- else:
- print('Preserving docker image: %s' % image)
+ for image in six.itervalues(docker_images):
+ if not args.manual_run:
+ print('Removing docker image %s' % image)
+ dockerjob.remove_image(image)
+ else:
+ print('Preserving docker image: %s' % image)
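A few illustrative invocations built only from the flags defined above; whether a particular language or server name is accepted depends on _LANGUAGES and _SERVERS, which are defined earlier in this script:

    # Go client against a C# server, each in its own docker container:
    $ tools/run_tests/run_interop_tests.py -l go -s csharp --use_docker

    # All clients (except objc) against the default prod endpoint:
    $ tools/run_tests/run_interop_tests.py --cloud_to_prod --prod_servers default

    # Build the docker images but only print the test commands to run by hand:
    $ tools/run_tests/run_interop_tests.py -l java -s java --use_docker --manual_run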
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index c136af58cb..561217ceb1 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -23,26 +23,31 @@ import argparse
import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server
-sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'profiling', 'microbenchmarks', 'bm_diff'))
+sys.path.append(
+ os.path.join(
+ os.path.dirname(sys.argv[0]), '..', 'profiling', 'microbenchmarks',
+ 'bm_diff'))
import bm_constants
flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
- os.makedirs('reports')
+ os.makedirs('reports')
start_port_server.start_port_server()
+
def fnize(s):
- out = ''
- for c in s:
- if c in '<>, /':
- if len(out) and out[-1] == '_': continue
- out += '_'
- else:
- out += c
- return out
+ out = ''
+ for c in s:
+ if c in '<>, /':
+ if len(out) and out[-1] == '_': continue
+ out += '_'
+ else:
+ out += c
+ return out
+
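A worked example of fnize() on an illustrative benchmark name, following the rules above (runs of '<', '>', ',', ' ' and '/' collapse into single underscores):

    print(fnize('BM_StreamingPingPong<TCP, NoOpMutator>/0/0'))
    # -> BM_StreamingPingPong_TCP_NoOpMutator_0_0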
# index html
index_html = """
@@ -53,169 +58,202 @@ index_html = """
<body>
"""
+
def heading(name):
- global index_html
- index_html += "<h1>%s</h1>\n" % name
+ global index_html
+ index_html += "<h1>%s</h1>\n" % name
+
def link(txt, tgt):
- global index_html
- index_html += "<p><a href=\"%s\">%s</a></p>\n" % (
- cgi.escape(tgt, quote=True), cgi.escape(txt))
+ global index_html
+ index_html += "<p><a href=\"%s\">%s</a></p>\n" % (
+ cgi.escape(tgt, quote=True), cgi.escape(txt))
+
def text(txt):
- global index_html
- index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
+ global index_html
+ index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
+
def collect_latency(bm_name, args):
- """generate latency profiles"""
- benchmarks = []
- profile_analysis = []
- cleanup = []
-
- heading('Latency Profiles: %s' % bm_name)
- subprocess.check_call(
- ['make', bm_name,
- 'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()])
- for line in subprocess.check_output(['bins/basicprof/%s' % bm_name,
- '--benchmark_list_tests']).splitlines():
- link(line, '%s.txt' % fnize(line))
- benchmarks.append(
- jobset.JobSpec(['bins/basicprof/%s' % bm_name,
- '--benchmark_filter=^%s$' % line,
- '--benchmark_min_time=0.05'],
- environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
- shortname='profile-%s' % fnize(line)))
- profile_analysis.append(
- jobset.JobSpec([sys.executable,
- 'tools/profiling/latency_profile/profile_analyzer.py',
- '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
- '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=20*60,
- shortname='analyze-%s' % fnize(line)))
- cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
- # periodically flush out the list of jobs: profile_analysis jobs at least
- # consume upwards of five gigabytes of ram in some cases, and so analysing
- # hundreds of them at once is impractical -- but we want at least some
- # concurrency or the work takes too long
- if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
- # run up to half the cpu count: each benchmark can use up to two cores
- # (one for the microbenchmark, one for the data flush)
- jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
- jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
- jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
- benchmarks = []
- profile_analysis = []
- cleanup = []
- # run the remaining benchmarks that weren't flushed
- if len(benchmarks):
- jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
- jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
- jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+ """generate latency profiles"""
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+
+ heading('Latency Profiles: %s' % bm_name)
+ subprocess.check_call([
+ 'make', bm_name, 'CONFIG=basicprof', '-j',
+ '%d' % multiprocessing.cpu_count()
+ ])
+ for line in subprocess.check_output(
+ ['bins/basicprof/%s' % bm_name, '--benchmark_list_tests']).splitlines():
+ link(line, '%s.txt' % fnize(line))
+ benchmarks.append(
+ jobset.JobSpec(
+ [
+ 'bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' %
+ line, '--benchmark_min_time=0.05'
+ ],
+ environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
+ shortname='profile-%s' % fnize(line)))
+ profile_analysis.append(
+ jobset.JobSpec(
+ [
+ sys.executable,
+ 'tools/profiling/latency_profile/profile_analyzer.py',
+ '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
+ '--out', 'reports/%s.txt' % fnize(line)
+ ],
+ timeout_seconds=20 * 60,
+ shortname='analyze-%s' % fnize(line)))
+ cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
+ # periodically flush out the list of jobs: profile_analysis jobs at least
+ # consume upwards of five gigabytes of ram in some cases, and so analysing
+ # hundreds of them at once is impractical -- but we want at least some
+ # concurrency or the work takes too long
+ if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
+ # run up to half the cpu count: each benchmark can use up to two cores
+ # (one for the microbenchmark, one for the data flush)
+ jobset.run(
+ benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+ # run the remaining benchmarks that weren't flushed
+ if len(benchmarks):
+ jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+
def collect_perf(bm_name, args):
- """generate flamegraphs"""
- heading('Flamegraphs: %s' % bm_name)
- subprocess.check_call(
- ['make', bm_name,
- 'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
- benchmarks = []
- profile_analysis = []
- cleanup = []
- for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
- '--benchmark_list_tests']).splitlines():
- link(line, '%s.svg' % fnize(line))
- benchmarks.append(
- jobset.JobSpec(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
- '-g', '-F', '997',
- 'bins/mutrace/%s' % bm_name,
- '--benchmark_filter=^%s$' % line,
- '--benchmark_min_time=10'],
- shortname='perf-%s' % fnize(line)))
- profile_analysis.append(
- jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
- environ = {
- 'PERF_BASE_NAME': fnize(line),
- 'OUTPUT_DIR': 'reports',
- 'OUTPUT_FILENAME': fnize(line),
- },
- shortname='flame-%s' % fnize(line)))
- cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
- cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
- # periodically flush out the list of jobs: temporary space required for this
- # processing is large
- if len(benchmarks) >= 20:
- # run up to half the cpu count: each benchmark can use up to two cores
- # (one for the microbenchmark, one for the data flush)
- jobset.run(benchmarks, maxjobs=1)
- jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
- jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
- benchmarks = []
- profile_analysis = []
- cleanup = []
- # run the remaining benchmarks that weren't flushed
- if len(benchmarks):
- jobset.run(benchmarks, maxjobs=1)
- jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
- jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+ """generate flamegraphs"""
+ heading('Flamegraphs: %s' % bm_name)
+ subprocess.check_call([
+ 'make', bm_name, 'CONFIG=mutrace', '-j',
+ '%d' % multiprocessing.cpu_count()
+ ])
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+ for line in subprocess.check_output(
+ ['bins/mutrace/%s' % bm_name, '--benchmark_list_tests']).splitlines():
+ link(line, '%s.svg' % fnize(line))
+ benchmarks.append(
+ jobset.JobSpec(
+ [
+ 'perf', 'record', '-o', '%s-perf.data' % fnize(
+ line), '-g', '-F', '997', 'bins/mutrace/%s' % bm_name,
+ '--benchmark_filter=^%s$' % line, '--benchmark_min_time=10'
+ ],
+ shortname='perf-%s' % fnize(line)))
+ profile_analysis.append(
+ jobset.JobSpec(
+ [
+ 'tools/run_tests/performance/process_local_perf_flamegraphs.sh'
+ ],
+ environ={
+ 'PERF_BASE_NAME': fnize(line),
+ 'OUTPUT_DIR': 'reports',
+ 'OUTPUT_FILENAME': fnize(line),
+ },
+ shortname='flame-%s' % fnize(line)))
+ cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
+ cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
+ # periodically flush out the list of jobs: temporary space required for this
+ # processing is large
+ if len(benchmarks) >= 20:
+ # run up to half the cpu count: each benchmark can use up to two cores
+ # (one for the microbenchmark, one for the data flush)
+ jobset.run(benchmarks, maxjobs=1)
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+ # run the remaining benchmarks that weren't flushed
+ if len(benchmarks):
+ jobset.run(benchmarks, maxjobs=1)
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+
def run_summary(bm_name, cfg, base_json_name):
- subprocess.check_call(
- ['make', bm_name,
- 'CONFIG=%s' % cfg, '-j', '%d' % multiprocessing.cpu_count()])
- cmd = ['bins/%s/%s' % (cfg, bm_name),
- '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
- '--benchmark_out_format=json']
- if args.summary_time is not None:
- cmd += ['--benchmark_min_time=%d' % args.summary_time]
- return subprocess.check_output(cmd)
+ subprocess.check_call([
+ 'make', bm_name, 'CONFIG=%s' % cfg, '-j',
+ '%d' % multiprocessing.cpu_count()
+ ])
+ cmd = [
+ 'bins/%s/%s' % (cfg, bm_name), '--benchmark_out=%s.%s.json' %
+ (base_json_name, cfg), '--benchmark_out_format=json'
+ ]
+ if args.summary_time is not None:
+ cmd += ['--benchmark_min_time=%d' % args.summary_time]
+ return subprocess.check_output(cmd)
+
def collect_summary(bm_name, args):
- heading('Summary: %s [no counters]' % bm_name)
- text(run_summary(bm_name, 'opt', bm_name))
- heading('Summary: %s [with counters]' % bm_name)
- text(run_summary(bm_name, 'counters', bm_name))
- if args.bigquery_upload:
- with open('%s.csv' % bm_name, 'w') as f:
- f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py',
- '%s.counters.json' % bm_name,
- '%s.opt.json' % bm_name]))
- subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name])
+ heading('Summary: %s [no counters]' % bm_name)
+ text(run_summary(bm_name, 'opt', bm_name))
+ heading('Summary: %s [with counters]' % bm_name)
+ text(run_summary(bm_name, 'counters', bm_name))
+ if args.bigquery_upload:
+ with open('%s.csv' % bm_name, 'w') as f:
+ f.write(
+ subprocess.check_output([
+ 'tools/profiling/microbenchmarks/bm2bq.py',
+ '%s.counters.json' % bm_name, '%s.opt.json' % bm_name
+ ]))
+ subprocess.check_call([
+ 'bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name
+ ])
+
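After collect_summary('bm_foo', args) runs ('bm_foo' is a placeholder name), the working directory should contain bm_foo.opt.json and bm_foo.counters.json per the --benchmark_out pattern above, plus bm_foo.csv when --bigquery_upload is set. A quick way to inspect one of them, assuming the standard Google Benchmark JSON layout:

    import json

    with open('bm_foo.opt.json') as f:
        data = json.load(f)
    for bench in data['benchmarks']:
        print('%s %s' % (bench['name'], bench['real_time']))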
collectors = {
- 'latency': collect_latency,
- 'perf': collect_perf,
- 'summary': collect_summary,
+ 'latency': collect_latency,
+ 'perf': collect_perf,
+ 'summary': collect_summary,
}
argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
-argp.add_argument('-c', '--collect',
- choices=sorted(collectors.keys()),
- nargs='*',
- default=sorted(collectors.keys()),
- help='Which collectors should be run against each benchmark')
-argp.add_argument('-b', '--benchmarks',
- choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- nargs='+',
- type=str,
- help='Which microbenchmarks should be run')
-argp.add_argument('--bigquery_upload',
- default=False,
- action='store_const',
- const=True,
- help='Upload results from summary collection to bigquery')
-argp.add_argument('--summary_time',
- default=None,
- type=int,
- help='Minimum time to run benchmarks for the summary collection')
+argp.add_argument(
+ '-c',
+ '--collect',
+ choices=sorted(collectors.keys()),
+ nargs='*',
+ default=sorted(collectors.keys()),
+ help='Which collectors should be run against each benchmark')
+argp.add_argument(
+ '-b',
+ '--benchmarks',
+ choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ nargs='+',
+ type=str,
+ help='Which microbenchmarks should be run')
+argp.add_argument(
+ '--bigquery_upload',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Upload results from summary collection to bigquery')
+argp.add_argument(
+ '--summary_time',
+ default=None,
+ type=int,
+ help='Minimum time to run benchmarks for the summary collection')
args = argp.parse_args()
try:
- for collect in args.collect:
- for bm_name in args.benchmarks:
- collectors[collect](bm_name, args)
+ for collect in args.collect:
+ for bm_name in args.benchmarks:
+ collectors[collect](bm_name, args)
finally:
- if not os.path.exists('reports'):
- os.makedirs('reports')
- index_html += "</body>\n</html>\n"
- with open('reports/index.html', 'w') as f:
- f.write(index_html)
+ if not os.path.exists('reports'):
+ os.makedirs('reports')
+ index_html += "</body>\n</html>\n"
+ with open('reports/index.html', 'w') as f:
+ f.write(index_html)
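Illustrative invocations using the flags defined above; the benchmark name is a placeholder, since valid values come from bm_constants._AVAILABLE_BENCHMARK_TESTS:

    # Only the summary collector, for a single benchmark, with a 5 second floor:
    $ tools/run_tests/run_microbenchmark.py -c summary -b bm_cq --summary_time 5

    # All collectors (latency, perf, summary) over the default benchmark set:
    $ tools/run_tests/run_microbenchmark.py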
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index aa305be466..03b684b318 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Run performance tests locally or remotely."""
from __future__ import print_function
@@ -37,566 +36,666 @@ import performance.scenario_config as scenario_config
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
-
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
-
_REMOTE_HOST_USERNAME = 'jenkins'
class QpsWorkerJob:
- """Encapsulates a qps worker server job."""
-
- def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
- self._spec = spec
- self.language = language
- self.host_and_port = host_and_port
- self._job = None
- self.perf_file_base_name = perf_file_base_name
-
- def start(self):
- self._job = jobset.Job(self._spec, newline_on_success=True, travis=True, add_env={})
-
- def is_running(self):
- """Polls a job and returns True if given job is still running."""
- return self._job and self._job.state() == jobset._RUNNING
-
- def kill(self):
- if self._job:
- self._job.kill()
- self._job = None
-
-
-def create_qpsworker_job(language, shortname=None, port=10000, remote_host=None, perf_cmd=None):
- cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])
-
- if remote_host:
- host_and_port='%s:%s' % (remote_host, port)
- else:
- host_and_port='localhost:%s' % port
-
- perf_file_base_name = None
- if perf_cmd:
- perf_file_base_name = '%s-%s' % (host_and_port, shortname)
- # specify -o output file so perf.data gets collected when worker stopped
- cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name] + cmdline
-
- worker_timeout = 3 * 60
- if remote_host:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
- ssh_cmd = ['ssh']
- cmdline = ['timeout', '%s' % (worker_timeout + 30)] + cmdline
- ssh_cmd.extend([str(user_at_host), 'cd ~/performance_workspace/grpc/ && python tools/run_tests/start_port_server.py && %s' % ' '.join(cmdline)])
- cmdline = ssh_cmd
-
- jobspec = jobset.JobSpec(
- cmdline=cmdline,
- shortname=shortname,
- timeout_seconds=worker_timeout, # workers get restarted after each scenario
- verbose_success=True)
- return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
-
-
-def create_scenario_jobspec(scenario_json, workers, remote_host=None,
- bq_result_table=None, server_cpu_load=0):
- """Runs one scenario using QPS driver."""
- # setting QPS_WORKERS env variable here makes sure it works with SSH too.
- cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
- if bq_result_table:
- cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
- cmd += 'tools/run_tests/performance/run_qps_driver.sh '
- cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
- cmd += '--scenario_result_file=scenario_result.json '
- if server_cpu_load != 0:
- cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
- if remote_host:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
- cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
-
- return jobset.JobSpec(
- cmdline=[cmd],
- shortname='qps_json_driver.%s' % scenario_json['name'],
- timeout_seconds=12*60,
- shell=True,
- verbose_success=True)
+ """Encapsulates a qps worker server job."""
+
+ def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
+ self._spec = spec
+ self.language = language
+ self.host_and_port = host_and_port
+ self._job = None
+ self.perf_file_base_name = perf_file_base_name
+
+ def start(self):
+ self._job = jobset.Job(
+ self._spec, newline_on_success=True, travis=True, add_env={})
+
+ def is_running(self):
+ """Polls a job and returns True if given job is still running."""
+ return self._job and self._job.state() == jobset._RUNNING
+
+ def kill(self):
+ if self._job:
+ self._job.kill()
+ self._job = None
+
+
+def create_qpsworker_job(language,
+ shortname=None,
+ port=10000,
+ remote_host=None,
+ perf_cmd=None):
+ cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])
+
+ if remote_host:
+ host_and_port = '%s:%s' % (remote_host, port)
+ else:
+ host_and_port = 'localhost:%s' % port
+
+ perf_file_base_name = None
+ if perf_cmd:
+ perf_file_base_name = '%s-%s' % (host_and_port, shortname)
+ # specify -o output file so perf.data gets collected when worker stopped
+ cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name
+ ] + cmdline
+
+ worker_timeout = 3 * 60
+ if remote_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+ ssh_cmd = ['ssh']
+ cmdline = ['timeout', '%s' % (worker_timeout + 30)] + cmdline
+ ssh_cmd.extend([
+ str(user_at_host),
+ 'cd ~/performance_workspace/grpc/ && python tools/run_tests/start_port_server.py && %s'
+ % ' '.join(cmdline)
+ ])
+ cmdline = ssh_cmd
+
+ jobspec = jobset.JobSpec(
+ cmdline=cmdline,
+ shortname=shortname,
+ timeout_seconds=worker_timeout, # workers get restarted after each scenario
+ verbose_success=True)
+ return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
+
+
+def create_scenario_jobspec(scenario_json,
+ workers,
+ remote_host=None,
+ bq_result_table=None,
+ server_cpu_load=0):
+ """Runs one scenario using QPS driver."""
+ # setting QPS_WORKERS env variable here makes sure it works with SSH too.
+ cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
+ if bq_result_table:
+ cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
+ cmd += 'tools/run_tests/performance/run_qps_driver.sh '
+ cmd += '--scenarios_json=%s ' % pipes.quote(
+ json.dumps({
+ 'scenarios': [scenario_json]
+ }))
+ cmd += '--scenario_result_file=scenario_result.json '
+ if server_cpu_load != 0:
+ cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
+ if remote_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+ cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+ user_at_host, pipes.quote(cmd))
+
+ return jobset.JobSpec(
+ cmdline=[cmd],
+ shortname='qps_json_driver.%s' % scenario_json['name'],
+ timeout_seconds=12 * 60,
+ shell=True,
+ verbose_success=True)
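As a worked example, for workers ['localhost:10000', 'localhost:10010'] and a toy scenario {'name': 'dummy_scenario'} (real scenarios carry many more keys), with no BQ table and server_cpu_load=0, the command assembled above is (line breaks added for readability):

    QPS_WORKERS="localhost:10000,localhost:10010" \
      tools/run_tests/performance/run_qps_driver.sh \
      --scenarios_json='{"scenarios": [{"name": "dummy_scenario"}]}' \
      --scenario_result_file=scenario_result.json

jobset.JobSpec runs this with shell=True, so QPS_WORKERS is scoped to the driver process; when remote_host is set, the whole string is additionally quoted and wrapped in the ssh command shown in the function body.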
def create_quit_jobspec(workers, remote_host=None):
- """Runs quit using QPS driver."""
- # setting QPS_WORKERS env variable here makes sure it works with SSH too.
- cmd = 'QPS_WORKERS="%s" bins/opt/qps_json_driver --quit' % ','.join(w.host_and_port for w in workers)
- if remote_host:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
- cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
-
- return jobset.JobSpec(
- cmdline=[cmd],
- shortname='qps_json_driver.quit',
- timeout_seconds=3*60,
- shell=True,
- verbose_success=True)
-
-
-def create_netperf_jobspec(server_host='localhost', client_host=None,
+ """Runs quit using QPS driver."""
+ # setting QPS_WORKERS env variable here makes sure it works with SSH too.
+ cmd = 'QPS_WORKERS="%s" bins/opt/qps_json_driver --quit' % ','.join(
+ w.host_and_port for w in workers)
+ if remote_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+ cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+ user_at_host, pipes.quote(cmd))
+
+ return jobset.JobSpec(
+ cmdline=[cmd],
+ shortname='qps_json_driver.quit',
+ timeout_seconds=3 * 60,
+ shell=True,
+ verbose_success=True)
+
+
+def create_netperf_jobspec(server_host='localhost',
+ client_host=None,
bq_result_table=None):
- """Runs netperf benchmark."""
- cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
- if bq_result_table:
- cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
- if client_host:
- # If netperf is running remotely, the env variables populated by Jenkins
- # won't be available on the client, but we need them for uploading results
- # to BigQuery.
- jenkins_job_name = os.getenv('JOB_NAME')
- if jenkins_job_name:
- cmd += 'JOB_NAME="%s" ' % jenkins_job_name
- jenkins_build_number = os.getenv('BUILD_NUMBER')
- if jenkins_build_number:
- cmd += 'BUILD_NUMBER="%s" ' % jenkins_build_number
-
- cmd += 'tools/run_tests/performance/run_netperf.sh'
- if client_host:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
- cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
-
- return jobset.JobSpec(
- cmdline=[cmd],
- shortname='netperf',
- timeout_seconds=60,
- shell=True,
- verbose_success=True)
+ """Runs netperf benchmark."""
+ cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
+ if bq_result_table:
+ cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
+ if client_host:
+ # If netperf is running remotely, the env variables populated by Jenkins
+ # won't be available on the client, but we need them for uploading results
+ # to BigQuery.
+ jenkins_job_name = os.getenv('JOB_NAME')
+ if jenkins_job_name:
+ cmd += 'JOB_NAME="%s" ' % jenkins_job_name
+ jenkins_build_number = os.getenv('BUILD_NUMBER')
+ if jenkins_build_number:
+ cmd += 'BUILD_NUMBER="%s" ' % jenkins_build_number
+
+ cmd += 'tools/run_tests/performance/run_netperf.sh'
+ if client_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
+ cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+ user_at_host, pipes.quote(cmd))
+
+ return jobset.JobSpec(
+ cmdline=[cmd],
+ shortname='netperf',
+ timeout_seconds=60,
+ shell=True,
+ verbose_success=True)
def archive_repo(languages):
- """Archives local version of repo including submodules."""
- cmdline=['tar', '-cf', '../grpc.tar', '../grpc/']
- if 'java' in languages:
- cmdline.append('../grpc-java')
- if 'go' in languages:
- cmdline.append('../grpc-go')
-
- archive_job = jobset.JobSpec(
- cmdline=cmdline,
- shortname='archive_repo',
- timeout_seconds=3*60)
-
- jobset.message('START', 'Archiving local repository.', do_newline=True)
- num_failures, _ = jobset.run(
- [archive_job], newline_on_success=True, maxjobs=1)
- if num_failures == 0:
- jobset.message('SUCCESS',
- 'Archive with local repository created successfully.',
- do_newline=True)
- else:
- jobset.message('FAILED', 'Failed to archive local repository.',
- do_newline=True)
- sys.exit(1)
+ """Archives local version of repo including submodules."""
+ cmdline = ['tar', '-cf', '../grpc.tar', '../grpc/']
+ if 'java' in languages:
+ cmdline.append('../grpc-java')
+ if 'go' in languages:
+ cmdline.append('../grpc-go')
+
+ archive_job = jobset.JobSpec(
+ cmdline=cmdline, shortname='archive_repo', timeout_seconds=3 * 60)
+
+ jobset.message('START', 'Archiving local repository.', do_newline=True)
+ num_failures, _ = jobset.run(
+ [archive_job], newline_on_success=True, maxjobs=1)
+ if num_failures == 0:
+ jobset.message(
+ 'SUCCESS',
+ 'Archive with local repository created successfully.',
+ do_newline=True)
+ else:
+ jobset.message(
+ 'FAILED', 'Failed to archive local repository.', do_newline=True)
+ sys.exit(1)
def prepare_remote_hosts(hosts, prepare_local=False):
- """Prepares remote hosts (and maybe prepare localhost as well)."""
- prepare_timeout = 10*60
- prepare_jobs = []
- for host in hosts:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
- prepare_jobs.append(
- jobset.JobSpec(
- cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
- shortname='remote_host_prepare.%s' % host,
- environ = {'USER_AT_HOST': user_at_host},
- timeout_seconds=prepare_timeout))
- if prepare_local:
- # Prepare localhost as well
- prepare_jobs.append(
- jobset.JobSpec(
- cmdline=['tools/run_tests/performance/kill_workers.sh'],
- shortname='local_prepare',
- timeout_seconds=prepare_timeout))
- jobset.message('START', 'Preparing hosts.', do_newline=True)
- num_failures, _ = jobset.run(
- prepare_jobs, newline_on_success=True, maxjobs=10)
- if num_failures == 0:
- jobset.message('SUCCESS',
- 'Prepare step completed successfully.',
- do_newline=True)
- else:
- jobset.message('FAILED', 'Failed to prepare remote hosts.',
- do_newline=True)
- sys.exit(1)
-
-
-def build_on_remote_hosts(hosts, languages=scenario_config.LANGUAGES.keys(), build_local=False):
- """Builds performance worker on remote hosts (and maybe also locally)."""
- build_timeout = 15*60
- # Kokoro VMs (which are local only) do not have caching, so they need more time to build
- local_build_timeout = 30*60
- build_jobs = []
- for host in hosts:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
- build_jobs.append(
- jobset.JobSpec(
- cmdline=['tools/run_tests/performance/remote_host_build.sh'] + languages,
- shortname='remote_host_build.%s' % host,
- environ = {'USER_AT_HOST': user_at_host, 'CONFIG': 'opt'},
- timeout_seconds=build_timeout))
- if build_local:
- # Build locally as well
- build_jobs.append(
- jobset.JobSpec(
- cmdline=['tools/run_tests/performance/build_performance.sh'] + languages,
- shortname='local_build',
- environ = {'CONFIG': 'opt'},
- timeout_seconds=local_build_timeout))
- jobset.message('START', 'Building.', do_newline=True)
- num_failures, _ = jobset.run(
- build_jobs, newline_on_success=True, maxjobs=10)
- if num_failures == 0:
- jobset.message('SUCCESS',
- 'Built successfully.',
- do_newline=True)
- else:
- jobset.message('FAILED', 'Build failed.',
- do_newline=True)
- sys.exit(1)
+ """Prepares remote hosts (and maybe prepare localhost as well)."""
+ prepare_timeout = 10 * 60
+ prepare_jobs = []
+ for host in hosts:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
+ prepare_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
+ shortname='remote_host_prepare.%s' % host,
+ environ={'USER_AT_HOST': user_at_host},
+ timeout_seconds=prepare_timeout))
+ if prepare_local:
+ # Prepare localhost as well
+ prepare_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/kill_workers.sh'],
+ shortname='local_prepare',
+ timeout_seconds=prepare_timeout))
+ jobset.message('START', 'Preparing hosts.', do_newline=True)
+ num_failures, _ = jobset.run(
+ prepare_jobs, newline_on_success=True, maxjobs=10)
+ if num_failures == 0:
+ jobset.message(
+ 'SUCCESS', 'Prepare step completed successfully.', do_newline=True)
+ else:
+ jobset.message(
+ 'FAILED', 'Failed to prepare remote hosts.', do_newline=True)
+ sys.exit(1)
+
+
+def build_on_remote_hosts(hosts,
+ languages=scenario_config.LANGUAGES.keys(),
+ build_local=False):
+ """Builds performance worker on remote hosts (and maybe also locally)."""
+ build_timeout = 15 * 60
+ # Kokoro VMs (which are local only) do not have caching, so they need more time to build
+ local_build_timeout = 30 * 60
+ build_jobs = []
+ for host in hosts:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
+ build_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/remote_host_build.sh'] +
+ languages,
+ shortname='remote_host_build.%s' % host,
+ environ={'USER_AT_HOST': user_at_host,
+ 'CONFIG': 'opt'},
+ timeout_seconds=build_timeout))
+ if build_local:
+ # Build locally as well
+ build_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/build_performance.sh'] +
+ languages,
+ shortname='local_build',
+ environ={'CONFIG': 'opt'},
+ timeout_seconds=local_build_timeout))
+ jobset.message('START', 'Building.', do_newline=True)
+ num_failures, _ = jobset.run(
+ build_jobs, newline_on_success=True, maxjobs=10)
+ if num_failures == 0:
+ jobset.message('SUCCESS', 'Built successfully.', do_newline=True)
+ else:
+ jobset.message('FAILED', 'Build failed.', do_newline=True)
+ sys.exit(1)
def create_qpsworkers(languages, worker_hosts, perf_cmd=None):
- """Creates QPS workers (but does not start them)."""
- if not worker_hosts:
- # run two workers locally (for each language)
- workers=[(None, 10000), (None, 10010)]
- elif len(worker_hosts) == 1:
- # run two workers on the remote host (for each language)
- workers=[(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
- else:
- # run one worker per each remote host (for each language)
- workers=[(worker_host, 10000) for worker_host in worker_hosts]
-
- return [create_qpsworker_job(language,
- shortname= 'qps_worker_%s_%s' % (language,
- worker_idx),
- port=worker[1] + language.worker_port_offset(),
- remote_host=worker[0],
- perf_cmd=perf_cmd)
- for language in languages
- for worker_idx, worker in enumerate(workers)]
-
-
-def perf_report_processor_job(worker_host, perf_base_name, output_filename, flame_graph_reports):
- print('Creating perf report collection job for %s' % worker_host)
- cmd = ''
- if worker_host != 'localhost':
- user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
- cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\
- tools/run_tests/performance/process_remote_perf_flamegraphs.sh" \
- % (user_at_host, output_filename, flame_graph_reports, perf_base_name)
- else:
- cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\
- tools/run_tests/performance/process_local_perf_flamegraphs.sh" \
- % (output_filename, flame_graph_reports, perf_base_name)
-
- return jobset.JobSpec(cmdline=cmd,
- timeout_seconds=3*60,
- shell=True,
- verbose_success=True,
- shortname='process perf report')
+ """Creates QPS workers (but does not start them)."""
+ if not worker_hosts:
+ # run two workers locally (for each language)
+ workers = [(None, 10000), (None, 10010)]
+ elif len(worker_hosts) == 1:
+ # run two workers on the remote host (for each language)
+ workers = [(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
+ else:
+ # run one worker per each remote host (for each language)
+ workers = [(worker_host, 10000) for worker_host in worker_hosts]
+
+ return [
+ create_qpsworker_job(
+ language,
+ shortname='qps_worker_%s_%s' % (language, worker_idx),
+ port=worker[1] + language.worker_port_offset(),
+ remote_host=worker[0],
+ perf_cmd=perf_cmd)
+ for language in languages for worker_idx, worker in enumerate(workers)
+ ]
+
+
+def perf_report_processor_job(worker_host, perf_base_name, output_filename,
+ flame_graph_reports):
+ print('Creating perf report collection job for %s' % worker_host)
+ cmd = ''
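+    # The flamegraph processing scripts take their inputs via the environment variables prepended to the command.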
+ if worker_host != 'localhost':
+ user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
+ cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%stools/run_tests/performance/process_remote_perf_flamegraphs.sh" % (
+ user_at_host, output_filename, flame_graph_reports, perf_base_name)
+ else:
+ cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%stools/run_tests/performance/process_local_perf_flamegraphs.sh" % (
+ output_filename, flame_graph_reports, perf_base_name)
+
+ return jobset.JobSpec(
+ cmdline=cmd,
+ timeout_seconds=3 * 60,
+ shell=True,
+ verbose_success=True,
+ shortname='process perf report')
Scenario = collections.namedtuple('Scenario', 'jobspec workers name')
-def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
- category='all', bq_result_table=None,
- netperf=False, netperf_hosts=[], server_cpu_load=0):
- """Create jobspecs for scenarios to run."""
- all_workers = [worker
- for workers in workers_by_lang.values()
- for worker in workers]
- scenarios = []
- _NO_WORKERS = []
-
- if netperf:
- if not netperf_hosts:
- netperf_server='localhost'
- netperf_client=None
- elif len(netperf_hosts) == 1:
- netperf_server=netperf_hosts[0]
- netperf_client=netperf_hosts[0]
- else:
- netperf_server=netperf_hosts[0]
- netperf_client=netperf_hosts[1]
- scenarios.append(Scenario(
- create_netperf_jobspec(server_host=netperf_server,
- client_host=netperf_client,
- bq_result_table=bq_result_table),
- _NO_WORKERS, 'netperf'))
-
- for language in languages:
- for scenario_json in language.scenarios():
- if re.search(regex, scenario_json['name']):
- categories = scenario_json.get('CATEGORIES', ['scalable', 'smoketest'])
- if category in categories or category == 'all':
- workers = workers_by_lang[str(language)][:]
- # 'SERVER_LANGUAGE' is an indicator for this script to pick
- # a server in different language.
- custom_server_lang = scenario_json.get('SERVER_LANGUAGE', None)
- custom_client_lang = scenario_json.get('CLIENT_LANGUAGE', None)
- scenario_json = scenario_config.remove_nonproto_fields(scenario_json)
- if custom_server_lang and custom_client_lang:
- raise Exception('Cannot set both custom CLIENT_LANGUAGE and SERVER_LANGUAGE'
+def create_scenarios(languages,
+ workers_by_lang,
+ remote_host=None,
+ regex='.*',
+ category='all',
+ bq_result_table=None,
+ netperf=False,
+ netperf_hosts=[],
+ server_cpu_load=0):
+ """Create jobspecs for scenarios to run."""
+ all_workers = [
+ worker for workers in workers_by_lang.values() for worker in workers
+ ]
+ scenarios = []
+ _NO_WORKERS = []
+
+ if netperf:
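+        # Pick the netperf endpoints: localhost only, one remote host for both ends, or the first two remote hosts.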
+ if not netperf_hosts:
+ netperf_server = 'localhost'
+ netperf_client = None
+ elif len(netperf_hosts) == 1:
+ netperf_server = netperf_hosts[0]
+ netperf_client = netperf_hosts[0]
+ else:
+ netperf_server = netperf_hosts[0]
+ netperf_client = netperf_hosts[1]
+ scenarios.append(
+ Scenario(
+ create_netperf_jobspec(
+ server_host=netperf_server,
+ client_host=netperf_client,
+ bq_result_table=bq_result_table), _NO_WORKERS, 'netperf'))
+
+ for language in languages:
+ for scenario_json in language.scenarios():
+ if re.search(regex, scenario_json['name']):
+ categories = scenario_json.get('CATEGORIES',
+ ['scalable', 'smoketest'])
+ if category in categories or category == 'all':
+ workers = workers_by_lang[str(language)][:]
+ # 'SERVER_LANGUAGE' is an indicator for this script to pick
+                # a server in a different language.
+ custom_server_lang = scenario_json.get('SERVER_LANGUAGE',
+ None)
+ custom_client_lang = scenario_json.get('CLIENT_LANGUAGE',
+ None)
+ scenario_json = scenario_config.remove_nonproto_fields(
+ scenario_json)
+ if custom_server_lang and custom_client_lang:
+ raise Exception(
+ 'Cannot set both custom CLIENT_LANGUAGE and SERVER_LANGUAGE'
'in the same scenario')
- if custom_server_lang:
- if not workers_by_lang.get(custom_server_lang, []):
- print('Warning: Skipping scenario %s as' % scenario_json['name'])
- print('SERVER_LANGUAGE is set to %s yet the language has '
- 'not been selected with -l' % custom_server_lang)
- continue
- for idx in range(0, scenario_json['num_servers']):
- # replace first X workers by workers of a different language
- workers[idx] = workers_by_lang[custom_server_lang][idx]
- if custom_client_lang:
- if not workers_by_lang.get(custom_client_lang, []):
- print('Warning: Skipping scenario %s as' % scenario_json['name'])
- print('CLIENT_LANGUAGE is set to %s yet the language has '
- 'not been selected with -l' % custom_client_lang)
- continue
- for idx in range(scenario_json['num_servers'], len(workers)):
- # replace all client workers by workers of a different language,
- # leave num_server workers as they are server workers.
- workers[idx] = workers_by_lang[custom_client_lang][idx]
- scenario = Scenario(
- create_scenario_jobspec(scenario_json,
- [w.host_and_port for w in workers],
- remote_host=remote_host,
- bq_result_table=bq_result_table,
- server_cpu_load=server_cpu_load),
- workers,
- scenario_json['name'])
- scenarios.append(scenario)
-
- return scenarios
+ if custom_server_lang:
+ if not workers_by_lang.get(custom_server_lang, []):
+ print('Warning: Skipping scenario %s as' %
+ scenario_json['name'])
+ print(
+ 'SERVER_LANGUAGE is set to %s yet the language has '
+ 'not been selected with -l' %
+ custom_server_lang)
+ continue
+ for idx in range(0, scenario_json['num_servers']):
+                        # replace the first num_servers workers with workers of a different language
+ workers[idx] = workers_by_lang[custom_server_lang][
+ idx]
+ if custom_client_lang:
+ if not workers_by_lang.get(custom_client_lang, []):
+ print('Warning: Skipping scenario %s as' %
+ scenario_json['name'])
+ print(
+ 'CLIENT_LANGUAGE is set to %s yet the language has '
+ 'not been selected with -l' %
+ custom_client_lang)
+ continue
+ for idx in range(scenario_json['num_servers'],
+ len(workers)):
+                        # replace all client workers with workers of a different language;
+                        # leave the first num_servers workers alone, as they are server workers.
+ workers[idx] = workers_by_lang[custom_client_lang][
+ idx]
+ scenario = Scenario(
+ create_scenario_jobspec(
+ scenario_json, [w.host_and_port for w in workers],
+ remote_host=remote_host,
+ bq_result_table=bq_result_table,
+ server_cpu_load=server_cpu_load), workers,
+ scenario_json['name'])
+ scenarios.append(scenario)
+
+ return scenarios
def finish_qps_workers(jobs, qpsworker_jobs):
- """Waits for given jobs to finish and eventually kills them."""
- retries = 0
- num_killed = 0
- while any(job.is_running() for job in jobs):
- for job in qpsworker_jobs:
- if job.is_running():
- print('QPS worker "%s" is still running.' % job.host_and_port)
- if retries > 10:
- print('Killing all QPS workers.')
- for job in jobs:
- job.kill()
- num_killed += 1
- retries += 1
- time.sleep(3)
- print('All QPS workers finished.')
- return num_killed
+ """Waits for given jobs to finish and eventually kills them."""
+ retries = 0
+ num_killed = 0
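+    # Poll every 3 seconds; after more than 10 checks, force-kill any jobs that are still running.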
+ while any(job.is_running() for job in jobs):
+ for job in qpsworker_jobs:
+ if job.is_running():
+ print('QPS worker "%s" is still running.' % job.host_and_port)
+ if retries > 10:
+ print('Killing all QPS workers.')
+ for job in jobs:
+ job.kill()
+ num_killed += 1
+ retries += 1
+ time.sleep(3)
+ print('All QPS workers finished.')
+ return num_killed
+
profile_output_files = []
+
# Collect perf text reports and flamegraphs if perf_cmd was used
# Note the base names of perf text reports are used when creating and processing
# perf data. The scenario name makes the output name unique in the final
# perf reports directory.
# Also, the perf profiles need to be fetched and processed after each scenario
# in order to avoid clobbering the output files.
-def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name, flame_graph_reports):
- perf_report_jobs = []
- global profile_output_files
- for host_and_port in hosts_and_base_names:
- perf_base_name = hosts_and_base_names[host_and_port]
- output_filename = '%s-%s' % (scenario_name, perf_base_name)
- # from the base filename, create .svg output filename
- host = host_and_port.split(':')[0]
- profile_output_files.append('%s.svg' % output_filename)
- perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename, flame_graph_reports))
-
- jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True)
- failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1)
- jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True)
- return failures
+def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name,
+ flame_graph_reports):
+ perf_report_jobs = []
+ global profile_output_files
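+    # Accumulate the generated .svg filenames so the final index.html can link every profile.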
+ for host_and_port in hosts_and_base_names:
+ perf_base_name = hosts_and_base_names[host_and_port]
+ output_filename = '%s-%s' % (scenario_name, perf_base_name)
+ # from the base filename, create .svg output filename
+ host = host_and_port.split(':')[0]
+ profile_output_files.append('%s.svg' % output_filename)
+ perf_report_jobs.append(
+ perf_report_processor_job(host, perf_base_name, output_filename,
+ flame_graph_reports))
+
+ jobset.message(
+ 'START', 'Collecting perf reports from qps workers', do_newline=True)
+ failures, _ = jobset.run(
+ perf_report_jobs, newline_on_success=True, maxjobs=1)
+ jobset.message(
+ 'END', 'Collecting perf reports from qps workers', do_newline=True)
+ return failures
+
def main():
- argp = argparse.ArgumentParser(description='Run performance tests.')
- argp.add_argument('-l', '--language',
- choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
- nargs='+',
- required=True,
- help='Languages to benchmark.')
- argp.add_argument('--remote_driver_host',
- default=None,
- help='Run QPS driver on given host. By default, QPS driver is run locally.')
- argp.add_argument('--remote_worker_host',
- nargs='+',
- default=[],
- help='Worker hosts where to start QPS workers.')
- argp.add_argument('--dry_run',
- default=False,
- action='store_const',
- const=True,
- help='Just list scenarios to be run, but don\'t run them.')
- argp.add_argument('-r', '--regex', default='.*', type=str,
- help='Regex to select scenarios to run.')
- argp.add_argument('--bq_result_table', default=None, type=str,
- help='Bigquery "dataset.table" to upload results to.')
- argp.add_argument('--category',
- choices=['smoketest','all','scalable','sweep'],
- default='all',
- help='Select a category of tests to run.')
- argp.add_argument('--netperf',
- default=False,
- action='store_const',
- const=True,
- help='Run netperf benchmark as one of the scenarios.')
- argp.add_argument('--server_cpu_load',
- default=0, type=int,
- help='Select a targeted server cpu load to run. 0 means ignore this flag')
- argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
- help='Name of XML report file to generate.')
- argp.add_argument('--perf_args',
- help=('Example usage: "--perf_args=record -F 99 -g". '
- 'Wrap QPS workers in a perf command '
- 'with the arguments to perf specified here. '
- '".svg" flame graph profiles will be '
- 'created for each Qps Worker on each scenario. '
- 'Files will output to "<repo_root>/<args.flame_graph_reports>" '
- 'directory. Output files from running the worker '
- 'under perf are saved in the repo root where its ran. '
- 'Note that the perf "-g" flag is necessary for '
- 'flame graphs generation to work (assuming the binary '
- 'being profiled uses frame pointers, check out '
- '"--call-graph dwarf" option using libunwind otherwise.) '
- 'Also note that the entire "--perf_args=<arg(s)>" must '
- 'be wrapped in quotes as in the example usage. '
- 'If the "--perg_args" is unspecified, "perf" will '
- 'not be used at all. '
- 'See http://www.brendangregg.com/perf.html '
- 'for more general perf examples.'))
- argp.add_argument('--skip_generate_flamegraphs',
- default=False,
- action='store_const',
- const=True,
- help=('Turn flame graph generation off. '
- 'May be useful if "perf_args" arguments do not make sense for '
- 'generating flamegraphs (e.g., "--perf_args=stat ...")'))
- argp.add_argument('-f', '--flame_graph_reports', default='perf_reports', type=str,
- help='Name of directory to output flame graph profiles to, if any are created.')
- argp.add_argument('-u', '--remote_host_username', default='', type=str,
- help='Use a username that isn\'t "Jenkins" to SSH into remote workers.')
-
- args = argp.parse_args()
-
- global _REMOTE_HOST_USERNAME
- if args.remote_host_username:
- _REMOTE_HOST_USERNAME = args.remote_host_username
-
- languages = set(scenario_config.LANGUAGES[l]
- for l in itertools.chain.from_iterable(
- six.iterkeys(scenario_config.LANGUAGES) if x == 'all'
- else [x] for x in args.language))
-
-
- # Put together set of remote hosts where to run and build
- remote_hosts = set()
- if args.remote_worker_host:
- for host in args.remote_worker_host:
- remote_hosts.add(host)
- if args.remote_driver_host:
- remote_hosts.add(args.remote_driver_host)
-
- if not args.dry_run:
- if remote_hosts:
- archive_repo(languages=[str(l) for l in languages])
- prepare_remote_hosts(remote_hosts, prepare_local=True)
- else:
- prepare_remote_hosts([], prepare_local=True)
-
- build_local = False
- if not args.remote_driver_host:
- build_local = True
- if not args.dry_run:
- build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local)
-
- perf_cmd = None
- if args.perf_args:
- print('Running workers under perf profiler')
- # Expect /usr/bin/perf to be installed here, as is usual
- perf_cmd = ['/usr/bin/perf']
- perf_cmd.extend(re.split('\s+', args.perf_args))
-
- qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd)
-
- # get list of worker addresses for each language.
- workers_by_lang = dict([(str(language), []) for language in languages])
- for job in qpsworker_jobs:
- workers_by_lang[str(job.language)].append(job)
-
- scenarios = create_scenarios(languages,
- workers_by_lang=workers_by_lang,
- remote_host=args.remote_driver_host,
- regex=args.regex,
- category=args.category,
- bq_result_table=args.bq_result_table,
- netperf=args.netperf,
- netperf_hosts=args.remote_worker_host,
- server_cpu_load=args.server_cpu_load)
-
- if not scenarios:
- raise Exception('No scenarios to run')
-
- total_scenario_failures = 0
- qps_workers_killed = 0
- merged_resultset = {}
- perf_report_failures = 0
-
- for scenario in scenarios:
- if args.dry_run:
- print(scenario.name)
- else:
- scenario_failures = 0
- try:
- for worker in scenario.workers:
- worker.start()
- jobs = [scenario.jobspec]
- if scenario.workers:
- jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host))
- scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1)
- total_scenario_failures += scenario_failures
- merged_resultset = dict(itertools.chain(six.iteritems(merged_resultset),
- six.iteritems(resultset)))
- finally:
- # Consider qps workers that need to be killed as failures
- qps_workers_killed += finish_qps_workers(scenario.workers, qpsworker_jobs)
-
- if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
- workers_and_base_names = {}
- for worker in scenario.workers:
- if not worker.perf_file_base_name:
- raise Exception('using perf buf perf report filename is unspecified')
- workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name
- perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name, args.flame_graph_reports)
-
-
- # Still write the index.html even if some scenarios failed.
- # 'profile_output_files' will only have names for scenarios that passed
- if perf_cmd and not args.skip_generate_flamegraphs:
- # write the index fil to the output dir, with all profiles from all scenarios/workers
- report_utils.render_perf_profiling_results('%s/index.html' % args.flame_graph_reports, profile_output_files)
-
- report_utils.render_junit_xml_report(merged_resultset, args.xml_report,
- suite_name='benchmarks')
-
- if total_scenario_failures > 0 or qps_workers_killed > 0:
- print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed))
- sys.exit(1)
-
- if perf_report_failures > 0:
- print('%s perf profile collection jobs failed' % perf_report_failures)
- sys.exit(1)
+ argp = argparse.ArgumentParser(description='Run performance tests.')
+ argp.add_argument(
+ '-l',
+ '--language',
+ choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
+ nargs='+',
+ required=True,
+ help='Languages to benchmark.')
+ argp.add_argument(
+ '--remote_driver_host',
+ default=None,
+        help='Run the QPS driver on the given host. By default, the QPS driver is run locally.'
+ )
+ argp.add_argument(
+ '--remote_worker_host',
+ nargs='+',
+ default=[],
+        help='Hosts on which to start QPS workers.'
+ argp.add_argument(
+ '--dry_run',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Just list scenarios to be run, but don\'t run them.')
+ argp.add_argument(
+ '-r',
+ '--regex',
+ default='.*',
+ type=str,
+ help='Regex to select scenarios to run.')
+ argp.add_argument(
+ '--bq_result_table',
+ default=None,
+ type=str,
+ help='Bigquery "dataset.table" to upload results to.')
+ argp.add_argument(
+ '--category',
+ choices=['smoketest', 'all', 'scalable', 'sweep'],
+ default='all',
+ help='Select a category of tests to run.')
+ argp.add_argument(
+ '--netperf',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run netperf benchmark as one of the scenarios.')
+ argp.add_argument(
+ '--server_cpu_load',
+ default=0,
+ type=int,
+        help='Select a target server CPU load to run at. 0 means this flag is ignored.'
+ )
+ argp.add_argument(
+ '-x',
+ '--xml_report',
+ default='report.xml',
+ type=str,
+ help='Name of XML report file to generate.')
+ argp.add_argument(
+ '--perf_args',
+ help=('Example usage: "--perf_args=record -F 99 -g". '
+ 'Wrap QPS workers in a perf command '
+ 'with the arguments to perf specified here. '
+ '".svg" flame graph profiles will be '
+              'created for each QPS worker on each scenario. '
+              'Files will be output to the "<repo_root>/<args.flame_graph_reports>" '
+              'directory. Output files from running the worker '
+              'under perf are saved in the repo root where it is run. '
+              'Note that the perf "-g" flag is necessary for '
+              'flame graph generation to work (assuming the binary '
+              'being profiled uses frame pointers; otherwise check out '
+              'the "--call-graph dwarf" option using libunwind.) '
+              'Also note that the entire "--perf_args=<arg(s)>" must '
+              'be wrapped in quotes as in the example usage. '
+              'If "--perf_args" is unspecified, "perf" will '
+ 'not be used at all. '
+ 'See http://www.brendangregg.com/perf.html '
+ 'for more general perf examples.'))
+ argp.add_argument(
+ '--skip_generate_flamegraphs',
+ default=False,
+ action='store_const',
+ const=True,
+ help=('Turn flame graph generation off. '
+ 'May be useful if "perf_args" arguments do not make sense for '
+ 'generating flamegraphs (e.g., "--perf_args=stat ...")'))
+ argp.add_argument(
+ '-f',
+ '--flame_graph_reports',
+ default='perf_reports',
+ type=str,
+ help='Name of directory to output flame graph profiles to, if any are created.'
+ )
+ argp.add_argument(
+ '-u',
+ '--remote_host_username',
+ default='',
+ type=str,
+ help='Use a username that isn\'t "Jenkins" to SSH into remote workers.')
+
+ args = argp.parse_args()
+
+ global _REMOTE_HOST_USERNAME
+ if args.remote_host_username:
+ _REMOTE_HOST_USERNAME = args.remote_host_username
+
+ languages = set(
+ scenario_config.LANGUAGES[l]
+ for l in itertools.chain.from_iterable(
+ six.iterkeys(scenario_config.LANGUAGES) if x == 'all' else [x]
+ for x in args.language))
+
+    # Put together the set of remote hosts on which to run and build
+ remote_hosts = set()
+ if args.remote_worker_host:
+ for host in args.remote_worker_host:
+ remote_hosts.add(host)
+ if args.remote_driver_host:
+ remote_hosts.add(args.remote_driver_host)
+
+ if not args.dry_run:
+ if remote_hosts:
+ archive_repo(languages=[str(l) for l in languages])
+ prepare_remote_hosts(remote_hosts, prepare_local=True)
+ else:
+ prepare_remote_hosts([], prepare_local=True)
+
+ build_local = False
+ if not args.remote_driver_host:
+ build_local = True
+ if not args.dry_run:
+ build_on_remote_hosts(
+ remote_hosts,
+ languages=[str(l) for l in languages],
+ build_local=build_local)
+
+ perf_cmd = None
+ if args.perf_args:
+ print('Running workers under perf profiler')
+ # Expect /usr/bin/perf to be installed here, as is usual
+ perf_cmd = ['/usr/bin/perf']
+ perf_cmd.extend(re.split('\s+', args.perf_args))
+
+ qpsworker_jobs = create_qpsworkers(
+ languages, args.remote_worker_host, perf_cmd=perf_cmd)
+
+ # get list of worker addresses for each language.
+ workers_by_lang = dict([(str(language), []) for language in languages])
+ for job in qpsworker_jobs:
+ workers_by_lang[str(job.language)].append(job)
+
+ scenarios = create_scenarios(
+ languages,
+ workers_by_lang=workers_by_lang,
+ remote_host=args.remote_driver_host,
+ regex=args.regex,
+ category=args.category,
+ bq_result_table=args.bq_result_table,
+ netperf=args.netperf,
+ netperf_hosts=args.remote_worker_host,
+ server_cpu_load=args.server_cpu_load)
+
+ if not scenarios:
+ raise Exception('No scenarios to run')
+
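+    # Aggregate outcomes across scenarios; any scenario failure or force-killed worker fails the run.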
+ total_scenario_failures = 0
+ qps_workers_killed = 0
+ merged_resultset = {}
+ perf_report_failures = 0
+
+ for scenario in scenarios:
+ if args.dry_run:
+ print(scenario.name)
+ else:
+ scenario_failures = 0
+ try:
+ for worker in scenario.workers:
+ worker.start()
+ jobs = [scenario.jobspec]
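+                # Queue a quit job after the driver so the scenario's workers shut down when it finishes.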
+ if scenario.workers:
+ jobs.append(
+ create_quit_jobspec(
+ scenario.workers,
+ remote_host=args.remote_driver_host))
+ scenario_failures, resultset = jobset.run(
+ jobs, newline_on_success=True, maxjobs=1)
+ total_scenario_failures += scenario_failures
+ merged_resultset = dict(
+ itertools.chain(
+ six.iteritems(merged_resultset),
+ six.iteritems(resultset)))
+ finally:
+ # Consider qps workers that need to be killed as failures
+ qps_workers_killed += finish_qps_workers(scenario.workers,
+ qpsworker_jobs)
+
+ if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
+ workers_and_base_names = {}
+ for worker in scenario.workers:
+ if not worker.perf_file_base_name:
+                        raise Exception(
+                            'using perf but perf report filename is unspecified')
+ workers_and_base_names[
+ worker.host_and_port] = worker.perf_file_base_name
+ perf_report_failures += run_collect_perf_profile_jobs(
+ workers_and_base_names, scenario.name,
+ args.flame_graph_reports)
+
+ # Still write the index.html even if some scenarios failed.
+ # 'profile_output_files' will only have names for scenarios that passed
+ if perf_cmd and not args.skip_generate_flamegraphs:
+        # write the index file to the output dir, with all profiles from all scenarios/workers
+ report_utils.render_perf_profiling_results(
+ '%s/index.html' % args.flame_graph_reports, profile_output_files)
+
+ report_utils.render_junit_xml_report(
+ merged_resultset, args.xml_report, suite_name='benchmarks')
+
+ if total_scenario_failures > 0 or qps_workers_killed > 0:
+ print('%s scenarios failed and %s qps worker jobs killed' %
+ (total_scenario_failures, qps_workers_killed))
+ sys.exit(1)
+
+ if perf_report_failures > 0:
+ print('%s perf profile collection jobs failed' % perf_report_failures)
+ sys.exit(1)
+
if __name__ == "__main__":
- main()
+ main()
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 471f5d99e7..bd5b8644b3 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Run tests in parallel."""
from __future__ import print_function
@@ -46,36 +45,34 @@ import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
- from python_utils.upload_test_results import upload_results_to_bq
+ from python_utils.upload_test_results import upload_results_to_bq
except (ImportError):
- pass # It's ok to not import because this is only necessary to upload results to BQ.
+ pass # It's ok to not import because this is only necessary to upload results to BQ.
-gcp_utils_dir = os.path.abspath(os.path.join(
- os.path.dirname(__file__), '../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
-
_FORCE_ENVIRON_FOR_WRAPPERS = {
- 'GRPC_VERBOSITY': 'DEBUG',
+ 'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
- 'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
- 'mac': ['poll'],
+ 'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
+ 'mac': ['poll'],
}
-
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
def get_bqtest_data(limit=None):
- import big_query_utils
+ import big_query_utils
- bq = big_query_utils.create_big_query()
- query = """
+ bq = big_query_utils.create_big_query()
+ query = """
SELECT
filtered_test_name,
SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
@@ -88,941 +85,1068 @@ SELECT
[grpc-testing:jenkins_test_results.aggregate_results]
WHERE
timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
- AND platform = '"""+platform_string()+"""'
+ AND platform = '""" + platform_string() + """'
AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
filtered_test_name"""
- if limit:
- query += " limit {}".format(limit)
- query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
- page = bq.jobs().getQueryResults(
- pageToken=None,
- **query_job['jobReference']).execute(num_retries=3)
- test_data = [BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true', float(row['f'][2]['v'])) for row in page['rows']]
- return test_data
+ if limit:
+ query += " limit {}".format(limit)
+ query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
+ page = bq.jobs().getQueryResults(
+ pageToken=None, **query_job['jobReference']).execute(num_retries=3)
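+    # Each row holds (test name, flaky flag, cpu usage); wrap the rows in BigQueryTestData tuples.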
+ test_data = [
+ BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
+ float(row['f'][2]['v'])) for row in page['rows']
+ ]
+ return test_data
def platform_string():
- return jobset.platform_string()
+ return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
+
def run_shell_command(cmd, env=None, cwd=None):
- try:
- subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
- except subprocess.CalledProcessError as e:
- logging.exception("Error while running command '%s'. Exit status %d. Output:\n%s",
- e.cmd, e.returncode, e.output)
- raise
+ try:
+ subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
+ except subprocess.CalledProcessError as e:
+ logging.exception(
+ "Error while running command '%s'. Exit status %d. Output:\n%s",
+ e.cmd, e.returncode, e.output)
+ raise
+
def max_parallel_tests_for_current_platform():
- # Too much test parallelization has only been seen to be a problem
- # so far on windows.
- if jobset.platform_string() == 'windows':
- return 64
- return 1024
+ # Too much test parallelization has only been seen to be a problem
+ # so far on windows.
+ if jobset.platform_string() == 'windows':
+ return 64
+ return 1024
+
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
- def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=[], iomgr_platform='native'):
- if environ is None:
- environ = {}
- self.build_config = config
- self.environ = environ
- self.environ['CONFIG'] = config
- self.tool_prefix = tool_prefix
- self.timeout_multiplier = timeout_multiplier
- self.iomgr_platform = iomgr_platform
-
- def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
- shortname=None, environ={}, cpu_cost=1.0, flaky=False):
- """Construct a jobset.JobSpec for a test under this config
+ def __init__(self,
+ config,
+ environ=None,
+ timeout_multiplier=1,
+ tool_prefix=[],
+ iomgr_platform='native'):
+ if environ is None:
+ environ = {}
+ self.build_config = config
+ self.environ = environ
+ self.environ['CONFIG'] = config
+ self.tool_prefix = tool_prefix
+ self.timeout_multiplier = timeout_multiplier
+ self.iomgr_platform = iomgr_platform
+
+ def job_spec(self,
+ cmdline,
+ timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
+ shortname=None,
+ environ={},
+ cpu_cost=1.0,
+ flaky=False):
+ """Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
- actual_environ = self.environ.copy()
- for k, v in environ.items():
- actual_environ[k] = v
- if not flaky and shortname and shortname in flaky_tests:
- flaky = True
- if shortname in shortname_to_cpu:
- cpu_cost = shortname_to_cpu[shortname]
- return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
- shortname=shortname,
- environ=actual_environ,
- cpu_cost=cpu_cost,
- timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
- flake_retries=4 if flaky or args.allow_flakes else 0,
- timeout_retries=1 if flaky or args.allow_flakes else 0)
-
-
-def get_c_tests(travis, test_lang) :
- out = []
- platforms_str = 'ci_platforms' if travis else 'platforms'
- with open('tools/run_tests/generated/tests.json') as f:
- js = json.load(f)
- return [tgt
- for tgt in js
- if tgt['language'] == test_lang and
- platform_string() in tgt[platforms_str] and
- not (travis and tgt['flaky'])]
+ actual_environ = self.environ.copy()
+ for k, v in environ.items():
+ actual_environ[k] = v
+ if not flaky and shortname and shortname in flaky_tests:
+ flaky = True
+ if shortname in shortname_to_cpu:
+ cpu_cost = shortname_to_cpu[shortname]
+ return jobset.JobSpec(
+ cmdline=self.tool_prefix + cmdline,
+ shortname=shortname,
+ environ=actual_environ,
+ cpu_cost=cpu_cost,
+ timeout_seconds=(self.timeout_multiplier * timeout_seconds
+ if timeout_seconds else None),
+ flake_retries=4 if flaky or args.allow_flakes else 0,
+ timeout_retries=1 if flaky or args.allow_flakes else 0)
+
+
+def get_c_tests(travis, test_lang):
+ out = []
+ platforms_str = 'ci_platforms' if travis else 'platforms'
+ with open('tools/run_tests/generated/tests.json') as f:
+ js = json.load(f)
+ return [
+ tgt for tgt in js
+ if tgt['language'] == test_lang and platform_string() in tgt[
+ platforms_str] and not (travis and tgt['flaky'])
+ ]
def _check_compiler(compiler, supported_compilers):
- if compiler not in supported_compilers:
- raise Exception('Compiler %s not supported (on this platform).' % compiler)
+ if compiler not in supported_compilers:
+ raise Exception('Compiler %s not supported (on this platform).' %
+ compiler)
def _check_arch(arch, supported_archs):
- if arch not in supported_archs:
- raise Exception('Architecture %s not supported.' % arch)
+ if arch not in supported_archs:
+ raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
- """Returns True if running running as a --use_docker child."""
- return True if os.getenv('RUN_TESTS_COMMAND') else False
+ """Returns True if running running as a --use_docker child."""
+ return True if os.getenv('RUN_TESTS_COMMAND') else False
-_PythonConfigVars = collections.namedtuple(
- '_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
- 'venv_relative_python', 'toolchain', 'runner'])
+_PythonConfigVars = collections.namedtuple('_ConfigVars', [
+ 'shell', 'builder', 'builder_prefix_arguments', 'venv_relative_python',
+ 'toolchain', 'runner'
+])
def _python_config_generator(name, major, minor, bits, config_vars):
- return PythonConfig(
- name,
- config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
- _python_pattern_function(major=major, minor=minor, bits=bits)] + [
- name] + config_vars.venv_relative_python + config_vars.toolchain,
- config_vars.shell + config_vars.runner + [
- os.path.join(name, config_vars.venv_relative_python[0])])
+ return PythonConfig(
+ name, config_vars.shell + config_vars.builder +
+ config_vars.builder_prefix_arguments + [
+ _python_pattern_function(major=major, minor=minor, bits=bits)
+ ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
+ config_vars.shell + config_vars.runner +
+ [os.path.join(name, config_vars.venv_relative_python[0])])
def _pypy_config_generator(name, major, config_vars):
- return PythonConfig(
- name,
- config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
- _pypy_pattern_function(major=major)] + [
- name] + config_vars.venv_relative_python + config_vars.toolchain,
- config_vars.shell + config_vars.runner + [
- os.path.join(name, config_vars.venv_relative_python[0])])
+ return PythonConfig(
+ name,
+ config_vars.shell + config_vars.builder +
+ config_vars.builder_prefix_arguments + [
+ _pypy_pattern_function(major=major)
+ ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
+ config_vars.shell + config_vars.runner +
+ [os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
- # Bit-ness is handled by the test machine's environment
- if os.name == "nt":
- if bits == "64":
- return '/c/Python{major}{minor}/python.exe'.format(
- major=major, minor=minor, bits=bits)
+ # Bit-ness is handled by the test machine's environment
+ if os.name == "nt":
+ if bits == "64":
+ return '/c/Python{major}{minor}/python.exe'.format(
+ major=major, minor=minor, bits=bits)
+ else:
+ return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
+ major=major, minor=minor, bits=bits)
else:
- return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
- major=major, minor=minor, bits=bits)
- else:
- return 'python{major}.{minor}'.format(major=major, minor=minor)
+ return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
- if major == '2':
- return 'pypy'
- elif major == '3':
- return 'pypy3'
- else:
- raise ValueError("Unknown PyPy major version")
+ if major == '2':
+ return 'pypy'
+ elif major == '3':
+ return 'pypy3'
+ else:
+ raise ValueError("Unknown PyPy major version")
class CLanguage(object):
- def __init__(self, make_target, test_lang):
- self.make_target = make_target
- self.platform = platform_string()
- self.test_lang = test_lang
-
- def configure(self, config, args):
- self.config = config
- self.args = args
- if self.platform == 'windows':
- _check_compiler(self.args.compiler, ['default', 'cmake', 'cmake_vs2015',
- 'cmake_vs2017'])
- _check_arch(self.args.arch, ['default', 'x64', 'x86'])
- self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
- self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
- self._use_cmake = True
- self._make_options = []
- elif self.args.compiler == 'cmake':
- _check_arch(self.args.arch, ['default'])
- self._use_cmake = True
- self._docker_distro = 'jessie'
- self._make_options = []
- else:
- self._use_cmake = False
- self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
- self.args.compiler)
- if args.iomgr_platform == "uv":
- cflags = '-DGRPC_UV -DGRPC_UV_THREAD_CHECK'
- try:
- cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).strip() + ' '
- except (subprocess.CalledProcessError, OSError):
- pass
- try:
- ldflags = subprocess.check_output(['pkg-config', '--libs', 'libuv']).strip() + ' '
- except (subprocess.CalledProcessError, OSError):
- ldflags = '-luv '
- self._make_options += ['EXTRA_CPPFLAGS={}'.format(cflags),
- 'EXTRA_LDLIBS={}'.format(ldflags)]
-
- def test_specs(self):
- out = []
- binaries = get_c_tests(self.args.travis, self.test_lang)
- for target in binaries:
- if self._use_cmake and target.get('boringssl', False):
- # cmake doesn't build boringssl tests
- continue
- auto_timeout_scaling = target.get('auto_timeout_scaling', True)
- polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
- if target.get('uses_polling', True)
- else ['none'])
- if self.args.iomgr_platform == 'uv':
- polling_strategies = ['all']
- for polling_strategy in polling_strategies:
- env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
- _ROOT + '/src/core/tsi/test_creds/ca.pem',
- 'GRPC_POLL_STRATEGY': polling_strategy,
- 'GRPC_VERBOSITY': 'DEBUG'}
- resolver = os.environ.get('GRPC_DNS_RESOLVER', None);
- if resolver:
- env['GRPC_DNS_RESOLVER'] = resolver
- shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
- if polling_strategy in target.get('excluded_poll_engines', []):
- continue
-
- timeout_scaling = 1
- if auto_timeout_scaling:
- config = self.args.config
- if ('asan' in config
- or config == 'msan'
- or config == 'tsan'
- or config == 'ubsan'
- or config == 'helgrind'
- or config == 'memcheck'):
- # Scale overall test timeout if running under various sanitizers.
- # scaling value is based on historical data analysis
- timeout_scaling *= 3
- elif polling_strategy == 'poll-cv':
- # scale test timeout if running with poll-cv
- # sanitizer and poll-cv scaling is not cumulative to ensure
- # reasonable timeout values.
- # TODO(jtattermusch): based on historical data and 5min default
- # test timeout poll-cv scaling is currently not useful.
- # Leaving here so it can be reintroduced if the default test timeout
- # is decreased in the future.
- timeout_scaling *= 1
-
- if self.config.build_config in target['exclude_configs']:
- continue
- if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
- continue
+ def __init__(self, make_target, test_lang):
+ self.make_target = make_target
+ self.platform = platform_string()
+ self.test_lang = test_lang
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
if self.platform == 'windows':
- binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[self.config.build_config], target['name'])
+ _check_compiler(self.args.compiler, [
+ 'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
+ ])
+ _check_arch(self.args.arch, ['default', 'x64', 'x86'])
+ self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
+ self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
+ self._use_cmake = True
+ self._make_options = []
+ elif self.args.compiler == 'cmake':
+ _check_arch(self.args.arch, ['default'])
+ self._use_cmake = True
+ self._docker_distro = 'jessie'
+ self._make_options = []
else:
- if self._use_cmake:
- binary = 'cmake/build/%s' % target['name']
- else:
- binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
- cpu_cost = target['cpu_cost']
- if cpu_cost == 'capacity':
- cpu_cost = multiprocessing.cpu_count()
- if os.path.isfile(binary):
- list_test_command = None
- filter_test_command = None
-
- # these are the flag defined by gtest and benchmark framework to list
- # and filter test runs. We use them to split each individual test
- # into its own JobSpec, and thus into its own process.
- if 'benchmark' in target and target['benchmark']:
- with open(os.devnull, 'w') as fnull:
- tests = subprocess.check_output([binary, '--benchmark_list_tests'],
- stderr=fnull)
- for line in tests.split('\n'):
- test = line.strip()
- if not test: continue
- cmdline = [binary, '--benchmark_filter=%s$' % test] + target['args']
- out.append(self.config.job_spec(cmdline,
- shortname='%s %s' % (' '.join(cmdline), shortname_ext),
- cpu_cost=cpu_cost,
- timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
- environ=env))
- elif 'gtest' in target and target['gtest']:
- # here we parse the output of --gtest_list_tests to build up a complete
- # list of the tests contained in a binary for each test, we then
- # add a job to run, filtering for just that test.
- with open(os.devnull, 'w') as fnull:
- tests = subprocess.check_output([binary, '--gtest_list_tests'],
- stderr=fnull)
- base = None
- for line in tests.split('\n'):
- i = line.find('#')
- if i >= 0: line = line[:i]
- if not line: continue
- if line[0] != ' ':
- base = line.strip()
- else:
- assert base is not None
- assert line[1] == ' '
- test = base + line.strip()
- cmdline = [binary, '--gtest_filter=%s' % test] + target['args']
- out.append(self.config.job_spec(cmdline,
- shortname='%s %s' % (' '.join(cmdline), shortname_ext),
- cpu_cost=cpu_cost,
- timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
- environ=env))
- else:
- cmdline = [binary] + target['args']
- shortname = target.get('shortname', ' '.join(
- pipes.quote(arg)
- for arg in cmdline))
- shortname += shortname_ext
- out.append(self.config.job_spec(cmdline,
- shortname=shortname,
- cpu_cost=cpu_cost,
- flaky=target.get('flaky', False),
- timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
- environ=env))
- elif self.args.regex == '.*' or self.platform == 'windows':
- print('\nWARNING: binary not found, skipping', binary)
- return sorted(out)
-
- def make_targets(self):
- if self.platform == 'windows':
- # don't build tools on windows just yet
- return ['buildtests_%s' % self.make_target]
- return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
- 'check_epollexclusive']
-
- def make_options(self):
- return self._make_options
-
- def pre_build_steps(self):
- if self.platform == 'windows':
- return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
- self._cmake_generator_option,
- self._cmake_arch_option]]
- elif self._use_cmake:
- return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
- else:
- return []
+ self._use_cmake = False
+ self._docker_distro, self._make_options = self._compiler_options(
+ self.args.use_docker, self.args.compiler)
+ if args.iomgr_platform == "uv":
+ cflags = '-DGRPC_UV -DGRPC_UV_THREAD_CHECK'
+ try:
+ cflags += subprocess.check_output(
+ ['pkg-config', '--cflags', 'libuv']).strip() + ' '
+ except (subprocess.CalledProcessError, OSError):
+ pass
+ try:
+ ldflags = subprocess.check_output(
+ ['pkg-config', '--libs', 'libuv']).strip() + ' '
+ except (subprocess.CalledProcessError, OSError):
+ ldflags = '-luv '
+ self._make_options += [
+ 'EXTRA_CPPFLAGS={}'.format(cflags),
+ 'EXTRA_LDLIBS={}'.format(ldflags)
+ ]
+
+ def test_specs(self):
+ out = []
+ binaries = get_c_tests(self.args.travis, self.test_lang)
+ for target in binaries:
+ if self._use_cmake and target.get('boringssl', False):
+ # cmake doesn't build boringssl tests
+ continue
+ auto_timeout_scaling = target.get('auto_timeout_scaling', True)
+ polling_strategies = (
+ _POLLING_STRATEGIES.get(self.platform, ['all'])
+ if target.get('uses_polling', True) else ['none'])
+ if self.args.iomgr_platform == 'uv':
+ polling_strategies = ['all']
+ for polling_strategy in polling_strategies:
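+                # Run each test once per polling strategy, forcing the engine via GRPC_POLL_STRATEGY.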
+ env = {
+ 'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
+ _ROOT + '/src/core/tsi/test_creds/ca.pem',
+ 'GRPC_POLL_STRATEGY':
+ polling_strategy,
+ 'GRPC_VERBOSITY':
+ 'DEBUG'
+ }
+ resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
+ if resolver:
+ env['GRPC_DNS_RESOLVER'] = resolver
+ shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
+ if polling_strategy in target.get('excluded_poll_engines', []):
+ continue
+
+ timeout_scaling = 1
+ if auto_timeout_scaling:
+ config = self.args.config
+ if ('asan' in config or config == 'msan' or
+ config == 'tsan' or config == 'ubsan' or
+ config == 'helgrind' or config == 'memcheck'):
+ # Scale overall test timeout if running under various sanitizers.
+ # scaling value is based on historical data analysis
+ timeout_scaling *= 3
+ elif polling_strategy == 'poll-cv':
+ # scale test timeout if running with poll-cv
+ # sanitizer and poll-cv scaling is not cumulative to ensure
+ # reasonable timeout values.
+ # TODO(jtattermusch): based on historical data and 5min default
+ # test timeout poll-cv scaling is currently not useful.
+ # Leaving here so it can be reintroduced if the default test timeout
+ # is decreased in the future.
+ timeout_scaling *= 1
+
+ if self.config.build_config in target['exclude_configs']:
+ continue
+ if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
+ continue
+ if self.platform == 'windows':
+ binary = 'cmake/build/%s/%s.exe' % (
+ _MSBUILD_CONFIG[self.config.build_config],
+ target['name'])
+ else:
+ if self._use_cmake:
+ binary = 'cmake/build/%s' % target['name']
+ else:
+ binary = 'bins/%s/%s' % (self.config.build_config,
+ target['name'])
+ cpu_cost = target['cpu_cost']
+ if cpu_cost == 'capacity':
+ cpu_cost = multiprocessing.cpu_count()
+ if os.path.isfile(binary):
+ list_test_command = None
+ filter_test_command = None
+
+                    # these are the flags defined by the gtest and benchmark frameworks to list
+ # and filter test runs. We use them to split each individual test
+ # into its own JobSpec, and thus into its own process.
+ if 'benchmark' in target and target['benchmark']:
+ with open(os.devnull, 'w') as fnull:
+ tests = subprocess.check_output(
+ [binary, '--benchmark_list_tests'],
+ stderr=fnull)
+ for line in tests.split('\n'):
+ test = line.strip()
+ if not test: continue
+ cmdline = [binary, '--benchmark_filter=%s$' % test
+ ] + target['args']
+ out.append(
+ self.config.job_spec(
+ cmdline,
+ shortname='%s %s' % (' '.join(cmdline),
+ shortname_ext),
+ cpu_cost=cpu_cost,
+ timeout_seconds=_DEFAULT_TIMEOUT_SECONDS *
+ timeout_scaling,
+ environ=env))
+ elif 'gtest' in target and target['gtest']:
+ # here we parse the output of --gtest_list_tests to build up a complete
+                        # list of the tests contained in a binary; for each test, we then
+ # add a job to run, filtering for just that test.
+ with open(os.devnull, 'w') as fnull:
+ tests = subprocess.check_output(
+ [binary, '--gtest_list_tests'], stderr=fnull)
+ base = None
+ for line in tests.split('\n'):
+ i = line.find('#')
+ if i >= 0: line = line[:i]
+ if not line: continue
+ if line[0] != ' ':
+ base = line.strip()
+ else:
+ assert base is not None
+ assert line[1] == ' '
+ test = base + line.strip()
+ cmdline = [binary, '--gtest_filter=%s' % test
+ ] + target['args']
+ out.append(
+ self.config.job_spec(
+ cmdline,
+ shortname='%s %s' % (' '.join(cmdline),
+ shortname_ext),
+ cpu_cost=cpu_cost,
+ timeout_seconds=target.get(
+ 'timeout_seconds',
+ _DEFAULT_TIMEOUT_SECONDS) *
+ timeout_scaling,
+ environ=env))
+ else:
+ cmdline = [binary] + target['args']
+ shortname = target.get('shortname', ' '.join(
+ pipes.quote(arg) for arg in cmdline))
+ shortname += shortname_ext
+ out.append(
+ self.config.job_spec(
+ cmdline,
+ shortname=shortname,
+ cpu_cost=cpu_cost,
+ flaky=target.get('flaky', False),
+ timeout_seconds=target.get(
+ 'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
+ * timeout_scaling,
+ environ=env))
+ elif self.args.regex == '.*' or self.platform == 'windows':
+ print('\nWARNING: binary not found, skipping', binary)
+ return sorted(out)
+
+ def make_targets(self):
+ if self.platform == 'windows':
+ # don't build tools on windows just yet
+ return ['buildtests_%s' % self.make_target]
+ return [
+ 'buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
+ 'check_epollexclusive'
+ ]
- def build_steps(self):
- return []
+ def make_options(self):
+ return self._make_options
- def post_tests_steps(self):
- if self.platform == 'windows':
- return []
- else:
- return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
+ def pre_build_steps(self):
+ if self.platform == 'windows':
+ return [[
+ 'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
+ self._cmake_generator_option, self._cmake_arch_option
+ ]]
+ elif self._use_cmake:
+ return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
+ else:
+ return []
- def makefile_name(self):
- if self._use_cmake:
- return 'cmake/build/Makefile'
- else:
- return 'Makefile'
-
- def _clang_make_options(self, version_suffix=''):
- return ['CC=clang%s' % version_suffix,
- 'CXX=clang++%s' % version_suffix,
- 'LD=clang%s' % version_suffix,
- 'LDXX=clang++%s' % version_suffix]
-
- def _gcc_make_options(self, version_suffix):
- return ['CC=gcc%s' % version_suffix,
- 'CXX=g++%s' % version_suffix,
- 'LD=gcc%s' % version_suffix,
- 'LDXX=g++%s' % version_suffix]
-
- def _compiler_options(self, use_docker, compiler):
- """Returns docker distro and make options to use for given compiler."""
- if not use_docker and not _is_use_docker_child():
- _check_compiler(compiler, ['default'])
-
- if compiler == 'gcc4.9' or compiler == 'default':
- return ('jessie', [])
- elif compiler == 'gcc4.8':
- return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
- elif compiler == 'gcc5.3':
- return ('ubuntu1604', [])
- elif compiler == 'gcc_musl':
- return ('alpine', [])
- elif compiler == 'clang3.4':
- # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
- return ('ubuntu1404', self._clang_make_options())
- elif compiler == 'clang3.5':
- return ('jessie', self._clang_make_options(version_suffix='-3.5'))
- elif compiler == 'clang3.6':
- return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
- elif compiler == 'clang3.7':
- return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
- else:
- raise Exception('Compiler %s not supported.' % compiler)
+ def build_steps(self):
+ return []
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
- _docker_arch_suffix(self.args.arch))
+ def post_tests_steps(self):
+ if self.platform == 'windows':
+ return []
+ else:
+ return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
+
+ def makefile_name(self):
+ if self._use_cmake:
+ return 'cmake/build/Makefile'
+ else:
+ return 'Makefile'
+
+ def _clang_make_options(self, version_suffix=''):
+ return [
+ 'CC=clang%s' % version_suffix, 'CXX=clang++%s' % version_suffix,
+ 'LD=clang%s' % version_suffix, 'LDXX=clang++%s' % version_suffix
+ ]
+
+ def _gcc_make_options(self, version_suffix):
+ return [
+ 'CC=gcc%s' % version_suffix, 'CXX=g++%s' % version_suffix,
+ 'LD=gcc%s' % version_suffix, 'LDXX=g++%s' % version_suffix
+ ]
+
+ def _compiler_options(self, use_docker, compiler):
+ """Returns docker distro and make options to use for given compiler."""
+ if not use_docker and not _is_use_docker_child():
+ _check_compiler(compiler, ['default'])
+
+ if compiler == 'gcc4.9' or compiler == 'default':
+ return ('jessie', [])
+ elif compiler == 'gcc4.8':
+ return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
+ elif compiler == 'gcc5.3':
+ return ('ubuntu1604', [])
+ elif compiler == 'gcc_musl':
+ return ('alpine', [])
+ elif compiler == 'clang3.4':
+ # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
+ return ('ubuntu1404', self._clang_make_options())
+ elif compiler == 'clang3.5':
+ return ('jessie', self._clang_make_options(version_suffix='-3.5'))
+ elif compiler == 'clang3.6':
+ return ('ubuntu1604',
+ self._clang_make_options(version_suffix='-3.6'))
+ elif compiler == 'clang3.7':
+ return ('ubuntu1604',
+ self._clang_make_options(version_suffix='-3.7'))
+ else:
+ raise Exception('Compiler %s not supported.' % compiler)
- def __str__(self):
- return self.make_target
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/cxx_%s_%s' % (
+ self._docker_distro, _docker_arch_suffix(self.args.arch))
+
+ def __str__(self):
+ return self.make_target
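Every language class touched in this hunk (CLanguage, RemoteNodeLanguage, PhpLanguage, Php7Language, PythonLanguage, RubyLanguage, CSharpLanguage, ObjCLanguage, Sanity) exposes the same duck-typed interface that the driver code further down calls. A minimal sketch of that implicit contract, using a hypothetical ExampleLanguage with placeholder return values and assuming nothing beyond the method names visible in this file:

class ExampleLanguage(object):

    def configure(self, config, args):
        # remember the run config and the parsed command-line arguments
        self.config = config
        self.args = args

    def test_specs(self):
        # list of jobset.JobSpec objects describing the tests to run
        return []

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        # ObjCLanguage returns None here; the others return a
        # tools/dockerfile/test/... path
        return None

    def __str__(self):
        return 'example'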
# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
- def __init__(self):
- self.platform = platform_string()
-
- def configure(self, config, args):
- self.config = config
- self.args = args
- # Note: electron ABI only depends on major and minor version, so that's all
- # we should specify in the compiler argument
- _check_compiler(self.args.compiler, ['default', 'node0.12',
- 'node4', 'node5', 'node6',
- 'node7', 'node8',
- 'electron1.3', 'electron1.6'])
- if self.args.compiler == 'default':
- self.runtime = 'node'
- self.node_version = '8'
- else:
- if self.args.compiler.startswith('electron'):
- self.runtime = 'electron'
- self.node_version = self.args.compiler[8:]
- else:
- self.runtime = 'node'
- # Take off the word "node"
- self.node_version = self.args.compiler[4:]
-
- # TODO: update with Windows/electron scripts when available for grpc/grpc-node
- def test_specs(self):
- if self.platform == 'windows':
- return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])]
- else:
- return [self.config.job_spec(['tools/run_tests/helper_scripts/run_grpc-node.sh'],
- None,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+ def __init__(self):
+ self.platform = platform_string()
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ # Note: electron ABI only depends on major and minor version, so that's all
+ # we should specify in the compiler argument
+ _check_compiler(self.args.compiler, [
+ 'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
+ 'electron1.3', 'electron1.6'
+ ])
+ if self.args.compiler == 'default':
+ self.runtime = 'node'
+ self.node_version = '8'
+ else:
+ if self.args.compiler.startswith('electron'):
+ self.runtime = 'electron'
+ self.node_version = self.args.compiler[8:]
+ else:
+ self.runtime = 'node'
+ # Take off the word "node"
+ self.node_version = self.args.compiler[4:]
+
+ # TODO: update with Windows/electron scripts when available for grpc/grpc-node
+ def test_specs(self):
+ if self.platform == 'windows':
+ return [
+ self.config.job_spec(
+ ['tools\\run_tests\\helper_scripts\\run_node.bat'])
+ ]
+ else:
+ return [
+ self.config.job_spec(
+ ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
+ None,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+ ]
- def pre_build_steps(self):
- return []
+ def pre_build_steps(self):
+ return []
- def make_targets(self):
- return []
+ def make_targets(self):
+ return []
- def make_options(self):
- return []
+ def make_options(self):
+ return []
- def build_steps(self):
- return []
+ def build_steps(self):
+ return []
- def post_tests_steps(self):
- return []
+ def post_tests_steps(self):
+ return []
- def makefile_name(self):
- return 'Makefile'
+ def makefile_name(self):
+ return 'Makefile'
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
+ self.args.arch)
- def __str__(self):
- return 'grpc-node'
+ def __str__(self):
+ return 'grpc-node'
class PhpLanguage(object):
- def configure(self, config, args):
- self.config = config
- self.args = args
- _check_compiler(self.args.compiler, ['default'])
- self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
+ self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
- def test_specs(self):
- return [self.config.job_spec(['src/php/bin/run_tests.sh'],
- environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+ def test_specs(self):
+ return [
+ self.config.job_spec(
+ ['src/php/bin/run_tests.sh'],
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+ ]
- def pre_build_steps(self):
- return []
+ def pre_build_steps(self):
+ return []
- def make_targets(self):
- return ['static_c', 'shared_c']
+ def make_targets(self):
+ return ['static_c', 'shared_c']
- def make_options(self):
- return self._make_options;
+ def make_options(self):
+ return self._make_options
- def build_steps(self):
- return [['tools/run_tests/helper_scripts/build_php.sh']]
+ def build_steps(self):
+ return [['tools/run_tests/helper_scripts/build_php.sh']]
- def post_tests_steps(self):
- return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
+ def post_tests_steps(self):
+ return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
- def makefile_name(self):
- return 'Makefile'
+ def makefile_name(self):
+ return 'Makefile'
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
+ self.args.arch)
- def __str__(self):
- return 'php'
+ def __str__(self):
+ return 'php'
class Php7Language(object):
- def configure(self, config, args):
- self.config = config
- self.args = args
- _check_compiler(self.args.compiler, ['default'])
- self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
+ self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+
+ def test_specs(self):
+ return [
+ self.config.job_spec(
+ ['src/php/bin/run_tests.sh'],
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+ ]
- def test_specs(self):
- return [self.config.job_spec(['src/php/bin/run_tests.sh'],
- environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+ def pre_build_steps(self):
+ return []
- def pre_build_steps(self):
- return []
+ def make_targets(self):
+ return ['static_c', 'shared_c']
- def make_targets(self):
- return ['static_c', 'shared_c']
+ def make_options(self):
+ return self._make_options
- def make_options(self):
- return self._make_options;
+ def build_steps(self):
+ return [['tools/run_tests/helper_scripts/build_php.sh']]
- def build_steps(self):
- return [['tools/run_tests/helper_scripts/build_php.sh']]
+ def post_tests_steps(self):
+ return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
- def post_tests_steps(self):
- return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
+ def makefile_name(self):
+ return 'Makefile'
- def makefile_name(self):
- return 'Makefile'
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
+ self.args.arch)
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(self.args.arch)
+ def __str__(self):
+ return 'php7'
- def __str__(self):
- return 'php7'
+class PythonConfig(
+ collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
+ """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
-class PythonConfig(collections.namedtuple('PythonConfig', [
- 'name', 'build', 'run'])):
- """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
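PythonConfig just bundles a label with two command lines: PythonLanguage.build_steps() returns config.build for each interpreter, and test_specs() wraps config.run in one job spec per test suite. A hedged illustration of building one by hand (the helper-script paths appear in _get_pythons below, but the real configs come from _python_config_generator and carry extra interpreter-specific arguments that are not shown in this diff):

import collections

PythonConfig = collections.namedtuple('PythonConfig', ['name', 'build', 'run'])

cfg = PythonConfig(
    name='py27',
    build=['tools/run_tests/helper_scripts/build_python.sh'],
    run=['tools/run_tests/helper_scripts/run_python.sh'])
print(cfg.name)   # label used in test shortnames, e.g. 'py27.test.<suite>'
print(cfg.build)  # command executed once as a build step
print(cfg.run)    # command executed once per test suite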
class PythonLanguage(object):
- def configure(self, config, args):
- self.config = config
- self.args = args
- self.pythons = self._get_pythons(self.args)
-
- def test_specs(self):
- # load list of known test suites
- with open('src/python/grpcio_tests/tests/tests.json') as tests_json_file:
- tests_json = json.load(tests_json_file)
- environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
- return [self.config.job_spec(
- config.run,
- timeout_seconds=5*60,
- environ=dict(list(environment.items()) +
- [('GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
- shortname='%s.test.%s' % (config.name, suite_name),)
- for suite_name in tests_json
- for config in self.pythons]
-
- def pre_build_steps(self):
- return []
-
- def make_targets(self):
- return []
-
- def make_options(self):
- return []
-
- def build_steps(self):
- return [config.build for config in self.pythons]
-
- def post_tests_steps(self):
- if self.config.build_config != 'gcov':
- return []
- else:
- return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ self.pythons = self._get_pythons(self.args)
+
+ def test_specs(self):
+ # load list of known test suites
+ with open(
+ 'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
+ tests_json = json.load(tests_json_file)
+ environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
+ return [
+ self.config.job_spec(
+ config.run,
+ timeout_seconds=5 * 60,
+ environ=dict(
+ list(environment.items()) + [(
+ 'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
+ shortname='%s.test.%s' % (config.name, suite_name),)
+ for suite_name in tests_json for config in self.pythons
+ ]
+
+ def pre_build_steps(self):
+ return []
+
+ def make_targets(self):
+ return []
+
+ def make_options(self):
+ return []
+
+ def build_steps(self):
+ return [config.build for config in self.pythons]
+
+ def post_tests_steps(self):
+ if self.config.build_config != 'gcov':
+ return []
+ else:
+ return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
- def makefile_name(self):
- return 'Makefile'
+ def makefile_name(self):
+ return 'Makefile'
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/python_%s_%s' % (
+ self.python_manager_name(), _docker_arch_suffix(self.args.arch))
- def python_manager_name(self):
- if self.args.compiler in ['python3.5', 'python3.6']:
- return 'pyenv'
- elif self.args.compiler == 'python_alpine':
- return 'alpine'
- else:
- return 'jessie'
+ def python_manager_name(self):
+ if self.args.compiler in ['python3.5', 'python3.6']:
+ return 'pyenv'
+ elif self.args.compiler == 'python_alpine':
+ return 'alpine'
+ else:
+ return 'jessie'
- def _get_pythons(self, args):
- if args.arch == 'x86':
- bits = '32'
- else:
- bits = '64'
-
- if os.name == 'nt':
- shell = ['bash']
- builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')]
- builder_prefix_arguments = ['MINGW{}'.format(bits)]
- venv_relative_python = ['Scripts/python.exe']
- toolchain = ['mingw32']
- else:
- shell = []
- builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')]
- builder_prefix_arguments = []
- venv_relative_python = ['bin/python']
- toolchain = ['unix']
-
- runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')]
- config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
- venv_relative_python, toolchain, runner)
- python27_config = _python_config_generator(name='py27', major='2',
- minor='7', bits=bits,
- config_vars=config_vars)
- python34_config = _python_config_generator(name='py34', major='3',
- minor='4', bits=bits,
- config_vars=config_vars)
- python35_config = _python_config_generator(name='py35', major='3',
- minor='5', bits=bits,
- config_vars=config_vars)
- python36_config = _python_config_generator(name='py36', major='3',
- minor='6', bits=bits,
- config_vars=config_vars)
- pypy27_config = _pypy_config_generator(name='pypy', major='2',
- config_vars=config_vars)
- pypy32_config = _pypy_config_generator(name='pypy3', major='3',
- config_vars=config_vars)
-
- if args.compiler == 'default':
- if os.name == 'nt':
- return (python35_config,)
- else:
- return (python27_config, python34_config,)
- elif args.compiler == 'python2.7':
- return (python27_config,)
- elif args.compiler == 'python3.4':
- return (python34_config,)
- elif args.compiler == 'python3.5':
- return (python35_config,)
- elif args.compiler == 'python3.6':
- return (python36_config,)
- elif args.compiler == 'pypy':
- return (pypy27_config,)
- elif args.compiler == 'pypy3':
- return (pypy32_config,)
- elif args.compiler == 'python_alpine':
- return (python27_config,)
- elif args.compiler == 'all_the_cpythons':
- return (python27_config, python34_config, python35_config,
- python36_config,)
- else:
- raise Exception('Compiler %s not supported.' % args.compiler)
+ def _get_pythons(self, args):
+ if args.arch == 'x86':
+ bits = '32'
+ else:
+ bits = '64'
+
+ if os.name == 'nt':
+ shell = ['bash']
+ builder = [
+ os.path.abspath(
+ 'tools/run_tests/helper_scripts/build_python_msys2.sh')
+ ]
+ builder_prefix_arguments = ['MINGW{}'.format(bits)]
+ venv_relative_python = ['Scripts/python.exe']
+ toolchain = ['mingw32']
+ else:
+ shell = []
+ builder = [
+ os.path.abspath(
+ 'tools/run_tests/helper_scripts/build_python.sh')
+ ]
+ builder_prefix_arguments = []
+ venv_relative_python = ['bin/python']
+ toolchain = ['unix']
+
+ runner = [
+ os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
+ ]
+ config_vars = _PythonConfigVars(shell, builder,
+ builder_prefix_arguments,
+ venv_relative_python, toolchain, runner)
+ python27_config = _python_config_generator(
+ name='py27',
+ major='2',
+ minor='7',
+ bits=bits,
+ config_vars=config_vars)
+ python34_config = _python_config_generator(
+ name='py34',
+ major='3',
+ minor='4',
+ bits=bits,
+ config_vars=config_vars)
+ python35_config = _python_config_generator(
+ name='py35',
+ major='3',
+ minor='5',
+ bits=bits,
+ config_vars=config_vars)
+ python36_config = _python_config_generator(
+ name='py36',
+ major='3',
+ minor='6',
+ bits=bits,
+ config_vars=config_vars)
+ pypy27_config = _pypy_config_generator(
+ name='pypy', major='2', config_vars=config_vars)
+ pypy32_config = _pypy_config_generator(
+ name='pypy3', major='3', config_vars=config_vars)
+
+ if args.compiler == 'default':
+ if os.name == 'nt':
+ return (python35_config,)
+ else:
+ return (python27_config, python34_config,)
+ elif args.compiler == 'python2.7':
+ return (python27_config,)
+ elif args.compiler == 'python3.4':
+ return (python34_config,)
+ elif args.compiler == 'python3.5':
+ return (python35_config,)
+ elif args.compiler == 'python3.6':
+ return (python36_config,)
+ elif args.compiler == 'pypy':
+ return (pypy27_config,)
+ elif args.compiler == 'pypy3':
+ return (pypy32_config,)
+ elif args.compiler == 'python_alpine':
+ return (python27_config,)
+ elif args.compiler == 'all_the_cpythons':
+ return (python27_config, python34_config, python35_config,
+ python36_config,)
+ else:
+ raise Exception('Compiler %s not supported.' % args.compiler)
- def __str__(self):
- return 'python'
+ def __str__(self):
+ return 'python'
class RubyLanguage(object):
- def configure(self, config, args):
- self.config = config
- self.args = args
- _check_compiler(self.args.compiler, ['default'])
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
- def test_specs(self):
- tests = [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
- timeout_seconds=10*60,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
- tests.append(self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
- timeout_seconds=10*60,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS))
- return tests
+ def test_specs(self):
+ tests = [
+ self.config.job_spec(
+ ['tools/run_tests/helper_scripts/run_ruby.sh'],
+ timeout_seconds=10 * 60,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+ ]
+ tests.append(
+ self.config.job_spec(
+ ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
+ timeout_seconds=10 * 60,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+ return tests
- def pre_build_steps(self):
- return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
+ def pre_build_steps(self):
+ return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
- def make_targets(self):
- return []
+ def make_targets(self):
+ return []
- def make_options(self):
- return []
+ def make_options(self):
+ return []
- def build_steps(self):
- return [['tools/run_tests/helper_scripts/build_ruby.sh']]
+ def build_steps(self):
+ return [['tools/run_tests/helper_scripts/build_ruby.sh']]
- def post_tests_steps(self):
- return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
+ def post_tests_steps(self):
+ return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
- def makefile_name(self):
- return 'Makefile'
+ def makefile_name(self):
+ return 'Makefile'
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
+ self.args.arch)
- def __str__(self):
- return 'ruby'
+ def __str__(self):
+ return 'ruby'
class CSharpLanguage(object):
- def __init__(self):
- self.platform = platform_string()
-
- def configure(self, config, args):
- self.config = config
- self.args = args
- if self.platform == 'windows':
- _check_compiler(self.args.compiler, ['coreclr', 'default'])
- _check_arch(self.args.arch, ['default'])
- self._cmake_arch_option = 'x64'
- self._make_options = []
- else:
- _check_compiler(self.args.compiler, ['default', 'coreclr'])
- self._docker_distro = 'jessie'
-
- if self.platform == 'mac':
- # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
- self._make_options = ['EMBED_OPENSSL=true']
- if self.args.compiler != 'coreclr':
- # On Mac, official distribution of mono is 32bit.
- self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
- else:
- self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+ def __init__(self):
+ self.platform = platform_string()
- def test_specs(self):
- with open('src/csharp/tests.json') as f:
- tests_by_assembly = json.load(f)
-
- msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
- nunit_args = ['--labels=All', '--noresult', '--workers=1']
- assembly_subdir = 'bin/%s' % msbuild_config
- assembly_extension = '.exe'
-
- if self.args.compiler == 'coreclr':
- assembly_subdir += '/netcoreapp1.0'
- runtime_cmd = ['dotnet', 'exec']
- assembly_extension = '.dll'
- else:
- assembly_subdir += '/net45'
- if self.platform == 'windows':
- runtime_cmd = []
- else:
- runtime_cmd = ['mono']
-
- specs = []
- for assembly in six.iterkeys(tests_by_assembly):
- assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
- assembly_subdir,
- assembly,
- assembly_extension)
- if self.config.build_config != 'gcov' or self.platform != 'windows':
- # normally, run each test as a separate process
- for test in tests_by_assembly[assembly]:
- cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
- specs.append(self.config.job_spec(cmdline,
- shortname='csharp.%s' % test,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS))
- else:
- # For C# test coverage, run all tests from the same assembly at once
- # using OpenCover.Console (only works on Windows).
- cmdline = ['src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
- '-target:%s' % assembly_file,
- '-targetdir:src\\csharp',
- '-targetargs:%s' % ' '.join(nunit_args),
- '-filter:+[Grpc.Core]*',
- '-register:user',
- '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly]
-
- # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
- # to prevent problems with registering the profiler.
- run_exclusive = 1000000
- specs.append(self.config.job_spec(cmdline,
- shortname='csharp.coverage.%s' % assembly,
- cpu_cost=run_exclusive,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS))
- return specs
-
- def pre_build_steps(self):
- if self.platform == 'windows':
- return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat', self._cmake_arch_option]]
- else:
- return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ if self.platform == 'windows':
+ _check_compiler(self.args.compiler, ['coreclr', 'default'])
+ _check_arch(self.args.arch, ['default'])
+ self._cmake_arch_option = 'x64'
+ self._make_options = []
+ else:
+ _check_compiler(self.args.compiler, ['default', 'coreclr'])
+ self._docker_distro = 'jessie'
+
+ if self.platform == 'mac':
+ # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
+ self._make_options = ['EMBED_OPENSSL=true']
+ if self.args.compiler != 'coreclr':
+ # On Mac, official distribution of mono is 32bit.
+ self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
+ else:
+ self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+
+ def test_specs(self):
+ with open('src/csharp/tests.json') as f:
+ tests_by_assembly = json.load(f)
+
+ msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
+ nunit_args = ['--labels=All', '--noresult', '--workers=1']
+ assembly_subdir = 'bin/%s' % msbuild_config
+ assembly_extension = '.exe'
+
+ if self.args.compiler == 'coreclr':
+ assembly_subdir += '/netcoreapp1.0'
+ runtime_cmd = ['dotnet', 'exec']
+ assembly_extension = '.dll'
+ else:
+ assembly_subdir += '/net45'
+ if self.platform == 'windows':
+ runtime_cmd = []
+ else:
+ runtime_cmd = ['mono']
+
+ specs = []
+ for assembly in six.iterkeys(tests_by_assembly):
+ assembly_file = 'src/csharp/%s/%s/%s%s' % (
+ assembly, assembly_subdir, assembly, assembly_extension)
+ if self.config.build_config != 'gcov' or self.platform != 'windows':
+ # normally, run each test as a separate process
+ for test in tests_by_assembly[assembly]:
+ cmdline = runtime_cmd + [assembly_file, '--test=%s' % test
+ ] + nunit_args
+ specs.append(
+ self.config.job_spec(
+ cmdline,
+ shortname='csharp.%s' % test,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+ else:
+ # For C# test coverage, run all tests from the same assembly at once
+ # using OpenCover.Console (only works on Windows).
+ cmdline = [
+ 'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
+ '-target:%s' % assembly_file, '-targetdir:src\\csharp',
+ '-targetargs:%s' % ' '.join(nunit_args),
+ '-filter:+[Grpc.Core]*', '-register:user',
+ '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
+ ]
+
+ # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
+ # to prevent problems with registering the profiler.
+ run_exclusive = 1000000
+ specs.append(
+ self.config.job_spec(
+ cmdline,
+ shortname='csharp.coverage.%s' % assembly,
+ cpu_cost=run_exclusive,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+ return specs
+
+ def pre_build_steps(self):
+ if self.platform == 'windows':
+ return [[
+ 'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
+ self._cmake_arch_option
+ ]]
+ else:
+ return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
- def make_targets(self):
- return ['grpc_csharp_ext']
+ def make_targets(self):
+ return ['grpc_csharp_ext']
- def make_options(self):
- return self._make_options;
+ def make_options(self):
+ return self._make_options
- def build_steps(self):
- if self.platform == 'windows':
- return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
- else:
- return [['tools/run_tests/helper_scripts/build_csharp.sh']]
+ def build_steps(self):
+ if self.platform == 'windows':
+ return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
+ else:
+ return [['tools/run_tests/helper_scripts/build_csharp.sh']]
- def post_tests_steps(self):
- if self.platform == 'windows':
- return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
- else:
- return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
+ def post_tests_steps(self):
+ if self.platform == 'windows':
+ return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
+ else:
+ return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
- def makefile_name(self):
- if self.platform == 'windows':
- return 'cmake/build/%s/Makefile' % self._cmake_arch_option
- else:
- return 'Makefile'
+ def makefile_name(self):
+ if self.platform == 'windows':
+ return 'cmake/build/%s/Makefile' % self._cmake_arch_option
+ else:
+ return 'Makefile'
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
- _docker_arch_suffix(self.args.arch))
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/csharp_%s_%s' % (
+ self._docker_distro, _docker_arch_suffix(self.args.arch))
- def __str__(self):
- return 'csharp'
+ def __str__(self):
+ return 'csharp'
class ObjCLanguage(object):
- def configure(self, config, args):
- self.config = config
- self.args = args
- _check_compiler(self.args.compiler, ['default'])
-
- def test_specs(self):
- return [
- self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
- timeout_seconds=60*60,
- shortname='objc-tests',
- cpu_cost=1e6,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS),
- self.config.job_spec(['src/objective-c/tests/run_plugin_tests.sh'],
- timeout_seconds=60*60,
- shortname='objc-plugin-tests',
- cpu_cost=1e6,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS),
- self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
- timeout_seconds=10*60,
- shortname='objc-build-example-helloworld',
- cpu_cost=1e6,
- environ={'SCHEME': 'HelloWorld',
- 'EXAMPLE_PATH': 'examples/objective-c/helloworld'}),
- self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
- timeout_seconds=10*60,
- shortname='objc-build-example-routeguide',
- cpu_cost=1e6,
- environ={'SCHEME': 'RouteGuideClient',
- 'EXAMPLE_PATH': 'examples/objective-c/route_guide'}),
- self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
- timeout_seconds=10*60,
- shortname='objc-build-example-authsample',
- cpu_cost=1e6,
- environ={'SCHEME': 'AuthSample',
- 'EXAMPLE_PATH': 'examples/objective-c/auth_sample'}),
- self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
- timeout_seconds=10*60,
- shortname='objc-build-example-sample',
- cpu_cost=1e6,
- environ={'SCHEME': 'Sample',
- 'EXAMPLE_PATH': 'src/objective-c/examples/Sample'}),
- self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
- timeout_seconds=10*60,
- shortname='objc-build-example-sample-frameworks',
- cpu_cost=1e6,
- environ={'SCHEME': 'Sample',
- 'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
- 'FRAMEWORKS': 'YES'}),
- self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
- timeout_seconds=10*60,
- shortname='objc-build-example-switftsample',
- cpu_cost=1e6,
- environ={'SCHEME': 'SwiftSample',
- 'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'}),
- ]
-
- def pre_build_steps(self):
- return []
-
- def make_targets(self):
- return ['interop_server']
-
- def make_options(self):
- return []
-
- def build_steps(self):
- return [['src/objective-c/tests/build_tests.sh']]
-
- def post_tests_steps(self):
- return []
-
- def makefile_name(self):
- return 'Makefile'
-
- def dockerfile_dir(self):
- return None
-
- def __str__(self):
- return 'objc'
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
+
+ def test_specs(self):
+ return [
+ self.config.job_spec(
+ ['src/objective-c/tests/run_tests.sh'],
+ timeout_seconds=60 * 60,
+ shortname='objc-tests',
+ cpu_cost=1e6,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS),
+ self.config.job_spec(
+ ['src/objective-c/tests/run_plugin_tests.sh'],
+ timeout_seconds=60 * 60,
+ shortname='objc-plugin-tests',
+ cpu_cost=1e6,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS),
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=10 * 60,
+ shortname='objc-build-example-helloworld',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'HelloWorld',
+ 'EXAMPLE_PATH': 'examples/objective-c/helloworld'
+ }),
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=10 * 60,
+ shortname='objc-build-example-routeguide',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'RouteGuideClient',
+ 'EXAMPLE_PATH': 'examples/objective-c/route_guide'
+ }),
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=10 * 60,
+ shortname='objc-build-example-authsample',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'AuthSample',
+ 'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
+ }),
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=10 * 60,
+ shortname='objc-build-example-sample',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'Sample',
+ 'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
+ }),
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=10 * 60,
+ shortname='objc-build-example-sample-frameworks',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'Sample',
+ 'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
+ 'FRAMEWORKS': 'YES'
+ }),
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=10 * 60,
+ shortname='objc-build-example-switftsample',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'SwiftSample',
+ 'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
+ }),
+ ]
+
+ def pre_build_steps(self):
+ return []
+
+ def make_targets(self):
+ return ['interop_server']
+
+ def make_options(self):
+ return []
+
+ def build_steps(self):
+ return [['src/objective-c/tests/build_tests.sh']]
+
+ def post_tests_steps(self):
+ return []
+
+ def makefile_name(self):
+ return 'Makefile'
+
+ def dockerfile_dir(self):
+ return None
+
+ def __str__(self):
+ return 'objc'
class Sanity(object):
- def configure(self, config, args):
- self.config = config
- self.args = args
- _check_compiler(self.args.compiler, ['default'])
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
- def test_specs(self):
- import yaml
- with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
- environ={'TEST': 'true'}
- if _is_use_docker_child():
- environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
- return [self.config.job_spec(cmd['script'].split(),
- timeout_seconds=30*60,
- environ=environ,
- cpu_cost=cmd.get('cpu_cost', 1))
- for cmd in yaml.load(f)]
+ def test_specs(self):
+ import yaml
+ with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
+ environ = {'TEST': 'true'}
+ if _is_use_docker_child():
+ environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
+ return [
+ self.config.job_spec(
+ cmd['script'].split(),
+ timeout_seconds=30 * 60,
+ environ=environ,
+ cpu_cost=cmd.get('cpu_cost', 1)) for cmd in yaml.load(f)
+ ]
- def pre_build_steps(self):
- return []
+ def pre_build_steps(self):
+ return []
- def make_targets(self):
- return ['run_dep_checks']
+ def make_targets(self):
+ return ['run_dep_checks']
- def make_options(self):
- return []
+ def make_options(self):
+ return []
- def build_steps(self):
- return []
+ def build_steps(self):
+ return []
- def post_tests_steps(self):
- return []
+ def post_tests_steps(self):
+ return []
- def makefile_name(self):
- return 'Makefile'
+ def makefile_name(self):
+ return 'Makefile'
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/sanity'
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/sanity'
+
+ def __str__(self):
+ return 'sanity'
- def __str__(self):
- return 'sanity'
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
- _CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
-
+ _CONFIGS = dict((cfg['config'], Config(**cfg))
+ for cfg in ast.literal_eval(f.read()))
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
@@ -1033,60 +1157,61 @@ _LANGUAGES = {
'python': PythonLanguage(),
'ruby': RubyLanguage(),
'csharp': CSharpLanguage(),
- 'objc' : ObjCLanguage(),
+ 'objc': ObjCLanguage(),
'sanity': Sanity()
- }
-
+}
_MSBUILD_CONFIG = {
'dbg': 'Debug',
'opt': 'Release',
'gcov': 'Debug',
- }
+}
def _windows_arch_option(arch):
- """Returns msbuild cmdline option for selected architecture."""
- if arch == 'default' or arch == 'x86':
- return '/p:Platform=Win32'
- elif arch == 'x64':
- return '/p:Platform=x64'
- else:
- print('Architecture %s not supported.' % arch)
- sys.exit(1)
+ """Returns msbuild cmdline option for selected architecture."""
+ if arch == 'default' or arch == 'x86':
+ return '/p:Platform=Win32'
+ elif arch == 'x64':
+ return '/p:Platform=x64'
+ else:
+ print('Architecture %s not supported.' % arch)
+ sys.exit(1)
def _check_arch_option(arch):
- """Checks that architecture option is valid."""
- if platform_string() == 'windows':
- _windows_arch_option(arch)
- elif platform_string() == 'linux':
- # On linux, we need to be running under docker with the right architecture.
- runtime_arch = platform.architecture()[0]
- if arch == 'default':
- return
- elif runtime_arch == '64bit' and arch == 'x64':
- return
- elif runtime_arch == '32bit' and arch == 'x86':
- return
+ """Checks that architecture option is valid."""
+ if platform_string() == 'windows':
+ _windows_arch_option(arch)
+ elif platform_string() == 'linux':
+ # On linux, we need to be running under docker with the right architecture.
+ runtime_arch = platform.architecture()[0]
+ if arch == 'default':
+ return
+ elif runtime_arch == '64bit' and arch == 'x64':
+ return
+ elif runtime_arch == '32bit' and arch == 'x86':
+ return
+ else:
+ print('Architecture %s does not match current runtime architecture.'
+ % arch)
+ sys.exit(1)
else:
- print('Architecture %s does not match current runtime architecture.' % arch)
- sys.exit(1)
- else:
- if args.arch != 'default':
- print('Architecture %s not supported on current platform.' % args.arch)
- sys.exit(1)
+ if args.arch != 'default':
+ print('Architecture %s not supported on current platform.' %
+ args.arch)
+ sys.exit(1)
def _docker_arch_suffix(arch):
- """Returns suffix to dockerfile dir to use."""
- if arch == 'default' or arch == 'x64':
- return 'x64'
- elif arch == 'x86':
- return 'x86'
- else:
- print('Architecture %s not supported with current settings.' % arch)
- sys.exit(1)
+ """Returns suffix to dockerfile dir to use."""
+ if arch == 'default' or arch == 'x64':
+ return 'x64'
+ elif arch == 'x86':
+ return 'x86'
+ else:
+ print('Architecture %s not supported with current settings.' % arch)
+ sys.exit(1)
def runs_per_test_type(arg_str):
@@ -1111,478 +1236,581 @@ def runs_per_test_type(arg_str):
def percent_type(arg_str):
- pct = float(arg_str)
- if pct > 100 or pct < 0:
- raise argparse.ArgumentTypeError(
- "'%f' is not a valid percentage in the [0, 100] range" % pct)
- return pct
+ pct = float(arg_str)
+ if pct > 100 or pct < 0:
+ raise argparse.ArgumentTypeError(
+ "'%f' is not a valid percentage in the [0, 100] range" % pct)
+ return pct
+
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
- return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
+ return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
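As the comment notes, this mirrors math.isclose from Python 3.5+, with a default relative tolerance of 1e-09 and no absolute tolerance; it is used below to decide whether --sample_percent is effectively 100. A few illustrative checks with arbitrary values:

assert isclose(100.0, 100.0)
assert isclose(1.0, 1.0 + 1e-10)   # inside the default relative tolerance
assert not isclose(100.0, 99.0)    # 1.0 apart is far outside 1e-09 * 100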
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
-argp.add_argument('-c', '--config',
- choices=sorted(_CONFIGS.keys()),
- default='opt')
-argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
- help='A positive integer or "inf". If "inf", all tests will run in an '
- 'infinite loop. Especially useful in combination with "-f"')
+argp.add_argument(
+ '-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
+argp.add_argument(
+ '-n',
+ '--runs_per_test',
+ default=1,
+ type=runs_per_test_type,
+ help='A positive integer or "inf". If "inf", all tests will run in an '
+ 'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
-argp.add_argument('-p', '--sample_percent', default=100.0, type=percent_type,
- help='Run a random sample with that percentage of tests')
-argp.add_argument('-f', '--forever',
- default=False,
- action='store_const',
- const=True)
-argp.add_argument('-t', '--travis',
- default=False,
- action='store_const',
- const=True)
-argp.add_argument('--newline_on_success',
- default=False,
- action='store_const',
- const=True)
-argp.add_argument('-l', '--language',
- choices=['all'] + sorted(_LANGUAGES.keys()),
- nargs='+',
- default=['all'])
-argp.add_argument('-S', '--stop_on_failure',
- default=False,
- action='store_const',
- const=True)
-argp.add_argument('--use_docker',
- default=False,
- action='store_const',
- const=True,
- help='Run all the tests under docker. That provides ' +
- 'additional isolation and prevents the need to install ' +
- 'language specific prerequisites. Only available on Linux.')
-argp.add_argument('--allow_flakes',
- default=False,
- action='store_const',
- const=True,
- help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
-argp.add_argument('--arch',
- choices=['default', 'x86', 'x64'],
- default='default',
- help='Selects architecture to target. For some platforms "default" is the only supported choice.')
-argp.add_argument('--compiler',
- choices=['default',
- 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
- 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
- 'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine', 'all_the_cpythons',
- 'electron1.3', 'electron1.6',
- 'coreclr',
- 'cmake', 'cmake_vs2015', 'cmake_vs2017'],
- default='default',
- help='Selects compiler to use. Allowed values depend on the platform and language.')
-argp.add_argument('--iomgr_platform',
- choices=['native', 'uv'],
- default='native',
- help='Selects iomgr platform to build on')
-argp.add_argument('--build_only',
- default=False,
- action='store_const',
- const=True,
- help='Perform all the build steps but don\'t run any tests.')
-argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
- help='Measure the cpu costs of tests')
-argp.add_argument('--update_submodules', default=[], nargs='*',
- help='Update some submodules before building. If any are updated, also run generate_projects. ' +
- 'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
+argp.add_argument(
+ '-p',
+ '--sample_percent',
+ default=100.0,
+ type=percent_type,
+ help='Run a random sample with that percentage of tests')
+argp.add_argument(
+ '-f', '--forever', default=False, action='store_const', const=True)
+argp.add_argument(
+ '-t', '--travis', default=False, action='store_const', const=True)
+argp.add_argument(
+ '--newline_on_success', default=False, action='store_const', const=True)
+argp.add_argument(
+ '-l',
+ '--language',
+ choices=['all'] + sorted(_LANGUAGES.keys()),
+ nargs='+',
+ default=['all'])
+argp.add_argument(
+ '-S', '--stop_on_failure', default=False, action='store_const', const=True)
+argp.add_argument(
+ '--use_docker',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run all the tests under docker. That provides ' +
+ 'additional isolation and prevents the need to install ' +
+ 'language specific prerequisites. Only available on Linux.')
+argp.add_argument(
+ '--allow_flakes',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+)
+argp.add_argument(
+ '--arch',
+ choices=['default', 'x86', 'x64'],
+ default='default',
+ help='Selects architecture to target. For some platforms "default" is the only supported choice.'
+)
+argp.add_argument(
+ '--compiler',
+ choices=[
+ 'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
+ 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'python2.7',
+ 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine',
+ 'all_the_cpythons', 'electron1.3', 'electron1.6', 'coreclr', 'cmake',
+ 'cmake_vs2015', 'cmake_vs2017'
+ ],
+ default='default',
+ help='Selects compiler to use. Allowed values depend on the platform and language.'
+)
+argp.add_argument(
+ '--iomgr_platform',
+ choices=['native', 'uv'],
+ default='native',
+ help='Selects iomgr platform to build on')
+argp.add_argument(
+ '--build_only',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Perform all the build steps but don\'t run any tests.')
+argp.add_argument(
+ '--measure_cpu_costs',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Measure the cpu costs of tests')
+argp.add_argument(
+ '--update_submodules',
+ default=[],
+ nargs='*',
+ help='Update some submodules before building. If any are updated, also run generate_projects. '
+ +
+ 'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
+)
argp.add_argument('-a', '--antagonists', default=0, type=int)
-argp.add_argument('-x', '--xml_report', default=None, type=str,
- help='Generates a JUnit-compatible XML report')
-argp.add_argument('--report_suite_name', default='tests', type=str,
- help='Test suite name to use in generated JUnit XML report')
-argp.add_argument('--quiet_success',
- default=False,
- action='store_const',
- const=True,
- help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. ' +
- 'Useful when running many iterations of each test (argument -n).')
-argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
- help='Don\'t try to iterate over many polling strategies when they exist')
-argp.add_argument('--force_use_pollers', default=None, type=str,
- help='Only use the specified comma-delimited list of polling engines. '
- 'Example: --force_use_pollers epollsig,poll '
- ' (This flag has no effect if --force_default_poller flag is also used)')
-argp.add_argument('--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
-argp.add_argument('--bq_result_table',
- default='',
- type=str,
- nargs='?',
- help='Upload test results to a specified BQ table.')
-argp.add_argument('--disable_auto_set_flakes', default=False, const=True, action='store_const',
- help='Disable rerunning historically flaky tests')
+argp.add_argument(
+ '-x',
+ '--xml_report',
+ default=None,
+ type=str,
+ help='Generates a JUnit-compatible XML report')
+argp.add_argument(
+ '--report_suite_name',
+ default='tests',
+ type=str,
+ help='Test suite name to use in generated JUnit XML report')
+argp.add_argument(
+ '--quiet_success',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
+ + 'Useful when running many iterations of each test (argument -n).')
+argp.add_argument(
+ '--force_default_poller',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Don\'t try to iterate over many polling strategies when they exist')
+argp.add_argument(
+ '--force_use_pollers',
+ default=None,
+ type=str,
+ help='Only use the specified comma-delimited list of polling engines. '
+ 'Example: --force_use_pollers epollsig,poll '
+ ' (This flag has no effect if --force_default_poller flag is also used)')
+argp.add_argument(
+ '--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
+argp.add_argument(
+ '--bq_result_table',
+ default='',
+ type=str,
+ nargs='?',
+ help='Upload test results to a specified BQ table.')
+argp.add_argument(
+ '--disable_auto_set_flakes',
+ default=False,
+ const=True,
+ action='store_const',
+ help='Disable rerunning historically flaky tests')
args = argp.parse_args()
flaky_tests = set()
shortname_to_cpu = {}
if not args.disable_auto_set_flakes:
- try:
- for test in get_bqtest_data():
- if test.flaky: flaky_tests.add(test.name)
- if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
- except:
- print("Unexpected error getting flaky tests: %s" % traceback.format_exc())
+ try:
+ for test in get_bqtest_data():
+ if test.flaky: flaky_tests.add(test.name)
+ if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
+ except:
+ print("Unexpected error getting flaky tests: %s" %
+ traceback.format_exc())
if args.force_default_poller:
- _POLLING_STRATEGIES = {}
+ _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
- _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
+ _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
- spec = spec.split(':', 1)
- if len(spec) == 1:
- submodule = spec[0]
- branch = 'master'
- elif len(spec) == 2:
- submodule = spec[0]
- branch = spec[1]
- cwd = 'third_party/%s' % submodule
- def git(cmd, cwd=cwd):
- print('in %s: git %s' % (cwd, cmd))
- run_shell_command('git %s' % cmd, cwd=cwd)
- git('fetch')
- git('checkout %s' % branch)
- git('pull origin %s' % branch)
- if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
- need_to_regenerate_projects = True
+ spec = spec.split(':', 1)
+ if len(spec) == 1:
+ submodule = spec[0]
+ branch = 'master'
+ elif len(spec) == 2:
+ submodule = spec[0]
+ branch = spec[1]
+ cwd = 'third_party/%s' % submodule
+
+ def git(cmd, cwd=cwd):
+ print('in %s: git %s' % (cwd, cmd))
+ run_shell_command('git %s' % cmd, cwd=cwd)
+
+ git('fetch')
+ git('checkout %s' % branch)
+ git('pull origin %s' % branch)
+ if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
+ need_to_regenerate_projects = True
if need_to_regenerate_projects:
- if jobset.platform_string() == 'linux':
- run_shell_command('tools/buildgen/generate_projects.sh')
- else:
- print('WARNING: may need to regenerate projects, but since we are not on')
- print(' Linux this step is being skipped. Compilation MAY fail.')
-
+ if jobset.platform_string() == 'linux':
+ run_shell_command('tools/buildgen/generate_projects.sh')
+ else:
+ print(
+ 'WARNING: may need to regenerate projects, but since we are not on')
+ print(
+ ' Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
if args.travis:
- _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
+ _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
if 'all' in args.language:
- lang_list = _LANGUAGES.keys()
+ lang_list = _LANGUAGES.keys()
else:
- lang_list = args.language
+ lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
- for bad in ['objc', 'sanity']:
- if bad in lang_list:
- lang_list.remove(bad)
+ for bad in ['objc', 'sanity']:
+ if bad in lang_list:
+ lang_list.remove(bad)
languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
- l.configure(run_config, args)
+ l.configure(run_config, args)
-language_make_options=[]
+language_make_options = []
if any(language.make_options() for language in languages):
- if not 'gcov' in args.config and len(languages) != 1:
- print('languages with custom make options cannot be built simultaneously with other languages')
- sys.exit(1)
- else:
- # Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
- # together, and is only used under gcov. All other configs should build languages individually.
- language_make_options = list(set([make_option for lang in languages for make_option in lang.make_options()]))
+ if not 'gcov' in args.config and len(languages) != 1:
+ print(
+ 'languages with custom make options cannot be built simultaneously with other languages'
+ )
+ sys.exit(1)
+ else:
+ # Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
+ # together, and is only used under gcov. All other configs should build languages individually.
+ language_make_options = list(
+ set([
+ make_option
+ for lang in languages for make_option in lang.make_options()
+ ]))
if args.use_docker:
- if not args.travis:
- print('Seen --use_docker flag, will run tests under docker.')
- print('')
- print('IMPORTANT: The changes you are testing need to be locally committed')
- print('because only the committed changes in the current branch will be')
- print('copied to the docker environment.')
- time.sleep(5)
-
- dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
- if len(dockerfile_dirs) > 1:
- if 'gcov' in args.config:
- dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
- print ('Using multilang_jessie_x64 docker image for code coverage for '
- 'all languages.')
+ if not args.travis:
+ print('Seen --use_docker flag, will run tests under docker.')
+ print('')
+ print(
+ 'IMPORTANT: The changes you are testing need to be locally committed'
+ )
+ print(
+ 'because only the committed changes in the current branch will be')
+ print('copied to the docker environment.')
+ time.sleep(5)
+
+ dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
+ if len(dockerfile_dirs) > 1:
+ if 'gcov' in args.config:
+ dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
+ print(
+ 'Using multilang_jessie_x64 docker image for code coverage for '
+ 'all languages.')
+ else:
+ print(
+ 'Languages to be tested require running under different docker '
+ 'images.')
+ sys.exit(1)
else:
- print ('Languages to be tested require running under different docker '
- 'images.')
- sys.exit(1)
- else:
- dockerfile_dir = next(iter(dockerfile_dirs))
-
- child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
- run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])
-
- env = os.environ.copy()
- env['RUN_TESTS_COMMAND'] = run_tests_cmd
- env['DOCKERFILE_DIR'] = dockerfile_dir
- env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
- if args.xml_report:
- env['XML_REPORT'] = args.xml_report
- if not args.travis:
- env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
-
- subprocess.check_call('tools/run_tests/dockerize/build_docker_and_run_tests.sh',
- shell=True,
- env=env)
- sys.exit(0)
+ dockerfile_dir = next(iter(dockerfile_dirs))
+
+ child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
+ run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
+ child_argv[1:])
+
+ env = os.environ.copy()
+ env['RUN_TESTS_COMMAND'] = run_tests_cmd
+ env['DOCKERFILE_DIR'] = dockerfile_dir
+ env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
+ if args.xml_report:
+ env['XML_REPORT'] = args.xml_report
+ if not args.travis:
+ env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
+
+ subprocess.check_call(
+ 'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
+ shell=True,
+ env=env)
+ sys.exit(0)
_check_arch_option(args.arch)
+
def make_jobspec(cfg, targets, makefile='Makefile'):
- if platform_string() == 'windows':
- return [jobset.JobSpec(['cmake', '--build', '.',
- '--target', '%s' % target,
- '--config', _MSBUILD_CONFIG[cfg]],
- cwd=os.path.dirname(makefile),
- timeout_seconds=None) for target in targets]
- else:
- if targets and makefile.startswith('cmake/build/'):
- # With cmake, we've passed all the build configuration in the pre-build step already
- return [jobset.JobSpec([os.getenv('MAKE', 'make'),
- '-j', '%d' % args.jobs] +
- targets,
- cwd='cmake/build',
- timeout_seconds=None)]
- if targets:
- return [jobset.JobSpec([os.getenv('MAKE', 'make'),
- '-f', makefile,
- '-j', '%d' % args.jobs,
- 'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
- 'CONFIG=%s' % cfg,
- 'Q='] +
- language_make_options +
- ([] if not args.travis else ['JENKINS_BUILD=1']) +
- targets,
- timeout_seconds=None)]
+ if platform_string() == 'windows':
+ return [
+ jobset.JobSpec(
+ [
+ 'cmake', '--build', '.', '--target', '%s' % target,
+ '--config', _MSBUILD_CONFIG[cfg]
+ ],
+ cwd=os.path.dirname(makefile),
+ timeout_seconds=None) for target in targets
+ ]
else:
- return []
+ if targets and makefile.startswith('cmake/build/'):
+ # With cmake, we've passed all the build configuration in the pre-build step already
+ return [
+ jobset.JobSpec(
+ [os.getenv('MAKE', 'make'), '-j', '%d' % args.jobs] +
+ targets,
+ cwd='cmake/build',
+ timeout_seconds=None)
+ ]
+ if targets:
+ return [
+ jobset.JobSpec(
+ [
+ os.getenv('MAKE', 'make'), '-f', makefile, '-j', '%d' %
+ args.jobs,
+ 'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
+ args.slowdown, 'CONFIG=%s' % cfg, 'Q='
+ ] + language_make_options +
+ ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
+ timeout_seconds=None)
+ ]
+ else:
+ return []
+
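On a unix host the non-cmake branch above assembles an ordinary make invocation. A standalone sketch of the resulting command line, using an arbitrary job count, slowdown factor and target, with language_make_options left empty and the MAKE environment variable assumed unset:

import os

cfg = 'opt'
jobs = 8
slowdown = 1.0
targets = ['grpc_csharp_ext']
cmd = [
    os.getenv('MAKE', 'make'), '-f', 'Makefile', '-j', '%d' % jobs,
    'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % slowdown,
    'CONFIG=%s' % cfg, 'Q='
] + targets
print(' '.join(cmd))
# make -f Makefile -j 8 EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=1.000000 CONFIG=opt Q= grpc_csharp_ext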
make_targets = {}
for l in languages:
- makefile = l.makefile_name()
- make_targets[makefile] = make_targets.get(makefile, set()).union(
- set(l.make_targets()))
+ makefile = l.makefile_name()
+ make_targets[makefile] = make_targets.get(
+ makefile, set()).union(set(l.make_targets()))
+
def build_step_environ(cfg):
- environ = {'CONFIG': cfg}
- msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
- if msbuild_cfg:
- environ['MSBUILD_CONFIG'] = msbuild_cfg
- return environ
-
-build_steps = list(set(
- jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=2)
- for l in languages
- for cmdline in l.pre_build_steps()))
+ environ = {'CONFIG': cfg}
+ msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
+ if msbuild_cfg:
+ environ['MSBUILD_CONFIG'] = msbuild_cfg
+ return environ
+
+
+build_steps = list(
+ set(
+ jobset.JobSpec(
+ cmdline, environ=build_step_environ(build_config), flake_retries=2)
+ for l in languages for cmdline in l.pre_build_steps()))
if make_targets:
- make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
- build_steps.extend(set(make_commands))
-build_steps.extend(set(
- jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
- for l in languages
- for cmdline in l.build_steps()))
-
-post_tests_steps = list(set(
- jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
- for l in languages
- for cmdline in l.post_tests_steps()))
+ make_commands = itertools.chain.from_iterable(
+ make_jobspec(build_config, list(targets), makefile)
+ for (makefile, targets) in make_targets.items())
+ build_steps.extend(set(make_commands))
+build_steps.extend(
+ set(
+ jobset.JobSpec(
+ cmdline,
+ environ=build_step_environ(build_config),
+ timeout_seconds=None)
+ for l in languages for cmdline in l.build_steps()))
+
+post_tests_steps = list(
+ set(
+ jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
+ for l in languages for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
- try:
- version = int(urllib.request.urlopen(
- 'http://localhost:%d/version_number' % legacy_server_port,
- timeout=10).read())
- except:
- pass
- else:
- urllib.request.urlopen(
- 'http://localhost:%d/quitquitquit' % legacy_server_port).read()
+ try:
+ version = int(
+ urllib.request.urlopen(
+ 'http://localhost:%d/version_number' % legacy_server_port,
+ timeout=10).read())
+ except:
+ pass
+ else:
+ urllib.request.urlopen('http://localhost:%d/quitquitquit' %
+ legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
- """Caculate number of runs and failures for a particular test.
+ """Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
- num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
- num_failures = 0
- for jobresult in list_of_results:
- if jobresult.retries > 0:
- num_runs += jobresult.retries
- if jobresult.num_failures > 0:
- num_failures += jobresult.num_failures
- return num_runs, num_failures
+ num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
+ num_failures = 0
+ for jobresult in list_of_results:
+ if jobresult.retries > 0:
+ num_runs += jobresult.retries
+ if jobresult.num_failures > 0:
+ num_failures += jobresult.num_failures
+ return num_runs, num_failures
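A quick worked example of the counting logic above, using a namedtuple as a stand-in for JobResult since only the retries and num_failures attributes are read (the stand-in type is hypothetical):

import collections

FakeJobResult = collections.namedtuple('FakeJobResult',
                                       ['retries', 'num_failures'])
results = [
    FakeJobResult(retries=0, num_failures=0),
    FakeJobResult(retries=2, num_failures=1),
]
# 2 JobResults plus 2 retries gives 4 runs, 1 of which failed.
print(_calculate_num_runs_failures(results))  # (4, 1)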
# _build_and_run results
class BuildAndRunError(object):
- BUILD = object()
- TEST = object()
- POST_TEST = object()
+ BUILD = object()
+ TEST = object()
+ POST_TEST = object()
def _has_epollexclusive():
- binary = 'bins/%s/check_epollexclusive' % args.config
- if not os.path.exists(binary):
- return False
- try:
- subprocess.check_call(binary)
- return True
- except subprocess.CalledProcessError, e:
- return False
- except OSError, e:
- # For languages other than C and Windows the binary won't exist
- return False
+ binary = 'bins/%s/check_epollexclusive' % args.config
+ if not os.path.exists(binary):
+ return False
+ try:
+ subprocess.check_call(binary)
+ return True
+ except subprocess.CalledProcessError, e:
+ return False
+ except OSError, e:
+ # For languages other than C and Windows the binary won't exist
+ return False
# returns a list of things that failed (or an empty list on success)
-def _build_and_run(
- check_cancelled, newline_on_success, xml_report=None, build_only=False):
- """Do one pass of building & running tests."""
- # build latest sequentially
- num_failures, resultset = jobset.run(
- build_steps, maxjobs=1, stop_on_failure=True,
- newline_on_success=newline_on_success, travis=args.travis)
- if num_failures:
- return [BuildAndRunError.BUILD]
-
- if build_only:
- if xml_report:
- report_utils.render_junit_xml_report(resultset, xml_report,
- suite_name=args.report_suite_name)
- return []
-
- if not args.travis and not _has_epollexclusive() and platform_string() in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string()]:
- print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
- _POLLING_STRATEGIES[platform_string()].remove('epollex')
-
- # start antagonists
- antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
- for _ in range(0, args.antagonists)]
- start_port_server.start_port_server()
- resultset = None
- num_test_failures = 0
- try:
- infinite_runs = runs_per_test == 0
- one_run = set(
- spec
- for language in languages
- for spec in language.test_specs()
- if (re.search(args.regex, spec.shortname) and
- (args.regex_exclude == '' or
- not re.search(args.regex_exclude, spec.shortname))))
-  # When running on travis, we want our test runs to be as similar as possible
- # for reproducibility purposes.
- if args.travis and args.max_time <= 0:
- massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
- else:
- # whereas otherwise, we want to shuffle things up to give all tests a
- # chance to run.
- massaged_one_run = list(one_run) # random.sample needs an indexable seq.
- num_jobs = len(massaged_one_run)
- # for a random sample, get as many as indicated by the 'sample_percent'
- # argument. By default this arg is 100, resulting in a shuffle of all
- # jobs.
- sample_size = int(num_jobs * args.sample_percent/100.0)
- massaged_one_run = random.sample(massaged_one_run, sample_size)
- if not isclose(args.sample_percent, 100.0):
- assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
- print("Running %d tests out of %d (~%d%%)" %
- (sample_size, num_jobs, args.sample_percent))
- if infinite_runs:
- assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
- runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
- else itertools.repeat(massaged_one_run, runs_per_test))
- all_runs = itertools.chain.from_iterable(runs_sequence)
-
- if args.quiet_success:
- jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
- num_test_failures, resultset = jobset.run(
- all_runs, check_cancelled, newline_on_success=newline_on_success,
- travis=args.travis, maxjobs=args.jobs, maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
- stop_on_failure=args.stop_on_failure,
- quiet_success=args.quiet_success, max_time=args.max_time)
- if resultset:
- for k, v in sorted(resultset.items()):
- num_runs, num_failures = _calculate_num_runs_failures(v)
- if num_failures > 0:
- if num_failures == num_runs: # what about infinite_runs???
- jobset.message('FAILED', k, do_newline=True)
- else:
+def _build_and_run(check_cancelled,
+ newline_on_success,
+ xml_report=None,
+ build_only=False):
+ """Do one pass of building & running tests."""
+ # build latest sequentially
+ num_failures, resultset = jobset.run(
+ build_steps,
+ maxjobs=1,
+ stop_on_failure=True,
+ newline_on_success=newline_on_success,
+ travis=args.travis)
+ if num_failures:
+ return [BuildAndRunError.BUILD]
+
+ if build_only:
+ if xml_report:
+ report_utils.render_junit_xml_report(
+ resultset, xml_report, suite_name=args.report_suite_name)
+ return []
+
+ if not args.travis and not _has_epollexclusive() and platform_string(
+ ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
+ platform_string()]:
+ print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
+ _POLLING_STRATEGIES[platform_string()].remove('epollex')
+
+ # start antagonists
+ antagonists = [
+ subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
+ for _ in range(0, args.antagonists)
+ ]
+ start_port_server.start_port_server()
+ resultset = None
+ num_test_failures = 0
+ try:
+ infinite_runs = runs_per_test == 0
+ one_run = set(spec
+ for language in languages
+ for spec in language.test_specs()
+ if (re.search(args.regex, spec.shortname) and (
+ args.regex_exclude == '' or not re.search(
+ args.regex_exclude, spec.shortname))))
+ # When running on travis, we want our test runs to be as similar as possible
+ # for reproducibility purposes.
+ if args.travis and args.max_time <= 0:
+ massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
+ else:
+ # whereas otherwise, we want to shuffle things up to give all tests a
+ # chance to run.
+ massaged_one_run = list(
+ one_run) # random.sample needs an indexable seq.
+ num_jobs = len(massaged_one_run)
+ # for a random sample, get as many as indicated by the 'sample_percent'
+ # argument. By default this arg is 100, resulting in a shuffle of all
+ # jobs.
+ sample_size = int(num_jobs * args.sample_percent / 100.0)
+ massaged_one_run = random.sample(massaged_one_run, sample_size)
+ if not isclose(args.sample_percent, 100.0):
+ assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
+ print("Running %d tests out of %d (~%d%%)" %
+ (sample_size, num_jobs, args.sample_percent))
+ if infinite_runs:
+ assert len(massaged_one_run
+ ) > 0, 'Must have at least one test for a -n inf run'
+ runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
+ else itertools.repeat(massaged_one_run, runs_per_test))
+ all_runs = itertools.chain.from_iterable(runs_sequence)
+
+ if args.quiet_success:
jobset.message(
- 'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
+ 'START',
+ 'Running tests quietly, only failing tests will be reported',
do_newline=True)
- finally:
- for antagonist in antagonists:
- antagonist.kill()
- if args.bq_result_table and resultset:
- upload_results_to_bq(resultset, args.bq_result_table, args, platform_string())
- if xml_report and resultset:
- report_utils.render_junit_xml_report(resultset, xml_report,
- suite_name=args.report_suite_name)
-
- number_failures, _ = jobset.run(
- post_tests_steps, maxjobs=1, stop_on_failure=False,
- newline_on_success=newline_on_success, travis=args.travis)
+ num_test_failures, resultset = jobset.run(
+ all_runs,
+ check_cancelled,
+ newline_on_success=newline_on_success,
+ travis=args.travis,
+ maxjobs=args.jobs,
+ maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
+ stop_on_failure=args.stop_on_failure,
+ quiet_success=args.quiet_success,
+ max_time=args.max_time)
+ if resultset:
+ for k, v in sorted(resultset.items()):
+ num_runs, num_failures = _calculate_num_runs_failures(v)
+ if num_failures > 0:
+ if num_failures == num_runs: # what about infinite_runs???
+ jobset.message('FAILED', k, do_newline=True)
+ else:
+ jobset.message(
+ 'FLAKE',
+ '%s [%d/%d runs flaked]' %
+ (k, num_failures, num_runs),
+ do_newline=True)
+ finally:
+ for antagonist in antagonists:
+ antagonist.kill()
+ if args.bq_result_table and resultset:
+ upload_results_to_bq(resultset, args.bq_result_table, args,
+ platform_string())
+ if xml_report and resultset:
+ report_utils.render_junit_xml_report(
+ resultset, xml_report, suite_name=args.report_suite_name)
+
+ number_failures, _ = jobset.run(
+ post_tests_steps,
+ maxjobs=1,
+ stop_on_failure=False,
+ newline_on_success=newline_on_success,
+ travis=args.travis)
- out = []
- if number_failures:
- out.append(BuildAndRunError.POST_TEST)
- if num_test_failures:
- out.append(BuildAndRunError.TEST)
+ out = []
+ if number_failures:
+ out.append(BuildAndRunError.POST_TEST)
+ if num_test_failures:
+ out.append(BuildAndRunError.TEST)
- return out
+ return out
if forever:
- success = True
- while True:
- dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
- initial_time = dw.most_recent_change()
- have_files_changed = lambda: dw.most_recent_change() != initial_time
- previous_success = success
- errors = _build_and_run(check_cancelled=have_files_changed,
- newline_on_success=False,
- build_only=args.build_only) == 0
- if not previous_success and not errors:
- jobset.message('SUCCESS',
- 'All tests are now passing properly',
- do_newline=True)
- jobset.message('IDLE', 'No change detected')
- while not have_files_changed():
- time.sleep(1)
+ success = True
+ while True:
+ dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
+ initial_time = dw.most_recent_change()
+ have_files_changed = lambda: dw.most_recent_change() != initial_time
+ previous_success = success
+ errors = _build_and_run(
+ check_cancelled=have_files_changed,
+ newline_on_success=False,
+ build_only=args.build_only) == 0
+ if not previous_success and not errors:
+ jobset.message(
+ 'SUCCESS',
+ 'All tests are now passing properly',
+ do_newline=True)
+ jobset.message('IDLE', 'No change detected')
+ while not have_files_changed():
+ time.sleep(1)
else:
- errors = _build_and_run(check_cancelled=lambda: False,
- newline_on_success=args.newline_on_success,
- xml_report=args.xml_report,
- build_only=args.build_only)
- if not errors:
- jobset.message('SUCCESS', 'All tests passed', do_newline=True)
- else:
- jobset.message('FAILED', 'Some tests failed', do_newline=True)
- exit_code = 0
- if BuildAndRunError.BUILD in errors:
- exit_code |= 1
- if BuildAndRunError.TEST in errors:
- exit_code |= 2
- if BuildAndRunError.POST_TEST in errors:
- exit_code |= 4
- sys.exit(exit_code)
+ errors = _build_and_run(
+ check_cancelled=lambda: False,
+ newline_on_success=args.newline_on_success,
+ xml_report=args.xml_report,
+ build_only=args.build_only)
+ if not errors:
+ jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+ else:
+ jobset.message('FAILED', 'Some tests failed', do_newline=True)
+ exit_code = 0
+ if BuildAndRunError.BUILD in errors:
+ exit_code |= 1
+ if BuildAndRunError.TEST in errors:
+ exit_code |= 2
+ if BuildAndRunError.POST_TEST in errors:
+ exit_code |= 4
+ sys.exit(exit_code)
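
Aside for readers of the run_tests.py hunk above: the yapf pass does not change how failures are encoded in the exit status. The snippet below is an illustrative, self-contained sketch (not code from this commit; the helper name exit_code_for is made up for the example) of the BuildAndRunError bitmask logic, where build failures set bit 0, test failures bit 1, and post-test failures bit 2.

class BuildAndRunError(object):
    # Sentinel objects, mirroring the class reformatted in the hunk above.
    BUILD = object()
    TEST = object()
    POST_TEST = object()


def exit_code_for(errors):
    """Combine the phase errors returned by _build_and_run into a bitmask."""
    # Illustrative helper; the real script performs this inline before sys.exit().
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
    return exit_code


if __name__ == '__main__':
    # A run where tests and post-test steps both failed exits with status 6.
    assert exit_code_for([BuildAndRunError.TEST, BuildAndRunError.POST_TEST]) == 6
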
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index 7c58d8efb1..49be8f1d7e 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Run test matrix."""
from __future__ import print_function
@@ -29,14 +28,14 @@ from python_utils.filter_pull_request_tests import filter_tests
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
-_DEFAULT_RUNTESTS_TIMEOUT = 1*60*60
+_DEFAULT_RUNTESTS_TIMEOUT = 1 * 60 * 60
# Set the timeout high to allow enough time for sanitizers and pre-building
# clang docker.
-_CPP_RUNTESTS_TIMEOUT = 4*60*60
+_CPP_RUNTESTS_TIMEOUT = 4 * 60 * 60
# C++ TSAN takes longer than other sanitizers
-_CPP_TSAN_RUNTESTS_TIMEOUT = 8*60*60
+_CPP_TSAN_RUNTESTS_TIMEOUT = 8 * 60 * 60
# Number of jobs assigned to each run_tests.py instance
_DEFAULT_INNER_JOBS = 2
@@ -46,448 +45,517 @@ _REPORT_SUFFIX = 'sponge_log.xml'
def _report_filename(name):
- """Generates report file name"""
- return 'report_%s_%s' % (name, _REPORT_SUFFIX)
+ """Generates report file name"""
+ return 'report_%s_%s' % (name, _REPORT_SUFFIX)
def _report_filename_internal_ci(name):
- """Generates report file name that leads to better presentation by internal CI"""
- return '%s/%s' % (name, _REPORT_SUFFIX)
+ """Generates report file name that leads to better presentation by internal CI"""
+ return '%s/%s' % (name, _REPORT_SUFFIX)
-def _docker_jobspec(name, runtests_args=[], runtests_envs={},
+def _docker_jobspec(name,
+ runtests_args=[],
+ runtests_envs={},
inner_jobs=_DEFAULT_INNER_JOBS,
timeout_seconds=None):
- """Run a single instance of run_tests.py in a docker container"""
- if not timeout_seconds:
- timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
- test_job = jobset.JobSpec(
- cmdline=['python', 'tools/run_tests/run_tests.py',
- '--use_docker',
- '-t',
- '-j', str(inner_jobs),
- '-x', _report_filename(name),
- '--report_suite_name', '%s' % name] + runtests_args,
- environ=runtests_envs,
- shortname='run_tests_%s' % name,
- timeout_seconds=timeout_seconds)
- return test_job
-
-
-def _workspace_jobspec(name, runtests_args=[], workspace_name=None,
- runtests_envs={}, inner_jobs=_DEFAULT_INNER_JOBS,
+ """Run a single instance of run_tests.py in a docker container"""
+ if not timeout_seconds:
+ timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
+ test_job = jobset.JobSpec(
+ cmdline=[
+ 'python', 'tools/run_tests/run_tests.py', '--use_docker', '-t',
+ '-j', str(inner_jobs), '-x', _report_filename(name),
+ '--report_suite_name', '%s' % name
+ ] + runtests_args,
+ environ=runtests_envs,
+ shortname='run_tests_%s' % name,
+ timeout_seconds=timeout_seconds)
+ return test_job
+
+
+def _workspace_jobspec(name,
+ runtests_args=[],
+ workspace_name=None,
+ runtests_envs={},
+ inner_jobs=_DEFAULT_INNER_JOBS,
timeout_seconds=None):
- """Run a single instance of run_tests.py in a separate workspace"""
- if not workspace_name:
- workspace_name = 'workspace_%s' % name
- if not timeout_seconds:
- timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
- env = {'WORKSPACE_NAME': workspace_name}
- env.update(runtests_envs)
- test_job = jobset.JobSpec(
- cmdline=['bash',
- 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
- '-t',
- '-j', str(inner_jobs),
- '-x', '../%s' % _report_filename(name),
- '--report_suite_name', '%s' % name] + runtests_args,
- environ=env,
- shortname='run_tests_%s' % name,
- timeout_seconds=timeout_seconds)
- return test_job
-
-
-def _generate_jobs(languages, configs, platforms, iomgr_platform = 'native',
- arch=None, compiler=None,
- labels=[], extra_args=[], extra_envs={},
- inner_jobs=_DEFAULT_INNER_JOBS,
- timeout_seconds=None):
- result = []
- for language in languages:
- for platform in platforms:
- for config in configs:
- name = '%s_%s_%s_%s' % (language, platform, config, iomgr_platform)
- runtests_args = ['-l', language,
- '-c', config,
- '--iomgr_platform', iomgr_platform]
- if arch or compiler:
- name += '_%s_%s' % (arch, compiler)
- runtests_args += ['--arch', arch,
- '--compiler', compiler]
- if '--build_only' in extra_args:
- name += '_buildonly'
- for extra_env in extra_envs:
- name += '_%s_%s' % (extra_env, extra_envs[extra_env])
-
- runtests_args += extra_args
- if platform == 'linux':
- job = _docker_jobspec(name=name, runtests_args=runtests_args,
- runtests_envs=extra_envs, inner_jobs=inner_jobs,
- timeout_seconds=timeout_seconds)
- else:
- job = _workspace_jobspec(name=name, runtests_args=runtests_args,
- runtests_envs=extra_envs, inner_jobs=inner_jobs,
- timeout_seconds=timeout_seconds)
-
- job.labels = [platform, config, language, iomgr_platform] + labels
- result.append(job)
- return result
+ """Run a single instance of run_tests.py in a separate workspace"""
+ if not workspace_name:
+ workspace_name = 'workspace_%s' % name
+ if not timeout_seconds:
+ timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
+ env = {'WORKSPACE_NAME': workspace_name}
+ env.update(runtests_envs)
+ test_job = jobset.JobSpec(
+ cmdline=[
+ 'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
+ '-t', '-j', str(inner_jobs), '-x', '../%s' % _report_filename(name),
+ '--report_suite_name', '%s' % name
+ ] + runtests_args,
+ environ=env,
+ shortname='run_tests_%s' % name,
+ timeout_seconds=timeout_seconds)
+ return test_job
+
+
+def _generate_jobs(languages,
+ configs,
+ platforms,
+ iomgr_platform='native',
+ arch=None,
+ compiler=None,
+ labels=[],
+ extra_args=[],
+ extra_envs={},
+ inner_jobs=_DEFAULT_INNER_JOBS,
+ timeout_seconds=None):
+ result = []
+ for language in languages:
+ for platform in platforms:
+ for config in configs:
+ name = '%s_%s_%s_%s' % (language, platform, config,
+ iomgr_platform)
+ runtests_args = [
+ '-l', language, '-c', config, '--iomgr_platform',
+ iomgr_platform
+ ]
+ if arch or compiler:
+ name += '_%s_%s' % (arch, compiler)
+ runtests_args += ['--arch', arch, '--compiler', compiler]
+ if '--build_only' in extra_args:
+ name += '_buildonly'
+ for extra_env in extra_envs:
+ name += '_%s_%s' % (extra_env, extra_envs[extra_env])
+
+ runtests_args += extra_args
+ if platform == 'linux':
+ job = _docker_jobspec(
+ name=name,
+ runtests_args=runtests_args,
+ runtests_envs=extra_envs,
+ inner_jobs=inner_jobs,
+ timeout_seconds=timeout_seconds)
+ else:
+ job = _workspace_jobspec(
+ name=name,
+ runtests_args=runtests_args,
+ runtests_envs=extra_envs,
+ inner_jobs=inner_jobs,
+ timeout_seconds=timeout_seconds)
+
+ job.labels = [platform, config, language, iomgr_platform
+ ] + labels
+ result.append(job)
+ return result
def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
- test_jobs = []
- # supported on linux only
- test_jobs += _generate_jobs(languages=['sanity', 'php7'],
- configs=['dbg', 'opt'],
- platforms=['linux'],
- labels=['basictests', 'multilang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- # supported on all platforms.
- test_jobs += _generate_jobs(languages=['c'],
- configs=['dbg', 'opt'],
- platforms=['linux', 'macos', 'windows'],
- labels=['basictests', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-
- test_jobs += _generate_jobs(languages=['csharp', 'python'],
- configs=['dbg', 'opt'],
- platforms=['linux', 'macos', 'windows'],
- labels=['basictests', 'multilang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- # supported on linux and mac.
- test_jobs += _generate_jobs(languages=['c++'],
- configs=['dbg', 'opt'],
- platforms=['linux', 'macos'],
- labels=['basictests', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-
- test_jobs += _generate_jobs(languages=['grpc-node', 'ruby', 'php'],
- configs=['dbg', 'opt'],
- platforms=['linux', 'macos'],
- labels=['basictests', 'multilang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- # supported on mac only.
- test_jobs += _generate_jobs(languages=['objc'],
- configs=['dbg', 'opt'],
- platforms=['macos'],
- labels=['basictests', 'multilang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- # sanitizers
- test_jobs += _generate_jobs(languages=['c'],
- configs=['msan', 'asan', 'tsan', 'ubsan'],
- platforms=['linux'],
- labels=['sanitizers', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
- test_jobs += _generate_jobs(languages=['c++'],
- configs=['asan'],
- platforms=['linux'],
- labels=['sanitizers', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
- test_jobs += _generate_jobs(languages=['c++'],
- configs=['tsan'],
- platforms=['linux'],
- labels=['sanitizers', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_TSAN_RUNTESTS_TIMEOUT)
-
- return test_jobs
-
-
-def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
- test_jobs = []
- # portability C x86
- test_jobs += _generate_jobs(languages=['c'],
- configs=['dbg'],
- platforms=['linux'],
- arch='x86',
- compiler='default',
- labels=['portability', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- # portability C and C++ on x64
- for compiler in ['gcc4.8', 'gcc5.3', 'gcc_musl',
- 'clang3.5', 'clang3.6', 'clang3.7']:
- test_jobs += _generate_jobs(languages=['c', 'c++'],
- configs=['dbg'],
- platforms=['linux'],
- arch='x64',
- compiler=compiler,
- labels=['portability', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-
- # portability C on Windows 64-bit (x86 is the default)
- test_jobs += _generate_jobs(languages=['c'],
- configs=['dbg'],
- platforms=['windows'],
- arch='x64',
- compiler='default',
- labels=['portability', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- # portability C++ on Windows
- # TODO(jtattermusch): some of the tests are failing, so we force --build_only
- test_jobs += _generate_jobs(languages=['c++'],
- configs=['dbg'],
- platforms=['windows'],
- arch='default',
- compiler='default',
- labels=['portability', 'corelang'],
- extra_args=extra_args + ['--build_only'],
- inner_jobs=inner_jobs)
-
- # portability C and C++ on Windows using VS2017 (build only)
- # TODO(jtattermusch): some of the tests are failing, so we force --build_only
- test_jobs += _generate_jobs(languages=['c', 'c++'],
- configs=['dbg'],
- platforms=['windows'],
- arch='x64',
- compiler='cmake_vs2017',
- labels=['portability', 'corelang'],
- extra_args=extra_args + ['--build_only'],
- inner_jobs=inner_jobs)
-
- # C and C++ with the c-ares DNS resolver on Linux
- test_jobs += _generate_jobs(languages=['c', 'c++'],
- configs=['dbg'], platforms=['linux'],
- labels=['portability', 'corelang'],
- extra_args=extra_args,
- extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-
- # TODO(zyc): Turn on this test after adding c-ares support on windows.
- # C with the c-ares DNS resolver on Windows
- # test_jobs += _generate_jobs(languages=['c'],
- # configs=['dbg'], platforms=['windows'],
- # labels=['portability', 'corelang'],
- # extra_args=extra_args,
- # extra_envs={'GRPC_DNS_RESOLVER': 'ares'})
-
- # C and C++ build with cmake on Linux
- # TODO(jtattermusch): some of the tests are failing, so we force --build_only
- # to make sure it's buildable at least.
- test_jobs += _generate_jobs(languages=['c', 'c++'],
- configs=['dbg'],
- platforms=['linux'],
- arch='default',
- compiler='cmake',
- labels=['portability', 'corelang'],
- extra_args=extra_args + ['--build_only'],
- inner_jobs=inner_jobs)
-
- test_jobs += _generate_jobs(languages=['python'],
- configs=['dbg'],
- platforms=['linux'],
- arch='default',
- compiler='python_alpine',
- labels=['portability', 'multilang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- test_jobs += _generate_jobs(languages=['csharp'],
- configs=['dbg'],
- platforms=['linux'],
- arch='default',
- compiler='coreclr',
- labels=['portability', 'multilang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- test_jobs += _generate_jobs(languages=['c'],
- configs=['dbg'],
- platforms=['linux'],
- iomgr_platform='uv',
- labels=['portability', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-
- return test_jobs
+ test_jobs = []
+ # supported on linux only
+ test_jobs += _generate_jobs(
+ languages=['sanity', 'php7'],
+ configs=['dbg', 'opt'],
+ platforms=['linux'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # supported on all platforms.
+ test_jobs += _generate_jobs(
+ languages=['c'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos', 'windows'],
+ labels=['basictests', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ test_jobs += _generate_jobs(
+ languages=['csharp', 'python'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos', 'windows'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # supported on linux and mac.
+ test_jobs += _generate_jobs(
+ languages=['c++'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos'],
+ labels=['basictests', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ test_jobs += _generate_jobs(
+ languages=['grpc-node', 'ruby', 'php'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # supported on mac only.
+ test_jobs += _generate_jobs(
+ languages=['objc'],
+ configs=['dbg', 'opt'],
+ platforms=['macos'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # sanitizers
+ test_jobs += _generate_jobs(
+ languages=['c'],
+ configs=['msan', 'asan', 'tsan', 'ubsan'],
+ platforms=['linux'],
+ labels=['sanitizers', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+ test_jobs += _generate_jobs(
+ languages=['c++'],
+ configs=['asan'],
+ platforms=['linux'],
+ labels=['sanitizers', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+ test_jobs += _generate_jobs(
+ languages=['c++'],
+ configs=['tsan'],
+ platforms=['linux'],
+ labels=['sanitizers', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_TSAN_RUNTESTS_TIMEOUT)
+
+ return test_jobs
+
+
+def _create_portability_test_jobs(extra_args=[],
+ inner_jobs=_DEFAULT_INNER_JOBS):
+ test_jobs = []
+ # portability C x86
+ test_jobs += _generate_jobs(
+ languages=['c'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='x86',
+ compiler='default',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # portability C and C++ on x64
+ for compiler in [
+ 'gcc4.8', 'gcc5.3', 'gcc_musl', 'clang3.5', 'clang3.6', 'clang3.7'
+ ]:
+ test_jobs += _generate_jobs(
+ languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='x64',
+ compiler=compiler,
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ # portability C on Windows 64-bit (x86 is the default)
+ test_jobs += _generate_jobs(
+ languages=['c'],
+ configs=['dbg'],
+ platforms=['windows'],
+ arch='x64',
+ compiler='default',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # portability C++ on Windows
+ # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+ test_jobs += _generate_jobs(
+ languages=['c++'],
+ configs=['dbg'],
+ platforms=['windows'],
+ arch='default',
+ compiler='default',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args + ['--build_only'],
+ inner_jobs=inner_jobs)
+
+ # portability C and C++ on Windows using VS2017 (build only)
+ # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+ test_jobs += _generate_jobs(
+ languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['windows'],
+ arch='x64',
+ compiler='cmake_vs2017',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args + ['--build_only'],
+ inner_jobs=inner_jobs)
+
+ # C and C++ with the c-ares DNS resolver on Linux
+ test_jobs += _generate_jobs(
+ languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['linux'],
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ # TODO(zyc): Turn on this test after adding c-ares support on windows.
+ # C with the c-ares DNS resolver on Windows
+ # test_jobs += _generate_jobs(languages=['c'],
+ # configs=['dbg'], platforms=['windows'],
+ # labels=['portability', 'corelang'],
+ # extra_args=extra_args,
+ # extra_envs={'GRPC_DNS_RESOLVER': 'ares'})
+
+ # C and C++ build with cmake on Linux
+ # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+ # to make sure it's buildable at least.
+ test_jobs += _generate_jobs(
+ languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='default',
+ compiler='cmake',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args + ['--build_only'],
+ inner_jobs=inner_jobs)
+
+ test_jobs += _generate_jobs(
+ languages=['python'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='default',
+ compiler='python_alpine',
+ labels=['portability', 'multilang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ test_jobs += _generate_jobs(
+ languages=['csharp'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='default',
+ compiler='coreclr',
+ labels=['portability', 'multilang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ test_jobs += _generate_jobs(
+ languages=['c'],
+ configs=['dbg'],
+ platforms=['linux'],
+ iomgr_platform='uv',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ return test_jobs
def _allowed_labels():
- """Returns a list of existing job labels."""
- all_labels = set()
- for job in _create_test_jobs() + _create_portability_test_jobs():
- for label in job.labels:
- all_labels.add(label)
- return sorted(all_labels)
+ """Returns a list of existing job labels."""
+ all_labels = set()
+ for job in _create_test_jobs() + _create_portability_test_jobs():
+ for label in job.labels:
+ all_labels.add(label)
+ return sorted(all_labels)
def _runs_per_test_type(arg_str):
- """Auxiliary function to parse the "runs_per_test" flag."""
- try:
- n = int(arg_str)
- if n <= 0: raise ValueError
- return n
- except:
- msg = '\'{}\' is not a positive integer'.format(arg_str)
- raise argparse.ArgumentTypeError(msg)
+ """Auxiliary function to parse the "runs_per_test" flag."""
+ try:
+ n = int(arg_str)
+ if n <= 0: raise ValueError
+ return n
+ except:
+ msg = '\'{}\' is not a positive integer'.format(arg_str)
+ raise argparse.ArgumentTypeError(msg)
if __name__ == "__main__":
- argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.')
- argp.add_argument('-j', '--jobs',
- default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS,
- type=int,
- help='Number of concurrent run_tests.py instances.')
- argp.add_argument('-f', '--filter',
- choices=_allowed_labels(),
- nargs='+',
- default=[],
- help='Filter targets to run by label with AND semantics.')
- argp.add_argument('--exclude',
- choices=_allowed_labels(),
- nargs='+',
- default=[],
- help='Exclude targets with any of given labels.')
- argp.add_argument('--build_only',
- default=False,
- action='store_const',
- const=True,
- help='Pass --build_only flag to run_tests.py instances.')
- argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
- help='Pass --force_default_poller to run_tests.py instances.')
- argp.add_argument('--dry_run',
- default=False,
- action='store_const',
- const=True,
- help='Only print what would be run.')
- argp.add_argument('--filter_pr_tests',
- default=False,
- action='store_const',
- const=True,
- help='Filters out tests irrelevant to pull request changes.')
- argp.add_argument('--base_branch',
- default='origin/master',
- type=str,
- help='Branch that pull request is requesting to merge into')
- argp.add_argument('--inner_jobs',
- default=_DEFAULT_INNER_JOBS,
- type=int,
- help='Number of jobs in each run_tests.py instance')
- argp.add_argument('-n', '--runs_per_test', default=1, type=_runs_per_test_type,
- help='How many times to run each test. >1 runs implies ' +
- 'omitting passing tests from the output & reports.')
- argp.add_argument('--max_time', default=-1, type=int,
- help='Maximum amount of time to run tests for ' +
- '(other tests will be skipped)')
- argp.add_argument('--internal_ci',
- default=False,
- action='store_const',
- const=True,
- help='Put reports into subdirectories to improve presentation of '
- 'results by Internal CI.')
- argp.add_argument('--bq_result_table',
- default='',
- type=str,
- nargs='?',
- help='Upload test results to a specified BQ table.')
- args = argp.parse_args()
-
- if args.internal_ci:
- _report_filename = _report_filename_internal_ci # override the function
-
- extra_args = []
- if args.build_only:
- extra_args.append('--build_only')
- if args.force_default_poller:
- extra_args.append('--force_default_poller')
- if args.runs_per_test > 1:
- extra_args.append('-n')
- extra_args.append('%s' % args.runs_per_test)
- extra_args.append('--quiet_success')
- if args.max_time > 0:
- extra_args.extend(('--max_time', '%d' % args.max_time))
- if args.bq_result_table:
- extra_args.append('--bq_result_table')
- extra_args.append('%s' % args.bq_result_table)
- extra_args.append('--measure_cpu_costs')
- extra_args.append('--disable_auto_set_flakes')
-
- all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
- _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
-
- jobs = []
- for job in all_jobs:
- if not args.filter or all(filter in job.labels for filter in args.filter):
- if not any(exclude_label in job.labels for exclude_label in args.exclude):
- jobs.append(job)
-
- if not jobs:
- jobset.message('FAILED', 'No test suites match given criteria.',
- do_newline=True)
- sys.exit(1)
-
- print('IMPORTANT: The changes you are testing need to be locally committed')
- print('because only the committed changes in the current branch will be')
- print('copied to the docker environment or into subworkspaces.')
-
- skipped_jobs = []
-
- if args.filter_pr_tests:
- print('Looking for irrelevant tests to skip...')
- relevant_jobs = filter_tests(jobs, args.base_branch)
- if len(relevant_jobs) == len(jobs):
- print('No tests will be skipped.')
- else:
- print('These tests will be skipped:')
- skipped_jobs = list(set(jobs) - set(relevant_jobs))
- # Sort by shortnames to make printing of skipped tests consistent
- skipped_jobs.sort(key=lambda job: job.shortname)
- for job in list(skipped_jobs):
- print(' %s' % job.shortname)
- jobs = relevant_jobs
-
- print('Will run these tests:')
- for job in jobs:
+ argp = argparse.ArgumentParser(
+ description='Run a matrix of run_tests.py tests.')
+ argp.add_argument(
+ '-j',
+ '--jobs',
+ default=multiprocessing.cpu_count() / _DEFAULT_INNER_JOBS,
+ type=int,
+ help='Number of concurrent run_tests.py instances.')
+ argp.add_argument(
+ '-f',
+ '--filter',
+ choices=_allowed_labels(),
+ nargs='+',
+ default=[],
+ help='Filter targets to run by label with AND semantics.')
+ argp.add_argument(
+ '--exclude',
+ choices=_allowed_labels(),
+ nargs='+',
+ default=[],
+ help='Exclude targets with any of given labels.')
+ argp.add_argument(
+ '--build_only',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Pass --build_only flag to run_tests.py instances.')
+ argp.add_argument(
+ '--force_default_poller',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Pass --force_default_poller to run_tests.py instances.')
+ argp.add_argument(
+ '--dry_run',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Only print what would be run.')
+ argp.add_argument(
+ '--filter_pr_tests',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Filters out tests irrelevant to pull request changes.')
+ argp.add_argument(
+ '--base_branch',
+ default='origin/master',
+ type=str,
+ help='Branch that pull request is requesting to merge into')
+ argp.add_argument(
+ '--inner_jobs',
+ default=_DEFAULT_INNER_JOBS,
+ type=int,
+ help='Number of jobs in each run_tests.py instance')
+ argp.add_argument(
+ '-n',
+ '--runs_per_test',
+ default=1,
+ type=_runs_per_test_type,
+ help='How many times to run each test. >1 runs implies ' +
+ 'omitting passing tests from the output & reports.')
+ argp.add_argument(
+ '--max_time',
+ default=-1,
+ type=int,
+ help='Maximum amount of time to run tests for ' +
+ '(other tests will be skipped)')
+ argp.add_argument(
+ '--internal_ci',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Put reports into subdirectories to improve presentation of '
+ 'results by Internal CI.')
+ argp.add_argument(
+ '--bq_result_table',
+ default='',
+ type=str,
+ nargs='?',
+ help='Upload test results to a specified BQ table.')
+ args = argp.parse_args()
+
+ if args.internal_ci:
+ _report_filename = _report_filename_internal_ci # override the function
+
+ extra_args = []
+ if args.build_only:
+ extra_args.append('--build_only')
+ if args.force_default_poller:
+ extra_args.append('--force_default_poller')
+ if args.runs_per_test > 1:
+ extra_args.append('-n')
+ extra_args.append('%s' % args.runs_per_test)
+ extra_args.append('--quiet_success')
+ if args.max_time > 0:
+ extra_args.extend(('--max_time', '%d' % args.max_time))
+ if args.bq_result_table:
+ extra_args.append('--bq_result_table')
+ extra_args.append('%s' % args.bq_result_table)
+ extra_args.append('--measure_cpu_costs')
+ extra_args.append('--disable_auto_set_flakes')
+
+ all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
+ _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
+
+ jobs = []
+ for job in all_jobs:
+ if not args.filter or all(filter in job.labels
+ for filter in args.filter):
+ if not any(exclude_label in job.labels
+ for exclude_label in args.exclude):
+ jobs.append(job)
+
+ if not jobs:
+ jobset.message(
+ 'FAILED', 'No test suites match given criteria.', do_newline=True)
+ sys.exit(1)
+
+ print('IMPORTANT: The changes you are testing need to be locally committed')
+ print('because only the committed changes in the current branch will be')
+ print('copied to the docker environment or into subworkspaces.')
+
+ skipped_jobs = []
+
+ if args.filter_pr_tests:
+ print('Looking for irrelevant tests to skip...')
+ relevant_jobs = filter_tests(jobs, args.base_branch)
+ if len(relevant_jobs) == len(jobs):
+ print('No tests will be skipped.')
+ else:
+ print('These tests will be skipped:')
+ skipped_jobs = list(set(jobs) - set(relevant_jobs))
+ # Sort by shortnames to make printing of skipped tests consistent
+ skipped_jobs.sort(key=lambda job: job.shortname)
+ for job in list(skipped_jobs):
+ print(' %s' % job.shortname)
+ jobs = relevant_jobs
+
+ print('Will run these tests:')
+ for job in jobs:
+ if args.dry_run:
+ print(' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
+ else:
+ print(' %s' % job.shortname)
+ print
+
if args.dry_run:
- print(' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
+ print('--dry_run was used, exiting')
+ sys.exit(1)
+
+ jobset.message('START', 'Running test matrix.', do_newline=True)
+ num_failures, resultset = jobset.run(
+ jobs, newline_on_success=True, travis=True, maxjobs=args.jobs)
+ # Merge skipped tests into results to show skipped tests on report.xml
+ if skipped_jobs:
+ ignored_num_skipped_failures, skipped_results = jobset.run(
+ skipped_jobs, skip_jobs=True)
+ resultset.update(skipped_results)
+ report_utils.render_junit_xml_report(
+ resultset,
+ _report_filename('aggregate_tests'),
+ suite_name='aggregate_tests')
+
+ if num_failures == 0:
+ jobset.message(
+ 'SUCCESS',
+ 'All run_tests.py instances finished successfully.',
+ do_newline=True)
else:
- print(' %s' % job.shortname)
- print
-
- if args.dry_run:
- print('--dry_run was used, exiting')
- sys.exit(1)
-
- jobset.message('START', 'Running test matrix.', do_newline=True)
- num_failures, resultset = jobset.run(jobs,
- newline_on_success=True,
- travis=True,
- maxjobs=args.jobs)
- # Merge skipped tests into results to show skipped tests on report.xml
- if skipped_jobs:
- ignored_num_skipped_failures, skipped_results = jobset.run(
- skipped_jobs, skip_jobs=True)
- resultset.update(skipped_results)
- report_utils.render_junit_xml_report(resultset, _report_filename('aggregate_tests'),
- suite_name='aggregate_tests')
-
- if num_failures == 0:
- jobset.message('SUCCESS', 'All run_tests.py instances finished successfully.',
- do_newline=True)
- else:
- jobset.message('FAILED', 'Some run_tests.py instances have failed.',
- do_newline=True)
- sys.exit(1)
+ jobset.message(
+ 'FAILED',
+ 'Some run_tests.py instances have failed.',
+ do_newline=True)
+ sys.exit(1)
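
For context on the job-selection loop reformatted above: -f/--filter keeps only jobs whose labels contain every requested label (AND semantics), and --exclude then drops any job carrying at least one excluded label. A minimal standalone sketch follows; the Job namedtuple, filter_jobs helper, and the sample job names are illustrative stand-ins, not the jobset.JobSpec objects the real script builds.

import collections

# Stand-in for jobset.JobSpec; only the fields needed for label filtering.
Job = collections.namedtuple('Job', ['shortname', 'labels'])


def filter_jobs(all_jobs, filters, excludes):
    """Keep jobs carrying all 'filters' labels and none of the 'excludes' labels."""
    selected = []
    for job in all_jobs:
        if not filters or all(f in job.labels for f in filters):
            if not any(e in job.labels for e in excludes):
                selected.append(job)
    return selected


jobs = [
    Job('c_linux_dbg', ['linux', 'dbg', 'c', 'basictests', 'corelang']),
    Job('c++_linux_tsan', ['linux', 'tsan', 'c++', 'sanitizers', 'corelang']),
]
# Keeps only c_linux_dbg: it carries 'corelang' and is not a sanitizer job.
print([j.shortname for j in filter_jobs(jobs, ['corelang'], ['sanitizers'])])
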
diff --git a/tools/run_tests/start_port_server.py b/tools/run_tests/start_port_server.py
index 362875036f..0eeceb4ce9 100755
--- a/tools/run_tests/start_port_server.py
+++ b/tools/run_tests/start_port_server.py
@@ -13,7 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""
Wrapper around port server starting code.
diff --git a/tools/run_tests/task_runner.py b/tools/run_tests/task_runner.py
index a065bb84cb..794db6e1be 100755
--- a/tools/run_tests/task_runner.py
+++ b/tools/run_tests/task_runner.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Runs selected gRPC test/build tasks."""
from __future__ import print_function
@@ -32,52 +31,54 @@ _TARGETS += artifact_targets.targets()
_TARGETS += distribtest_targets.targets()
_TARGETS += package_targets.targets()
+
def _create_build_map():
- """Maps task names and labels to list of tasks to be built."""
- target_build_map = dict([(target.name, [target])
- for target in _TARGETS])
- if len(_TARGETS) > len(target_build_map.keys()):
- raise Exception('Target names need to be unique')
-
- label_build_map = {}
- label_build_map['all'] = [t for t in _TARGETS] # to build all targets
- for target in _TARGETS:
- for label in target.labels:
- if label in label_build_map:
- label_build_map[label].append(target)
- else:
- label_build_map[label] = [target]
-
- if set(target_build_map.keys()).intersection(label_build_map.keys()):
- raise Exception('Target names need to be distinct from label names')
- return dict( target_build_map.items() + label_build_map.items())
+ """Maps task names and labels to list of tasks to be built."""
+ target_build_map = dict([(target.name, [target]) for target in _TARGETS])
+ if len(_TARGETS) > len(target_build_map.keys()):
+ raise Exception('Target names need to be unique')
+
+ label_build_map = {}
+ label_build_map['all'] = [t for t in _TARGETS] # to build all targets
+ for target in _TARGETS:
+ for label in target.labels:
+ if label in label_build_map:
+ label_build_map[label].append(target)
+ else:
+ label_build_map[label] = [target]
+
+ if set(target_build_map.keys()).intersection(label_build_map.keys()):
+ raise Exception('Target names need to be distinct from label names')
+ return dict(target_build_map.items() + label_build_map.items())
_BUILD_MAP = _create_build_map()
argp = argparse.ArgumentParser(description='Runs build/test targets.')
-argp.add_argument('-b', '--build',
- choices=sorted(_BUILD_MAP.keys()),
- nargs='+',
- default=['all'],
- help='Target name or target label to build.')
-argp.add_argument('-f', '--filter',
- choices=sorted(_BUILD_MAP.keys()),
- nargs='+',
- default=[],
- help='Filter targets to build with AND semantics.')
+argp.add_argument(
+ '-b',
+ '--build',
+ choices=sorted(_BUILD_MAP.keys()),
+ nargs='+',
+ default=['all'],
+ help='Target name or target label to build.')
+argp.add_argument(
+ '-f',
+ '--filter',
+ choices=sorted(_BUILD_MAP.keys()),
+ nargs='+',
+ default=[],
+ help='Filter targets to build with AND semantics.')
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
-argp.add_argument('-t', '--travis',
- default=False,
- action='store_const',
- const=True)
+argp.add_argument(
+ '-t', '--travis', default=False, action='store_const', const=True)
args = argp.parse_args()
# Figure out which targets to build
targets = []
for label in args.build:
- targets += _BUILD_MAP[label]
+ targets += _BUILD_MAP[label]
# Among targets selected by -b, filter out those that don't match the filter
targets = [t for t in targets if all(f in t.labels for f in args.filter)]
@@ -86,30 +87,29 @@ targets = sorted(set(targets))
# Execute pre-build phase
prebuild_jobs = []
for target in targets:
- prebuild_jobs += target.pre_build_jobspecs()
+ prebuild_jobs += target.pre_build_jobspecs()
if prebuild_jobs:
- num_failures, _ = jobset.run(
- prebuild_jobs, newline_on_success=True, maxjobs=args.jobs)
- if num_failures != 0:
- jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
- sys.exit(1)
+ num_failures, _ = jobset.run(
+ prebuild_jobs, newline_on_success=True, maxjobs=args.jobs)
+ if num_failures != 0:
+ jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
+ sys.exit(1)
build_jobs = []
for target in targets:
- build_jobs.append(target.build_jobspec())
+ build_jobs.append(target.build_jobspec())
if not build_jobs:
- print('Nothing to build.')
- sys.exit(1)
+ print('Nothing to build.')
+ sys.exit(1)
jobset.message('START', 'Building targets.', do_newline=True)
num_failures, resultset = jobset.run(
build_jobs, newline_on_success=True, maxjobs=args.jobs)
-report_utils.render_junit_xml_report(resultset, 'report_taskrunner_sponge_log.xml',
- suite_name='tasks')
+report_utils.render_junit_xml_report(
+ resultset, 'report_taskrunner_sponge_log.xml', suite_name='tasks')
if num_failures == 0:
- jobset.message('SUCCESS', 'All targets built successfully.',
- do_newline=True)
+ jobset.message(
+ 'SUCCESS', 'All targets built successfully.', do_newline=True)
else:
- jobset.message('FAILED', 'Failed to build targets.',
- do_newline=True)
- sys.exit(1)
+ jobset.message('FAILED', 'Failed to build targets.', do_newline=True)
+ sys.exit(1)
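
Finally, the _create_build_map reshuffle in task_runner.py above is behavior-preserving: every target stays reachable by its unique name, by each of its labels, and by the catch-all 'all' label, with clashes between names and labels rejected. The standalone sketch below captures that mapping; Target, create_build_map, and the sample target names are illustrative stand-ins for the real artifact/distribtest/package target classes.

import collections

# Stand-in for the real target classes; only 'name' and 'labels' matter here.
Target = collections.namedtuple('Target', ['name', 'labels'])


def create_build_map(targets):
    """Map each target name and each label to the list of targets it selects."""
    name_map = {t.name: [t] for t in targets}
    if len(name_map) != len(targets):
        raise Exception('Target names need to be unique')
    label_map = {'all': list(targets)}
    for t in targets:
        for label in t.labels:
            label_map.setdefault(label, []).append(t)
    if set(name_map) & set(label_map):
        raise Exception('Target names need to be distinct from label names')
    build_map = dict(name_map)
    build_map.update(label_map)
    return build_map


targets = [
    Target('linux_wheels', ['artifact', 'linux']),
    Target('csharp_nuget', ['package', 'csharp']),
]
# 'all' selects both targets; 'artifact' selects only linux_wheels.
print(sorted(t.name for t in create_build_map(targets)['all']))
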