Diffstat (limited to 'tools/run_tests/run_tests.py')
-rwxr-xr-x  tools/run_tests/run_tests.py | 320 ++++++++++++++++++++++----------------------
 1 file changed, 165 insertions(+), 155 deletions(-)
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 4fc1be199c..28a93dd822 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -33,7 +33,6 @@
import argparse
import ast
import glob
-import hashlib
import itertools
import json
import multiprocessing
@@ -62,6 +61,11 @@ os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {}
+_POLLING_STRATEGIES = {
+ 'linux': ['epoll', 'poll', 'legacy']
+}
+
+
def platform_string():
return jobset.platform_string()
@@ -73,24 +77,18 @@ class Config(object):
if environ is None:
environ = {}
self.build_config = config
- self.allow_hashing = (config != 'gcov')
self.environ = environ
self.environ['CONFIG'] = config
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_multiplier
- def job_spec(self, cmdline, hash_targets, timeout_seconds=5*60,
+ def job_spec(self, cmdline, timeout_seconds=5*60,
shortname=None, environ={}, cpu_cost=1.0, flaky=False):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
- hash_targets: either None (don't do caching of test results), or
- a list of strings specifying files to include in a
- binary hash to check if a test has changed
- -- if used, all artifacts needed to run the test must
- be listed
"""
actual_environ = self.environ.copy()
for k, v in environ.iteritems():
@@ -100,8 +98,6 @@ class Config(object):
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
- hash_targets=hash_targets
- if self.allow_hashing else None,
flake_retries=5 if flaky or args.allow_flakes else 0,
timeout_retries=3 if args.allow_flakes else 0)
@@ -154,51 +150,57 @@ class CLanguage(object):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
- if self.config.build_config in target['exclude_configs']:
- continue
- if self.platform == 'windows':
- binary = 'vsprojects/%s%s/%s.exe' % (
- 'x64/' if self.args.arch == 'x64' else '',
- _MSBUILD_CONFIG[self.config.build_config],
- target['name'])
- else:
- binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
- if os.path.isfile(binary):
- if 'gtest' in target and target['gtest']:
- # here we parse the output of --gtest_list_tests to build up a
- # complete list of the tests contained in a binary
- # for each test, we then add a job to run, filtering for just that
- # test
- with open(os.devnull, 'w') as fnull:
- tests = subprocess.check_output([binary, '--gtest_list_tests'],
- stderr=fnull)
- base = None
- for line in tests.split('\n'):
- i = line.find('#')
- if i >= 0: line = line[:i]
- if not line: continue
- if line[0] != ' ':
- base = line.strip()
- else:
- assert base is not None
- assert line[1] == ' '
- test = base + line.strip()
- cmdline = [binary] + ['--gtest_filter=%s' % test]
- out.append(self.config.job_spec(cmdline, [binary],
- shortname='%s:%s' % (binary, test),
- cpu_cost=target['cpu_cost'],
- environ={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
- _ROOT + '/src/core/lib/tsi/test_creds/ca.pem'}))
+ polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
+ if target.get('uses_polling', True)
+ else ['all'])
+ for polling_strategy in polling_strategies:
+ env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
+ _ROOT + '/src/core/lib/tsi/test_creds/ca.pem',
+ 'GRPC_POLL_STRATEGY': polling_strategy}
+ shortname_ext = '' if polling_strategy=='all' else ' polling=%s' % polling_strategy
+ if self.config.build_config in target['exclude_configs']:
+ continue
+ if self.platform == 'windows':
+ binary = 'vsprojects/%s%s/%s.exe' % (
+ 'x64/' if self.args.arch == 'x64' else '',
+ _MSBUILD_CONFIG[self.config.build_config],
+ target['name'])
else:
- cmdline = [binary] + target['args']
- out.append(self.config.job_spec(cmdline, [binary],
- shortname=target.get('shortname', ' '.join(cmdline)),
- cpu_cost=target['cpu_cost'],
- flaky=target.get('flaky', False),
- environ={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
- _ROOT + '/src/core/lib/tsi/test_creds/ca.pem'}))
- elif self.args.regex == '.*' or self.platform == 'windows':
- print '\nWARNING: binary not found, skipping', binary
+ binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
+ if os.path.isfile(binary):
+ if 'gtest' in target and target['gtest']:
+ # here we parse the output of --gtest_list_tests to build up a
+ # complete list of the tests contained in a binary
+ # for each test, we then add a job to run, filtering for just that
+ # test
+ with open(os.devnull, 'w') as fnull:
+ tests = subprocess.check_output([binary, '--gtest_list_tests'],
+ stderr=fnull)
+ base = None
+ for line in tests.split('\n'):
+ i = line.find('#')
+ if i >= 0: line = line[:i]
+ if not line: continue
+ if line[0] != ' ':
+ base = line.strip()
+ else:
+ assert base is not None
+ assert line[1] == ' '
+ test = base + line.strip()
+ cmdline = [binary] + ['--gtest_filter=%s' % test]
+ out.append(self.config.job_spec(cmdline, [binary],
+ shortname='%s:%s %s' % (binary, test, shortname_ext),
+ cpu_cost=target['cpu_cost'],
+ environ=env))
+ else:
+ cmdline = [binary] + target['args']
+ out.append(self.config.job_spec(cmdline, [binary],
+ shortname=' '.join(cmdline) + shortname_ext,
+ cpu_cost=target['cpu_cost'],
+ flaky=target.get('flaky', False),
+ environ=env))
+ elif self.args.regex == '.*' or self.platform == 'windows':
+ print '\nWARNING: binary not found, skipping', binary
return sorted(out)
def make_targets(self):
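The hunk above fans each C/C++ test target out over the platform's polling strategies, setting GRPC_POLL_STRATEGY per job and suffixing the shortname. A minimal standalone sketch of that expansion under the same rules (expand_jobs, the target dicts, and the binary paths are illustrative, not the script's real JobSpec plumbing):

_POLLING_STRATEGIES = {'linux': ['epoll', 'poll', 'legacy']}

def expand_jobs(targets, platform):
    # One job per (target, polling strategy); targets that do not poll run once.
    jobs = []
    for target in targets:
        strategies = (_POLLING_STRATEGIES.get(platform, ['all'])
                      if target.get('uses_polling', True) else ['all'])
        for strategy in strategies:
            env = {'GRPC_POLL_STRATEGY': strategy}
            suffix = '' if strategy == 'all' else ' polling=%s' % strategy
            jobs.append((target['binary'], env, target['name'] + suffix))
    return jobs

print(expand_jobs([{'name': 'end2end_test', 'binary': 'bins/opt/end2end_test'},
                   {'name': 'json_test', 'binary': 'bins/opt/json_test',
                    'uses_polling': False}],
                  'linux'))

On Linux the first (polling) target yields three jobs, one per strategy, while the non-polling target yields a single job with GRPC_POLL_STRATEGY=all.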
@@ -234,29 +236,40 @@ class CLanguage(object):
def makefile_name(self):
return 'Makefile'
- def _clang_make_options(self):
- return ['CC=clang', 'CXX=clang++', 'LD=clang', 'LDXX=clang++']
+ def _clang_make_options(self, version_suffix=''):
+ return ['CC=clang%s' % version_suffix,
+ 'CXX=clang++%s' % version_suffix,
+ 'LD=clang%s' % version_suffix,
+ 'LDXX=clang++%s' % version_suffix]
- def _gcc44_make_options(self):
- return ['CC=gcc-4.4', 'CXX=g++-4.4', 'LD=gcc-4.4', 'LDXX=g++-4.4']
+ def _gcc_make_options(self, version_suffix):
+ return ['CC=gcc%s' % version_suffix,
+ 'CXX=g++%s' % version_suffix,
+ 'LD=gcc%s' % version_suffix,
+ 'LDXX=g++%s' % version_suffix]
def _compiler_options(self, use_docker, compiler):
"""Returns docker distro and make options to use for given compiler."""
- if _is_use_docker_child():
- return ("already_under_docker", [])
- if not use_docker:
+ if not use_docker and not _is_use_docker_child():
_check_compiler(compiler, ['default'])
if compiler == 'gcc4.9' or compiler == 'default':
return ('jessie', [])
elif compiler == 'gcc4.4':
- return ('wheezy', self._gcc44_make_options())
+ return ('wheezy', self._gcc_make_options(version_suffix='-4.4'))
+ elif compiler == 'gcc4.6':
+ return ('wheezy', self._gcc_make_options(version_suffix='-4.6'))
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
elif compiler == 'clang3.4':
+ # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
return ('ubuntu1404', self._clang_make_options())
+ elif compiler == 'clang3.5':
+ return ('jessie', self._clang_make_options(version_suffix='-3.5'))
elif compiler == 'clang3.6':
- return ('ubuntu1604', self._clang_make_options())
+ return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
+ elif compiler == 'clang3.7':
+ return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
else:
raise Exception('Compiler %s not supported.' % compiler)
@@ -364,29 +377,31 @@ class PythonLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
- self._tox_env = self._get_tox_env(self.args.compiler)
+ self._tox_envs = self._get_tox_envs(self.args.compiler)
def test_specs(self):
# load list of known test suites
with open('src/python/grpcio/tests/tests.json') as tests_json_file:
tests_json = json.load(tests_json_file)
environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
- environment['PYTHONPATH'] = os.path.abspath('src/python/gens')
+ environment['PYTHONPATH'] = '{}:{}'.format(
+ os.path.abspath('src/python/gens'),
+ os.path.abspath('src/python/grpcio_health_checking'))
if self.config.build_config != 'gcov':
return [self.config.job_spec(
- ['tools/run_tests/run_python.sh', self._tox_env],
- None,
+ ['tools/run_tests/run_python.sh', tox_env],
environ=dict(environment.items() +
[('GRPC_PYTHON_TESTRUNNER_FILTER', suite_name)]),
- shortname='py.test.%s' % suite_name,
+ shortname='%s.test.%s' % (tox_env, suite_name),
timeout_seconds=5*60)
- for suite_name in tests_json]
+ for suite_name in tests_json
+ for tox_env in self._tox_envs]
else:
- return [self.config.job_spec(['tools/run_tests/run_python.sh'],
- None,
+ return [self.config.job_spec(['tools/run_tests/run_python.sh', tox_env],
environ=environment,
- shortname='py.test.coverage',
- timeout_seconds=15*60)]
+ shortname='%s.test.coverage' % tox_env,
+ timeout_seconds=15*60)
+ for tox_env in self._tox_envs]
def pre_build_steps(self):
@@ -399,7 +414,8 @@ class PythonLanguage(object):
return []
def build_steps(self):
- return [['tools/run_tests/build_python.sh', self._tox_env]]
+ return [['tools/run_tests/build_python.sh', tox_env]
+ for tox_env in self._tox_envs]
def post_tests_steps(self):
return []
@@ -410,12 +426,14 @@ class PythonLanguage(object):
def dockerfile_dir(self):
return 'tools/dockerfile/test/python_jessie_%s' % _docker_arch_suffix(self.args.arch)
- def _get_tox_env(self, compiler):
+ def _get_tox_envs(self, compiler):
"""Returns name of tox environment based on selected compiler."""
- if compiler == 'python2.7' or compiler == 'default':
- return 'py27'
+ if compiler == 'default':
+ return ('py27', 'py34')
+ elif compiler == 'python2.7':
+ return ('py27',)
elif compiler == 'python3.4':
- return 'py34'
+ return ('py34',)
else:
raise Exception('Compiler %s not supported.' % compiler)
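With _get_tox_envs returning a tuple, the Python build and test steps earlier in this patch are simply generated once per environment. A condensed restatement of that fan-out, runnable on its own (the 'default' fan-out to py27 and py34 is taken directly from the hunk above):

def _get_tox_envs(compiler):
    # Mirrors the patched helper: 'default' now covers both interpreters.
    if compiler == 'default':
        return ('py27', 'py34')
    elif compiler == 'python2.7':
        return ('py27',)
    elif compiler == 'python3.4':
        return ('py34',)
    raise Exception('Compiler %s not supported.' % compiler)

build_steps = [['tools/run_tests/build_python.sh', tox_env]
               for tox_env in _get_tox_envs('default')]
print(build_steps)  # [['tools/run_tests/build_python.sh', 'py27'], [..., 'py34']]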
@@ -431,7 +449,7 @@ class RubyLanguage(object):
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
- return [self.config.job_spec(['tools/run_tests/run_ruby.sh'], None,
+ return [self.config.job_spec(['tools/run_tests/run_ruby.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
@@ -471,15 +489,23 @@ class CSharpLanguage(object):
if self.platform == 'windows':
# Explicitly choosing between x86 and x64 arch doesn't work yet
_check_arch(self.args.arch, ['default'])
+ # CoreCLR use 64bit runtime by default.
+ arch_option = 'x64' if self.args.compiler == 'coreclr' else self.args.arch
self._make_options = [_windows_toolset_option(self.args.compiler),
- _windows_arch_option(self.args.arch)]
+ _windows_arch_option(arch_option)]
else:
- _check_compiler(self.args.compiler, ['default'])
+ _check_compiler(self.args.compiler, ['default', 'coreclr'])
+ if self.platform == 'linux' and self.args.compiler == 'coreclr':
+ self._docker_distro = 'coreclr'
+ else:
+ self._docker_distro = 'jessie'
+
if self.platform == 'mac':
- # On Mac, official distribution of mono is 32bit.
# TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
- self._make_options = ['EMBED_OPENSSL=true',
- 'CFLAGS=-m32', 'LDFLAGS=-m32']
+ self._make_options = ['EMBED_OPENSSL=true']
+ if self.args.compiler != 'coreclr':
+ # On Mac, official distribution of mono is 32bit.
+ self._make_options += ['CFLAGS=-m32', 'LDFLAGS=-m32']
else:
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
@@ -488,17 +514,33 @@ class CSharpLanguage(object):
tests_by_assembly = json.load(f)
msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
- nunit_args = ['--labels=All',
- '--noresult',
- '--workers=1']
- if self.platform == 'windows':
+ nunit_args = ['--labels=All']
+ assembly_subdir = 'bin/%s' % msbuild_config
+ assembly_extension = '.exe'
+
+ if self.args.compiler == 'coreclr':
+ if self.platform == 'linux':
+ assembly_subdir += '/netstandard1.5/debian.8-x64'
+ assembly_extension = ''
+ elif self.platform == 'mac':
+ assembly_subdir += '/netstandard1.5/osx.10.11-x64'
+ assembly_extension = ''
+ else:
+ assembly_subdir += '/netstandard1.5/win7-x64'
runtime_cmd = []
else:
- runtime_cmd = ['mono']
+ nunit_args += ['--noresult', '--workers=1']
+ if self.platform == 'windows':
+ runtime_cmd = []
+ else:
+ runtime_cmd = ['mono']
specs = []
for assembly in tests_by_assembly.iterkeys():
- assembly_file = 'src/csharp/%s/bin/%s/%s.exe' % (assembly, msbuild_config, assembly)
+ assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
+ assembly_subdir,
+ assembly,
+ assembly_extension)
if self.config.build_config != 'gcov' or self.platform != 'windows':
# normally, run each test as a separate process
for test in tests_by_assembly[assembly]:
@@ -541,12 +583,18 @@ class CSharpLanguage(object):
return self._make_options;
def build_steps(self):
- if self.platform == 'windows':
- return [[_windows_build_bat(self.args.compiler),
- 'src/csharp/Grpc.sln',
- '/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]]
+ if self.args.compiler == 'coreclr':
+ if self.platform == 'windows':
+ return [['tools\\run_tests\\build_csharp_coreclr.bat']]
+ else:
+ return [['tools/run_tests/build_csharp_coreclr.sh']]
else:
- return [['tools/run_tests/build_csharp.sh']]
+ if self.platform == 'windows':
+ return [[_windows_build_bat(self.args.compiler),
+ 'src/csharp/Grpc.sln',
+ '/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]]
+ else:
+ return [['tools/run_tests/build_csharp.sh']]
def post_tests_steps(self):
if self.platform == 'windows':
@@ -558,7 +606,8 @@ class CSharpLanguage(object):
return 'Makefile'
def dockerfile_dir(self):
- return 'tools/dockerfile/test/csharp_jessie_%s' % _docker_arch_suffix(self.args.arch)
+ return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
+ _docker_arch_suffix(self.args.arch))
def __str__(self):
return 'csharp'
@@ -610,7 +659,7 @@ class Sanity(object):
def test_specs(self):
import yaml
with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
- return [self.config.job_spec(cmd['script'].split(), None,
+ return [self.config.job_spec(cmd['script'].split(),
timeout_seconds=None, environ={'TEST': 'true'},
cpu_cost=cmd.get('cpu_cost', 1))
for cmd in yaml.load(f)]
@@ -700,7 +749,8 @@ def _check_arch_option(arch):
def _windows_build_bat(compiler):
"""Returns name of build.bat for selected compiler."""
- if compiler == 'default' or compiler == 'vs2013':
+ # For CoreCLR, fall back to the default compiler for C core
+ if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
return 'vsprojects\\build_vs2013.bat'
elif compiler == 'vs2015':
return 'vsprojects\\build_vs2015.bat'
@@ -713,7 +763,8 @@ def _windows_build_bat(compiler):
def _windows_toolset_option(compiler):
"""Returns msbuild PlatformToolset for selected compiler."""
- if compiler == 'default' or compiler == 'vs2013':
+ # For CoreCLR, fall back to the default compiler for C core
+ if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
return '/p:PlatformToolset=v120'
elif compiler == 'vs2015':
return '/p:PlatformToolset=v140'
@@ -764,6 +815,7 @@ argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
help='A positive integer or "inf". If "inf", all tests will run in an '
'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
+argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-f', '--forever',
@@ -804,11 +856,12 @@ argp.add_argument('--arch',
help='Selects architecture to target. For some platforms "default" is the only supported choice.')
argp.add_argument('--compiler',
choices=['default',
- 'gcc4.4', 'gcc4.9', 'gcc5.3',
- 'clang3.4', 'clang3.6',
+ 'gcc4.4', 'gcc4.6', 'gcc4.9', 'gcc5.3',
+ 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
'vs2010', 'vs2013', 'vs2015',
'python2.7', 'python3.4',
- 'node0.12', 'node4', 'node5'],
+ 'node0.12', 'node4', 'node5',
+ 'coreclr'],
default='default',
help='Selects compiler to use. Allowed values depend on the platform and language.')
argp.add_argument('--build_only',
@@ -828,6 +881,9 @@ argp.add_argument('--force_default_poller', default=False, action='store_const',
help='Dont try to iterate over many polling strategies when they exist')
args = argp.parse_args()
+if args.force_default_poller:
+ _POLLING_STRATEGIES = {}
+
jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
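Because the C test fan-out earlier in the patch reads _POLLING_STRATEGIES.get(platform, ['all']), clearing the dict here is all --force_default_poller needs to collapse every test back to a single run. A one-line check of that fallback:

_POLLING_STRATEGIES = {}
print(_POLLING_STRATEGIES.get('linux', ['all']))  # ['all'] -> one job per test, GRPC_POLL_STRATEGY=all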
@@ -880,7 +936,7 @@ for l in languages:
language_make_options=[]
if any(language.make_options() for language in languages):
- if len(languages) != 1:
+ if not 'gcov' in args.config and len(languages) != 1:
print 'languages with custom make options cannot be built simultaneously with other languages'
sys.exit(1)
else:
@@ -991,46 +1047,6 @@ runs_per_test = args.runs_per_test
forever = args.forever
-class TestCache(object):
- """Cache for running tests."""
-
- def __init__(self, use_cache_results):
- self._last_successful_run = {}
- self._use_cache_results = use_cache_results
- self._last_save = time.time()
-
- def should_run(self, cmdline, bin_hash):
- if cmdline not in self._last_successful_run:
- return True
- if self._last_successful_run[cmdline] != bin_hash:
- return True
- if not self._use_cache_results:
- return True
- return False
-
- def finished(self, cmdline, bin_hash):
- self._last_successful_run[cmdline] = bin_hash
- if time.time() - self._last_save > 1:
- self.save()
-
- def dump(self):
- return [{'cmdline': k, 'hash': v}
- for k, v in self._last_successful_run.iteritems()]
-
- def parse(self, exdump):
- self._last_successful_run = dict((o['cmdline'], o['hash']) for o in exdump)
-
- def save(self):
- with open('.run_tests_cache', 'w') as f:
- f.write(json.dumps(self.dump()))
- self._last_save = time.time()
-
- def maybe_load(self):
- if os.path.exists('.run_tests_cache'):
- with open('.run_tests_cache') as f:
- self.parse(json.loads(f.read()))
-
-
def _start_port_server(port_server_port):
# check if a compatible port server is running
# if incompatible (version mismatch) ==> start a new one
@@ -1039,7 +1055,7 @@ def _start_port_server(port_server_port):
try:
version = int(urllib2.urlopen(
'http://localhost:%d/version_number' % port_server_port,
- timeout=1).read())
+ timeout=10).read())
print 'detected port server running version %d' % version
running = True
except Exception as e:
@@ -1150,7 +1166,7 @@ class BuildAndRunError(object):
# returns a list of things that failed (or an empty list on success)
def _build_and_run(
- check_cancelled, newline_on_success, cache, xml_report=None, build_only=False):
+ check_cancelled, newline_on_success, xml_report=None, build_only=False):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(
@@ -1177,7 +1193,9 @@ def _build_and_run(
spec
for language in languages
for spec in language.test_specs()
- if re.search(args.regex, spec.shortname))
+ if (re.search(args.regex, spec.shortname) and
+ (args.regex_exclude == '' or
+ not re.search(args.regex_exclude, spec.shortname))))
# When running on travis, we want out test runs to be as similar as possible
# for reproducibility purposes.
if args.travis:
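The new --regex_exclude flag composes with -r/--regex as an include-then-exclude filter: a spec runs when its shortname matches the include pattern and, if an exclude pattern was given, does not also match that. A small standalone check of the predicate used above (the shortnames here are made up):

import re

def keep(shortname, regex, regex_exclude):
    # Kept when it matches the include pattern and, if an exclude pattern
    # was supplied, does not match the exclude pattern.
    return bool(re.search(regex, shortname) and
                (regex_exclude == '' or not re.search(regex_exclude, shortname)))

print(keep('bins/opt/end2end_test polling=epoll', 'end2end', ''))          # True
print(keep('bins/opt/end2end_test polling=legacy', 'end2end', 'legacy'))   # False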
@@ -1197,10 +1215,9 @@ def _build_and_run(
all_runs, check_cancelled, newline_on_success=newline_on_success,
travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
stop_on_failure=args.stop_on_failure,
- cache=cache if not xml_report else None,
add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
if resultset:
- for k, v in resultset.iteritems():
+ for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures == num_runs: # what about infinite_runs???
jobset.message('FAILED', k, do_newline=True)
@@ -1226,14 +1243,9 @@ def _build_and_run(
if num_test_failures:
out.append(BuildAndRunError.TEST)
- if cache: cache.save()
-
return out
-test_cache = TestCache(runs_per_test == 1)
-test_cache.maybe_load()
-
if forever:
success = True
while True:
@@ -1243,7 +1255,6 @@ if forever:
previous_success = success
errors = _build_and_run(check_cancelled=have_files_changed,
newline_on_success=False,
- cache=test_cache,
build_only=args.build_only) == 0
if not previous_success and not errors:
jobset.message('SUCCESS',
@@ -1255,7 +1266,6 @@ if forever:
else:
errors = _build_and_run(check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
- cache=test_cache,
xml_report=args.xml_report,
build_only=args.build_only)
if not errors: