Diffstat (limited to 'tools/run_tests/run_tests.py')
-rwxr-xr-x  tools/run_tests/run_tests.py  |  180
1 file changed, 125 insertions(+), 55 deletions(-)
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index db261d98c2..924274191e 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -54,9 +54,9 @@ import time
from six.moves import urllib
import uuid
-import jobset
-import report_utils
-import watch_dirs
+import python_utils.jobset as jobset
+import python_utils.report_utils as report_utils
+import python_utils.watch_dirs as watch_dirs
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
@@ -69,7 +69,7 @@ _FORCE_ENVIRON_FOR_WRAPPERS = {
_POLLING_STRATEGIES = {
- 'linux': ['epoll', 'poll', 'poll-cv', 'legacy']
+ 'linux': ['epoll', 'poll', 'poll-cv']
}
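Note: _POLLING_STRATEGIES is consumed per C/C++ test target: the harness builds one job per strategy and pins it via GRPC_POLL_STRATEGY. A paraphrased Python sketch, using only the environment keys visible in the hunk below; the ['all'] fallback for non-Linux platforms is an assumption inferred from the shortname logic:

    # Sketch: one job per target, per polling strategy (paraphrased).
    for polling_strategy in _POLLING_STRATEGIES.get(self.platform, ['all']):
        env = {'GRPC_POLL_STRATEGY': polling_strategy,
               'GRPC_VERBOSITY': 'DEBUG'}
        shortname_ext = ('' if polling_strategy == 'all'
                         else ' GRPC_POLL_STRATEGY=%s' % polling_strategy)
        # ...each job_spec below inherits env and shortname_ext...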
@@ -116,7 +116,7 @@ class Config(object):
def get_c_tests(travis, test_lang) :
out = []
platforms_str = 'ci_platforms' if travis else 'platforms'
- with open('tools/run_tests/tests.json') as f:
+ with open('tools/run_tests/generated/tests.json') as f:
js = json.load(f)
return [tgt
for tgt in js
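Note: get_c_tests filters targets from the (now generated) tests.json. For orientation, a hypothetical entry written as a Python literal; the field names are the ones this script reads in later hunks, the values are invented:

    # Hypothetical tests.json entry (field values invented for illustration).
    {
        'name': 'end2end_test',
        'platforms': ['linux', 'mac', 'windows'],
        'ci_platforms': ['linux', 'windows'],  # consulted when --travis is set
        'cpu_cost': 'capacity',                # a number, or the string 'capacity'
        'exclude_configs': ['tsan'],
        'exclude_iomgrs': [],
        'gtest': False,
        'args': [],
        'flaky': False,
    }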
@@ -231,6 +231,9 @@ class CLanguage(object):
'GRPC_POLL_STRATEGY': polling_strategy,
'GRPC_VERBOSITY': 'DEBUG'}
shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
+ timeout_scaling = 1
+ if polling_strategy == 'poll-cv':
+ timeout_scaling *= 5
if self.config.build_config in target['exclude_configs']:
continue
if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
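Note: the new timeout_scaling factor gives the condition-variable-based 'poll-cv' strategy five times the usual time budget. A minimal sketch of the arithmetic, assuming _DEFAULT_TIMEOUT_SECONDS is the five-minute constant defined near the top of this file (assumed value):

    _DEFAULT_TIMEOUT_SECONDS = 5 * 60  # assumption, for illustration only

    timeout_scaling = 1
    if polling_strategy == 'poll-cv':
        timeout_scaling *= 5           # poll-cv jobs get a 5x budget
    timeout = _DEFAULT_TIMEOUT_SECONDS * timeout_scaling  # 300s -> 1500s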
@@ -242,6 +245,9 @@ class CLanguage(object):
target['name'])
else:
binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
+ cpu_cost = target['cpu_cost']
+ if cpu_cost == 'capacity':
+ cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
if 'gtest' in target and target['gtest']:
# here we parse the output of --gtest_list_tests to build up a
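Note: a target may declare its cpu_cost as the string 'capacity' to claim every core on the machine; the new lines resolve that to a concrete number before the job spec is built:

    import multiprocessing

    cpu_cost = target['cpu_cost']               # a number, or 'capacity'
    if cpu_cost == 'capacity':
        cpu_cost = multiprocessing.cpu_count()  # occupy the whole machine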
@@ -265,7 +271,8 @@ class CLanguage(object):
cmdline = [binary] + ['--gtest_filter=%s' % test]
out.append(self.config.job_spec(cmdline,
shortname='%s --gtest_filter=%s %s' % (binary, test, shortname_ext),
- cpu_cost=target['cpu_cost'],
+ cpu_cost=cpu_cost,
+ timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
environ=env))
else:
cmdline = [binary] + target['args']
@@ -274,9 +281,9 @@ class CLanguage(object):
pipes.quote(arg)
for arg in cmdline) +
shortname_ext,
- cpu_cost=target['cpu_cost'],
+ cpu_cost=cpu_cost,
flaky=target.get('flaky', False),
- timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS),
+ timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
environ=env))
elif self.args.regex == '.*' or self.platform == 'windows':
print('\nWARNING: binary not found, skipping', binary)
@@ -293,7 +300,7 @@ class CLanguage(object):
def pre_build_steps(self):
if self.platform == 'windows':
- return [['tools\\run_tests\\pre_build_c.bat']]
+ return [['tools\\run_tests\\helper_scripts\\pre_build_c.bat']]
else:
return []
@@ -304,7 +311,7 @@ class CLanguage(object):
if self.platform == 'windows':
return []
else:
- return [['tools/run_tests/post_tests_c.sh']]
+ return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
return 'Makefile'
@@ -332,6 +339,8 @@ class CLanguage(object):
return ('wheezy', self._gcc_make_options(version_suffix='-4.4'))
elif compiler == 'gcc4.6':
return ('wheezy', self._gcc_make_options(version_suffix='-4.6'))
+ elif compiler == 'gcc4.8':
+ return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
elif compiler == 'clang3.4':
@@ -363,7 +372,8 @@ class NodeLanguage(object):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default', 'node0.12',
- 'node4', 'node5', 'node6'])
+ 'node4', 'node5', 'node6',
+ 'node7'])
if self.args.compiler == 'default':
self.node_version = '4'
else:
@@ -372,17 +382,16 @@ class NodeLanguage(object):
def test_specs(self):
if self.platform == 'windows':
- return [self.config.job_spec(['tools\\run_tests\\run_node.bat'], None)]
+ return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])]
else:
- return [self.config.job_spec(['tools/run_tests/run_node.sh', self.node_version],
- None,
+ return [self.config.job_spec(['tools/run_tests/helper_scripts/run_node.sh', self.node_version],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
if self.platform == 'windows':
- return [['tools\\run_tests\\pre_build_node.bat']]
+ return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
- return [['tools/run_tests/pre_build_node.sh', self.node_version]]
+ return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]]
def make_targets(self):
return []
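Note: this hunk, and the matching ones in the PHP, Python and C# classes below, drop a bare positional None from job_spec calls, so that parameter has presumably gained a default. A hedged guess at the post-change signature, inferred only from the keyword arguments used at the call sites in this diff:

    # Inferred (not confirmed) shape of Config.job_spec after this change:
    def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None, environ={}, cpu_cost=1.0, flaky=False):
        ...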
@@ -392,9 +401,9 @@ class NodeLanguage(object):
def build_steps(self):
if self.platform == 'windows':
- return [['tools\\run_tests\\build_node.bat']]
+ return [['tools\\run_tests\\helper_scripts\\build_node.bat']]
else:
- return [['tools/run_tests/build_node.sh', self.node_version]]
+ return [['tools/run_tests/helper_scripts/build_node.sh', self.node_version]]
def post_tests_steps(self):
return []
@@ -417,7 +426,7 @@ class PhpLanguage(object):
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
- return [self.config.job_spec(['src/php/bin/run_tests.sh'], None,
+ return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
@@ -430,10 +439,10 @@ class PhpLanguage(object):
return []
def build_steps(self):
- return [['tools/run_tests/build_php.sh']]
+ return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
- return [['tools/run_tests/post_tests_php.sh']]
+ return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
@@ -453,7 +462,7 @@ class Php7Language(object):
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
- return [self.config.job_spec(['src/php/bin/run_tests.sh'], None,
+ return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
@@ -466,10 +475,10 @@ class Php7Language(object):
return []
def build_steps(self):
- return [['tools/run_tests/build_php.sh']]
+ return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
- return [['tools/run_tests/post_tests_php.sh']]
+ return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
@@ -501,7 +510,7 @@ class PythonLanguage(object):
config.run,
timeout_seconds=5*60,
environ=dict(list(environment.items()) +
- [('GRPC_PYTHON_TESTRUNNER_FILTER', suite_name)]),
+ [('GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
shortname='%s.test.%s' % (config.name, suite_name),)
for suite_name in tests_json
for config in self.pythons]
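Note: the str() coercion matters under Python 2, where json.load returns unicode objects while environment values are expected to be byte strings (subprocess on Windows in particular rejects mixed types). A tiny illustration, assuming Python 2 semantics:

    import json

    suite_name = json.loads('["unit"]')[0]  # u'unit' under Python 2
    # Coerce to plain str before the dict reaches the job runner; mixed
    # unicode/str env dicts can raise "TypeError: environment can only
    # contain strings" on some platforms.
    environ = {'GRPC_PYTHON_TESTRUNNER_FILTER': str(suite_name)}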
@@ -538,18 +547,18 @@ class PythonLanguage(object):
if os.name == 'nt':
shell = ['bash']
- builder = [os.path.abspath('tools/run_tests/build_python_msys2.sh')]
+ builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')]
builder_prefix_arguments = ['MINGW{}'.format(bits)]
venv_relative_python = ['Scripts/python.exe']
toolchain = ['mingw32']
else:
shell = []
- builder = [os.path.abspath('tools/run_tests/build_python.sh')]
+ builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')]
builder_prefix_arguments = []
venv_relative_python = ['bin/python']
toolchain = ['unix']
- runner = [os.path.abspath('tools/run_tests/run_python.sh')]
+ runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')]
config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
venv_relative_python, toolchain, runner)
python27_config = _python_config_generator(name='py27', major='2',
@@ -601,12 +610,12 @@ class RubyLanguage(object):
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
- return [self.config.job_spec(['tools/run_tests/run_ruby.sh'],
+ return [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
- return [['tools/run_tests/pre_build_ruby.sh']]
+ return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
return []
@@ -615,10 +624,10 @@ class RubyLanguage(object):
return []
def build_steps(self):
- return [['tools/run_tests/build_ruby.sh']]
+ return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def post_tests_steps(self):
- return [['tools/run_tests/post_tests_ruby.sh']]
+ return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
return 'Makefile'
@@ -692,7 +701,6 @@ class CSharpLanguage(object):
for test in tests_by_assembly[assembly]:
cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
specs.append(self.config.job_spec(cmdline,
- None,
shortname='csharp.%s' % test,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
else:
@@ -710,7 +718,6 @@ class CSharpLanguage(object):
# to prevent problems with registering the profiler.
run_exclusive = 1000000
specs.append(self.config.job_spec(cmdline,
- None,
shortname='csharp.coverage.%s' % assembly,
cpu_cost=run_exclusive,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
@@ -718,9 +725,9 @@ class CSharpLanguage(object):
def pre_build_steps(self):
if self.platform == 'windows':
- return [['tools\\run_tests\\pre_build_csharp.bat']]
+ return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat']]
else:
- return [['tools/run_tests/pre_build_csharp.sh']]
+ return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
def make_targets(self):
return ['grpc_csharp_ext']
@@ -731,22 +738,22 @@ class CSharpLanguage(object):
def build_steps(self):
if self.args.compiler == 'coreclr':
if self.platform == 'windows':
- return [['tools\\run_tests\\build_csharp_coreclr.bat']]
+ return [['tools\\run_tests\\helper_scripts\\build_csharp_coreclr.bat']]
else:
- return [['tools/run_tests/build_csharp_coreclr.sh']]
+ return [['tools/run_tests/helper_scripts/build_csharp_coreclr.sh']]
else:
if self.platform == 'windows':
return [[_windows_build_bat(self.args.compiler),
'src/csharp/Grpc.sln',
'/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]]
else:
- return [['tools/run_tests/build_csharp.sh']]
+ return [['tools/run_tests/helper_scripts/build_csharp.sh']]
def post_tests_steps(self):
if self.platform == 'windows':
- return [['tools\\run_tests\\post_tests_csharp.bat']]
+ return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
else:
- return [['tools/run_tests/post_tests_csharp.sh']]
+ return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
def makefile_name(self):
return 'Makefile'
@@ -769,7 +776,7 @@ class ObjCLanguage(object):
def test_specs(self):
return [
self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
- timeout_seconds=None,
+ timeout_seconds=60*60,
shortname='objc-tests',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
self.config.job_spec(['src/objective-c/tests/build_example_test.sh'],
@@ -813,8 +820,12 @@ class Sanity(object):
def test_specs(self):
import yaml
with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
+ environ={'TEST': 'true'}
+ if _is_use_docker_child():
+ environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
return [self.config.job_spec(cmd['script'].split(),
- timeout_seconds=None, environ={'TEST': 'true'},
+ timeout_seconds=30*60,
+ environ=environ,
cpu_cost=cmd.get('cpu_cost', 1))
for cmd in yaml.load(f)]
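Note: _is_use_docker_child is defined elsewhere in this file and is not part of this diff. A hedged sketch of its likely shape; the RUN_TESTS_COMMAND marker is an assumption, not something this diff confirms:

    import os

    def _is_use_docker_child():
        """Sketch: True when running as a child of a --use_docker invocation.

        Assumption: the parent leaves a RUN_TESTS_COMMAND marker in the
        environment."""
        return bool(os.getenv('RUN_TESTS_COMMAND'))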
@@ -842,9 +853,56 @@ class Sanity(object):
def __str__(self):
return 'sanity'
+class NodeExpressLanguage(object):
+ """Dummy Node express test target to enable running express performance
+ benchmarks"""
+
+ def __init__(self):
+ self.platform = platform_string()
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default', 'node0.12',
+ 'node4', 'node5', 'node6'])
+ if self.args.compiler == 'default':
+ self.node_version = '4'
+ else:
+ # Take off the word "node"
+ self.node_version = self.args.compiler[4:]
+
+ def test_specs(self):
+ return []
+
+ def pre_build_steps(self):
+ if self.platform == 'windows':
+ return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
+ else:
+ return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]]
+
+ def make_targets(self):
+ return []
+
+ def make_options(self):
+ return []
+
+ def build_steps(self):
+ return []
+
+ def post_tests_steps(self):
+ return []
+
+ def makefile_name(self):
+ return 'Makefile'
+
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
+
+ def __str__(self):
+ return 'node_express'
# different configurations we can run under
-with open('tools/run_tests/configs.json') as f:
+with open('tools/run_tests/generated/configs.json') as f:
_CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
@@ -852,6 +910,7 @@ _LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
'c': CLanguage('c', 'c'),
'node': NodeLanguage(),
+ 'node_express': NodeExpressLanguage(),
'php': PhpLanguage(),
'php7': Php7Language(),
'python': PythonLanguage(),
@@ -1012,10 +1071,11 @@ argp.add_argument('--arch',
help='Selects architecture to target. For some platforms "default" is the only supported choice.')
argp.add_argument('--compiler',
choices=['default',
- 'gcc4.4', 'gcc4.6', 'gcc4.9', 'gcc5.3',
+ 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3',
'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
'vs2010', 'vs2013', 'vs2015',
'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3',
+ 'node0.12', 'node4', 'node5', 'node6', 'node7',
'coreclr'],
default='default',
help='Selects compiler to use. Allowed values depend on the platform and language.')
@@ -1038,6 +1098,12 @@ argp.add_argument('-x', '--xml_report', default=None, type=str,
help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name', default='tests', type=str,
help='Test suite name to use in generated JUnit XML report')
+argp.add_argument('--quiet_success',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Dont print anything when a test passes. Passing tests also will not be reported in XML report. ' +
+ 'Useful when running many iterations of each test (argument -n).')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
help='Dont try to iterate over many polling strategies when they exist')
args = argp.parse_args()
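Note: default=False with action='store_const', const=True is the long-hand spelling of argparse's store_true, and this file uses the long-hand form consistently (compare --force_default_poller just above). A self-contained check:

    import argparse

    argp = argparse.ArgumentParser()
    argp.add_argument('--quiet_success', default=False,
                      action='store_const', const=True)
    assert argp.parse_args([]).quiet_success is False
    assert argp.parse_args(['--quiet_success']).quiet_success is True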
@@ -1237,7 +1303,7 @@ def _start_port_server(port_server_port):
running = False
if running:
current_version = int(subprocess.check_output(
- [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
+ [sys.executable, os.path.abspath('tools/run_tests/python_utils/port_server.py'),
'dump_version']))
print('my port server is version %d' % current_version)
running = (version >= current_version)
@@ -1249,7 +1315,7 @@ def _start_port_server(port_server_port):
fd, logfile = tempfile.mkstemp()
os.close(fd)
print('starting port_server, with log file %s' % logfile)
- args = [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
+ args = [sys.executable, os.path.abspath('tools/run_tests/python_utils/port_server.py'),
'-p', '%d' % port_server_port, '-l', logfile]
env = dict(os.environ)
env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
@@ -1355,7 +1421,7 @@ def _build_and_run(
return []
# start antagonists
- antagonists = [subprocess.Popen(['tools/run_tests/antagonist.py'])
+ antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)]
port_server_port = 32766
_start_port_server(port_server_port)
@@ -1385,20 +1451,24 @@ def _build_and_run(
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
+ if args.quiet_success:
+ jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs, check_cancelled, newline_on_success=newline_on_success,
travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
stop_on_failure=args.stop_on_failure,
- add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+ add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port},
+ quiet_success=args.quiet_success)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
- if num_failures == num_runs: # what about infinite_runs???
- jobset.message('FAILED', k, do_newline=True)
- elif num_failures > 0:
- jobset.message(
- 'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
- do_newline=True)
+ if num_failures > 0:
+ if num_failures == num_runs: # what about infinite_runs???
+ jobset.message('FAILED', k, do_newline=True)
+ else:
+ jobset.message(
+ 'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
+ do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
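Note: the restructured summary loop is behavior-preserving: in both the old and new versions, a test with num_failures == 0 prints nothing, num_failures == num_runs prints FAILED, and anything in between prints FLAKE; the new form just makes the "no failures, no output" case explicit. For reference, _calculate_num_runs_failures is not shown in this diff; a hedged sketch of its assumed shape:

    def _calculate_num_runs_failures(list_of_results):
        """Sketch (assumed shape): tally runs, counting retries, and failures."""
        num_runs = len(list_of_results)    # one run per JobResult by default
        num_failures = 0
        for result in list_of_results:
            num_runs += result.retries     # assumption: per-result retry count
            num_failures += result.num_failures
        return num_runs, num_failures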