Diffstat (limited to 'tools/run_tests/run_tests.py')
 tools/run_tests/run_tests.py | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 52 insertions(+), 28 deletions(-)
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 7c65067857..209925b5e6 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -63,8 +63,7 @@ _FORCE_ENVIRON_FOR_WRAPPERS = {
 }
 
 _POLLING_STRATEGIES = {
-  'linux': ['epollsig', 'epoll1', 'poll', 'poll-cv'],
-# TODO(ctiller, sreecha): enable epollex, epoll-thread-pool
+  'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
   'mac': ['poll'],
 }
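Each strategy in this list fans out into a separate run of every polling-sensitive test, selected at runtime through the GRPC_POLL_STRATEGY environment variable (it appears in the job environment later in this diff). A minimal sketch of that fan-out, assuming illustrative names (_platform_string, jobs_for) that stand in for the script's real helpers:

    import platform

    _POLLING_STRATEGIES = {
        'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
        'mac': ['poll'],
    }

    def _platform_string():
        # Rough stand-in for jobset.platform_string(); maps Darwin to 'mac'.
        s = platform.system().lower()
        return 'mac' if s == 'darwin' else s

    def jobs_for(binary):
        # One job per strategy; 'all' means "let gRPC pick", so no override.
        for strategy in _POLLING_STRATEGIES.get(_platform_string(), ['all']):
            env = {}
            if strategy != 'all':
                env['GRPC_POLL_STRATEGY'] = strategy  # real gRPC runtime env var
            yield (binary, env)

Enabling epollex here therefore adds one more full pass over the Linux C/C++ test suite.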
@@ -122,7 +121,7 @@ def max_parallel_tests_for_current_platform():
   # so far on windows.
   if jobset.platform_string() == 'windows':
     return 64
-  return 128
+  return 1024
 
 # SimpleConfig: just compile with CONFIG=config, and run the binary to test
 class Config(object):
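This cap bounds how many test jobs the scheduler may overlap; raising it from 128 to 1024 lets large numbers of cheap (low cpu_cost) jobs run concurrently on big machines. The call site sits outside this diff, so the expression below is an assumption, shown with the function made self-contained for the sketch:

    import multiprocessing

    def max_parallel_tests_for_current_platform(platform_string='linux'):
        # Mirror of the function in this hunk, taking platform_string as a
        # parameter instead of calling jobset.platform_string().
        if platform_string == 'windows':
            return 64
        return 1024

    # Assumed call site: effective parallelism is the smaller of the
    # requested job count (-j, defaulting to the CPU count) and the cap.
    requested = multiprocessing.cpu_count()
    print(min(requested, max_parallel_tests_for_current_platform()))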
@@ -149,18 +148,16 @@ class Config(object):
     for k, v in environ.items():
       actual_environ[k] = v
     if not flaky and shortname and shortname in flaky_tests:
-      print('Setting %s to flaky' % shortname)
       flaky = True
     if shortname in shortname_to_cpu:
-      print('Update CPU cost for %s: %f -> %f' % (shortname, cpu_cost, shortname_to_cpu[shortname]))
       cpu_cost = shortname_to_cpu[shortname]
     return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
                           shortname=shortname,
                           environ=actual_environ,
                           cpu_cost=cpu_cost,
                           timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
-                          flake_retries=5 if flaky or args.allow_flakes else 0,
-                          timeout_retries=3 if flaky or args.allow_flakes else 0)
+                          flake_retries=4 if flaky or args.allow_flakes else 0,
+                          timeout_retries=1 if flaky or args.allow_flakes else 0)
 
 
 def get_c_tests(travis, test_lang) :
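flake_retries and timeout_retries cap how often jobset reruns a failing job; this change trims known-flaky tests from 5 failure retries and 3 timeout retries down to 4 and 1. The real loop lives in jobset.py; the sketch below only illustrates the counting, with run_once and the status strings as assumed interfaces:

    def run_with_retries(run_once, flake_retries, timeout_retries):
        # run_once() -> 'ok' | 'failed' | 'timeout' (assumed interface).
        failures_left = flake_retries
        timeouts_left = timeout_retries
        while True:
            status = run_once()
            if status == 'ok':
                return True
            budget = timeouts_left if status == 'timeout' else failures_left
            if budget == 0:
                return False
            if status == 'timeout':
                timeouts_left -= 1
            else:
                failures_left -= 1

Keeping the timeout budget small matters more than the flake budget: a timed-out job has already burned its full timeout before the retry starts.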
@@ -284,9 +281,10 @@ class CLanguage(object):
       if self._use_cmake and target.get('boringssl', False):
         # cmake doesn't build boringssl tests
         continue
+      auto_timeout_scaling = target.get('auto_timeout_scaling', True)
       polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
                             if target.get('uses_polling', True)
-                            else ['all'])
+                            else ['none'])
       if self.args.iomgr_platform == 'uv':
         polling_strategies = ['all']
       for polling_strategy in polling_strategies:
@@ -298,22 +296,30 @@ class CLanguage(object):
         if resolver:
           env['GRPC_DNS_RESOLVER'] = resolver
         shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
-        timeout_scaling = 1
-        if polling_strategy == 'poll-cv':
-          timeout_scaling *= 5
-
         if polling_strategy in target.get('excluded_poll_engines', []):
           continue
-        # Scale overall test timeout if running under various sanitizers.
-        config = self.args.config
-        if ('asan' in config
-            or config == 'msan'
-            or config == 'tsan'
-            or config == 'ubsan'
-            or config == 'helgrind'
-            or config == 'memcheck'):
-          timeout_scaling *= 20
+
+        timeout_scaling = 1
+        if auto_timeout_scaling:
+          config = self.args.config
+          if ('asan' in config
+              or config == 'msan'
+              or config == 'tsan'
+              or config == 'ubsan'
+              or config == 'helgrind'
+              or config == 'memcheck'):
+            # Scale the overall test timeout if running under various
+            # sanitizers. The scaling value is based on historical data
+            # analysis.
+            timeout_scaling *= 3
+          elif polling_strategy == 'poll-cv':
+            # Scale the test timeout if running with poll-cv. Sanitizer and
+            # poll-cv scaling are not cumulative, to keep timeout values
+            # reasonable.
+            # TODO(jtattermusch): based on historical data and the 5min
+            # default test timeout, poll-cv scaling is currently not useful.
+            # Leaving it here so it can be reintroduced if the default test
+            # timeout is decreased in the future.
+            timeout_scaling *= 1
 
         if self.config.build_config in target['exclude_configs']:
           continue
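Concretely, assuming the 5-minute default timeout the TODO refers to, the new scheme yields:

    _DEFAULT_TIMEOUT_SECONDS = 300  # assumed 5-minute default, per the TODO

    print(_DEFAULT_TIMEOUT_SECONDS * 3)  # sanitizer build (asan/msan/...): 900s
    print(_DEFAULT_TIMEOUT_SECONDS * 1)  # poll-cv, non-sanitizer: still 300s

    # Before this change the factors compounded: an asan run under poll-cv
    # could reach 300 * 20 * 5 = 30000s. Making the two branches mutually
    # exclusive caps the worst case at 900s.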
@@ -330,11 +336,29 @@ class CLanguage(object):
         if cpu_cost == 'capacity':
           cpu_cost = multiprocessing.cpu_count()
         if os.path.isfile(binary):
-          if 'gtest' in target and target['gtest']:
-            # here we parse the output of --gtest_list_tests to build up a
-            # complete list of the tests contained in a binary
-            # for each test, we then add a job to run, filtering for just that
-            # test
+          list_test_command = None
+          filter_test_command = None
+
+          # These are the flags defined by the gtest and benchmark frameworks
+          # to list and filter test runs. We use them to split each individual
+          # test into its own JobSpec, and thus into its own process.
+          if 'benchmark' in target and target['benchmark']:
+            with open(os.devnull, 'w') as fnull:
+              tests = subprocess.check_output([binary, '--benchmark_list_tests'],
+                                              stderr=fnull)
+            for line in tests.split('\n'):
+              test = line.strip()
+              if not test: continue
+              cmdline = [binary, '--benchmark_filter=%s$' % test] + target['args']
+              out.append(self.config.job_spec(cmdline,
+                                              shortname='%s %s' % (' '.join(cmdline), shortname_ext),
+                                              cpu_cost=cpu_cost,
+                                              timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
+                                              environ=env))
+          elif 'gtest' in target and target['gtest']:
+            # Here we parse the output of --gtest_list_tests to build up a
+            # complete list of the tests contained in a binary; for each test,
+            # we then add a job to run, filtering for just that test.
             with open(os.devnull, 'w') as fnull:
               tests = subprocess.check_output([binary, '--gtest_list_tests'],
                                               stderr=fnull)
@@ -353,7 +377,7 @@ class CLanguage(object):
               out.append(self.config.job_spec(cmdline,
                                               shortname='%s %s' % (' '.join(cmdline), shortname_ext),
                                               cpu_cost=cpu_cost,
-                                              timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
+                                              timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
                                               environ=env))
           else:
             cmdline = [binary] + target['args']
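--benchmark_list_tests / --benchmark_filter and --gtest_list_tests / --gtest_filter are the stock flags of Google Benchmark and googletest, respectively. The body that parses the gtest listing is elided from this hunk; the sketch below shows what such parsing typically looks like, with the helper name and details as assumptions rather than the script's actual code:

    import subprocess

    def list_gtest_tests(binary):
        # --gtest_list_tests prints suite names ('Suite.') flush left and test
        # names indented, optionally followed by '# GetParam() = ...' notes.
        output = subprocess.check_output([binary, '--gtest_list_tests'])
        tests, suite = [], None
        for line in output.decode().splitlines():
            if not line.startswith(' '):
                # Suite lines end with '.'; other flush-left lines (banners,
                # blanks) reset the current suite and are skipped.
                suite = line.strip() if line.strip().endswith('.') else None
            elif suite:
                case = line.split('#')[0].strip()   # e.g. 'BasicCall'
                tests.append(suite + case)          # 'ChannelTest.BasicCall'
        return tests

Each resulting name then becomes one JobSpec run with --gtest_filter=Suite.Test, and the hunk above additionally lets a target override the default timeout via its own timeout_seconds entry before the scaling factor is applied.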
@@ -1493,7 +1517,7 @@ def build_step_environ(cfg):
   return environ
 
 build_steps = list(set(
                    jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=2)
-                   jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=5)
+                   jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=2)
                    for l in languages
                    for cmdline in l.pre_build_steps()))
 if make_targets:
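The list(set(...)) collapses pre-build steps that several languages share, so each distinct command runs once; for that to deduplicate anything, the JobSpec instances must compare and hash by value, which is an assumption about jobset here. A value-keyed sketch of the same idea, with dedupe_steps and the tuple key purely illustrative:

    def dedupe_steps(languages, build_config):
        # Keep the first occurrence of each distinct command line.
        seen, steps = set(), []
        for lang in languages:
            for cmdline in lang.pre_build_steps():
                key = tuple(cmdline)   # hash by command-line value
                if key not in seen:
                    seen.add(key)
                    steps.append(cmdline)
        return steps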