Diffstat (limited to 'tools/run_tests/run_tests.py')
-rwxr-xr-x  tools/run_tests/run_tests.py  111
1 file changed, 61 insertions(+), 50 deletions(-)
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index c49ee4a6cc..924274191e 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -54,9 +54,9 @@ import time
from six.moves import urllib
import uuid
-import jobset
-import report_utils
-import watch_dirs
+import python_utils.jobset as jobset
+import python_utils.report_utils as report_utils
+import python_utils.watch_dirs as watch_dirs
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
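The helper modules move into a python_utils package under tools/run_tests, so the bare imports become package-qualified, aliased imports. A minimal sketch of what the new imports require; the package layout (including an __init__.py) is inferred from the import paths, not shown in this diff:

    import importlib

    # Each helper must now resolve as python_utils.<name> (which presumes an
    # __init__.py in tools/run_tests/python_utils) rather than as a top-level
    # module. This loop only probes importability; run it from tools/run_tests.
    for name in ('jobset', 'report_utils', 'watch_dirs'):
        try:
            importlib.import_module('python_utils.%s' % name)
            print('ok: python_utils.%s' % name)
        except ImportError as exc:
            print('missing: python_utils.%s (%s)' % (name, exc))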
@@ -116,7 +116,7 @@ class Config(object):
def get_c_tests(travis, test_lang) :
out = []
platforms_str = 'ci_platforms' if travis else 'platforms'
- with open('tools/run_tests/tests.json') as f:
+ with open('tools/run_tests/generated/tests.json') as f:
js = json.load(f)
return [tgt
for tgt in js
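Only the path changes here; tests.json is now a generated file under generated/. The platform filter itself is untouched. A self-contained restatement of the platform part of that filter, with a two-entry inline stand-in for the generated file:

    import json

    # Stand-in for tools/run_tests/generated/tests.json; field names mirror
    # the diff context ('platforms' vs. 'ci_platforms').
    js = json.loads('[{"name": "alpha", "platforms": ["linux", "windows"],'
                    ' "ci_platforms": ["linux"]},'
                    ' {"name": "beta", "platforms": ["mac"],'
                    ' "ci_platforms": ["mac", "linux"]}]')
    travis = True
    platforms_str = 'ci_platforms' if travis else 'platforms'
    print([tgt['name'] for tgt in js if 'linux' in tgt[platforms_str]])
    # -> ['alpha', 'beta']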
@@ -300,7 +300,7 @@ class CLanguage(object):
def pre_build_steps(self):
if self.platform == 'windows':
- return [['tools\\run_tests\\pre_build_c.bat']]
+ return [['tools\\run_tests\\helper_scripts\\pre_build_c.bat']]
else:
return []
@@ -311,7 +311,7 @@ class CLanguage(object):
if self.platform == 'windows':
return []
else:
- return [['tools/run_tests/post_tests_c.sh']]
+ return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
return 'Makefile'
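The same tools/run_tests/ to tools/run_tests/helper_scripts/ move repeats for every language below. A hypothetical helper that captures the pattern (the real script builds each list inline, exactly as the hunks show):

    import os

    def _helper_script(name):
        # Hypothetical convenience, not in the patch: picks the Windows .bat
        # or POSIX .sh variant under the new helper_scripts/ directory.
        if os.name == 'nt':
            return ['tools\\run_tests\\helper_scripts\\%s.bat' % name]
        return ['tools/run_tests/helper_scripts/%s.sh' % name]

    print(_helper_script('pre_build_c'))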
@@ -382,17 +382,16 @@ class NodeLanguage(object):
def test_specs(self):
if self.platform == 'windows':
- return [self.config.job_spec(['tools\\run_tests\\run_node.bat'], None)]
+ return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])]
else:
- return [self.config.job_spec(['tools/run_tests/run_node.sh', self.node_version],
- None,
+ return [self.config.job_spec(['tools/run_tests/helper_scripts/run_node.sh', self.node_version],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
if self.platform == 'windows':
- return [['tools\\run_tests\\pre_build_node.bat']]
+ return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
- return [['tools/run_tests/pre_build_node.sh', self.node_version]]
+ return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]]
def make_targets(self):
return []
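Alongside the path moves, several call sites stop passing an explicit None as job_spec's second positional argument, which implies that slot now has a default. A reduced sketch of a signature consistent with the updated calls; the parameter names and defaults here are assumptions about the jobset API, not taken from this diff:

    # Assumed shape only: the old second positional (a hash-targets slot that
    # was always passed as None at these call sites) simply defaults away.
    def job_spec(cmdline, timeout_seconds=5*60, shortname=None,
                 environ=None, cpu_cost=1.0):
        return dict(cmdline=cmdline, timeout_seconds=timeout_seconds,
                    shortname=shortname, environ=environ or {},
                    cpu_cost=cpu_cost)

    print(job_spec(['tools/run_tests/helper_scripts/run_node.sh', '4'],
                   environ={'GRPC_TRACE': 'api'}))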
@@ -402,9 +401,9 @@ class NodeLanguage(object):
def build_steps(self):
if self.platform == 'windows':
- return [['tools\\run_tests\\build_node.bat']]
+ return [['tools\\run_tests\\helper_scripts\\build_node.bat']]
else:
- return [['tools/run_tests/build_node.sh', self.node_version]]
+ return [['tools/run_tests/helper_scripts/build_node.sh', self.node_version]]
def post_tests_steps(self):
return []
@@ -427,7 +426,7 @@ class PhpLanguage(object):
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
- return [self.config.job_spec(['src/php/bin/run_tests.sh'], None,
+ return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
@@ -440,10 +439,10 @@ class PhpLanguage(object):
return []
def build_steps(self):
- return [['tools/run_tests/build_php.sh']]
+ return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
- return [['tools/run_tests/post_tests_php.sh']]
+ return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
@@ -463,7 +462,7 @@ class Php7Language(object):
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
- return [self.config.job_spec(['src/php/bin/run_tests.sh'], None,
+ return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
@@ -476,10 +475,10 @@ class Php7Language(object):
return []
def build_steps(self):
- return [['tools/run_tests/build_php.sh']]
+ return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
- return [['tools/run_tests/post_tests_php.sh']]
+ return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
@@ -548,18 +547,18 @@ class PythonLanguage(object):
if os.name == 'nt':
shell = ['bash']
- builder = [os.path.abspath('tools/run_tests/build_python_msys2.sh')]
+ builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')]
builder_prefix_arguments = ['MINGW{}'.format(bits)]
venv_relative_python = ['Scripts/python.exe']
toolchain = ['mingw32']
else:
shell = []
- builder = [os.path.abspath('tools/run_tests/build_python.sh')]
+ builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')]
builder_prefix_arguments = []
venv_relative_python = ['bin/python']
toolchain = ['unix']
- runner = [os.path.abspath('tools/run_tests/run_python.sh')]
+ runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')]
config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
venv_relative_python, toolchain, runner)
python27_config = _python_config_generator(name='py27', major='2',
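Beyond the helper_scripts moves, this hunk keeps its original structure: all platform-specific pieces are gathered once, then threaded into each per-version Python config. An illustrative restatement of that branch, with the values copied from the hunk:

    import os

    # Same shape as the branch above: collect platform-dependent pieces once,
    # then feed them to every per-version config generator.
    if os.name == 'nt':
        shell = ['bash']
        venv_relative_python = ['Scripts/python.exe']
        toolchain = ['mingw32']
    else:
        shell = []
        venv_relative_python = ['bin/python']
        toolchain = ['unix']
    print(shell, venv_relative_python, toolchain)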
@@ -611,12 +610,12 @@ class RubyLanguage(object):
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
- return [self.config.job_spec(['tools/run_tests/run_ruby.sh'],
+ return [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
- return [['tools/run_tests/pre_build_ruby.sh']]
+ return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
return []
@@ -625,10 +624,10 @@ class RubyLanguage(object):
return []
def build_steps(self):
- return [['tools/run_tests/build_ruby.sh']]
+ return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def post_tests_steps(self):
- return [['tools/run_tests/post_tests_ruby.sh']]
+ return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
return 'Makefile'
@@ -702,7 +701,6 @@ class CSharpLanguage(object):
for test in tests_by_assembly[assembly]:
cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
specs.append(self.config.job_spec(cmdline,
- None,
shortname='csharp.%s' % test,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
else:
@@ -720,7 +718,6 @@ class CSharpLanguage(object):
# to prevent problems with registering the profiler.
run_exclusive = 1000000
specs.append(self.config.job_spec(cmdline,
- None,
shortname='csharp.coverage.%s' % assembly,
cpu_cost=run_exclusive,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
@@ -728,9 +725,9 @@ class CSharpLanguage(object):
def pre_build_steps(self):
if self.platform == 'windows':
- return [['tools\\run_tests\\pre_build_csharp.bat']]
+ return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat']]
else:
- return [['tools/run_tests/pre_build_csharp.sh']]
+ return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
def make_targets(self):
return ['grpc_csharp_ext']
@@ -741,22 +738,22 @@ class CSharpLanguage(object):
def build_steps(self):
if self.args.compiler == 'coreclr':
if self.platform == 'windows':
- return [['tools\\run_tests\\build_csharp_coreclr.bat']]
+ return [['tools\\run_tests\\helper_scripts\\build_csharp_coreclr.bat']]
else:
- return [['tools/run_tests/build_csharp_coreclr.sh']]
+ return [['tools/run_tests/helper_scripts/build_csharp_coreclr.sh']]
else:
if self.platform == 'windows':
return [[_windows_build_bat(self.args.compiler),
'src/csharp/Grpc.sln',
'/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]]
else:
- return [['tools/run_tests/build_csharp.sh']]
+ return [['tools/run_tests/helper_scripts/build_csharp.sh']]
def post_tests_steps(self):
if self.platform == 'windows':
- return [['tools\\run_tests\\post_tests_csharp.bat']]
+ return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
else:
- return [['tools/run_tests/post_tests_csharp.sh']]
+ return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
def makefile_name(self):
return 'Makefile'
@@ -779,7 +776,7 @@ class ObjCLanguage(object):
def test_specs(self):
return [
self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
- timeout_seconds=None,
+ timeout_seconds=60*60,
shortname='objc-tests',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
self.config.job_spec(['src/objective-c/tests/build_example_test.sh'],
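Replacing timeout_seconds=None with 60*60 gives the ObjC suite a hard one-hour cap instead of an unbounded run. A generic illustration of what such a cap buys, using Python 3's stdlib subprocess timeout rather than jobset's machinery (the assumption being that jobset kills the job analogously once the deadline passes):

    import subprocess

    # A job with no timeout can hang CI forever; a cap turns a hang into a
    # reported failure. Demonstrated with a 2s cap on a 5s sleep.
    try:
        subprocess.check_call(['sleep', '5'], timeout=2)
    except subprocess.TimeoutExpired:
        print('job exceeded its deadline and was killed')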
@@ -823,8 +820,12 @@ class Sanity(object):
def test_specs(self):
import yaml
with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
+ environ={'TEST': 'true'}
+ if _is_use_docker_child():
+ environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
return [self.config.job_spec(cmd['script'].split(),
- timeout_seconds=None, environ={'TEST': 'true'},
+ timeout_seconds=30*60,
+ environ=environ,
cpu_cost=cmd.get('cpu_cost', 1))
for cmd in yaml.load(f)]
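The sanity specs now build their environment in two steps, so CLANG_FORMAT_SKIP_DOCKER is set only when the runner is already a docker child. A self-contained sketch of that logic; the _is_use_docker_child stand-in probes a RUN_TESTS_COMMAND marker variable, which is an assumption about how the --use_docker wrapper signals its child:

    import os

    def _is_use_docker_child():
        # Assumed marker: the --use_docker wrapper is presumed to export
        # RUN_TESTS_COMMAND into the child environment.
        return bool(os.environ.get('RUN_TESTS_COMMAND'))

    environ = {'TEST': 'true'}
    if _is_use_docker_child():
        # Already inside docker: don't spawn a nested container for clang-format.
        environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
    print(environ)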
@@ -875,9 +876,9 @@ class NodeExpressLanguage(object):
def pre_build_steps(self):
if self.platform == 'windows':
- return [['tools\\run_tests\\pre_build_node.bat']]
+ return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
- return [['tools/run_tests/pre_build_node.sh', self.node_version]]
+ return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]]
def make_targets(self):
return []
@@ -901,7 +902,7 @@ class NodeExpressLanguage(object):
return 'node_express'
# different configurations we can run under
-with open('tools/run_tests/configs.json') as f:
+with open('tools/run_tests/generated/configs.json') as f:
_CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
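configs.json also moves under generated/. Note that the file is parsed with ast.literal_eval rather than json.load, which only works as long as the generated file sticks to Python-compatible literals (no bare true/false/null). A small demonstration of the parsing step, with an inline stand-in for the file:

    import ast

    # Inline stand-in for tools/run_tests/generated/configs.json; field names
    # follow the Config(**cfg) call above.
    text = '[{"config": "dbg", "environ": {"CONFIG": "dbg"}}]'
    _CONFIGS = dict((cfg['config'], cfg) for cfg in ast.literal_eval(text))
    print(sorted(_CONFIGS))  # -> ['dbg']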
@@ -1097,6 +1098,12 @@ argp.add_argument('-x', '--xml_report', default=None, type=str,
help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name', default='tests', type=str,
help='Test suite name to use in generated JUnit XML report')
+argp.add_argument('--quiet_success',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Don\'t print anything when a test passes. Passing tests will also not be reported in the XML report. ' +
+ 'Useful when running many iterations of each test (argument -n).')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
help='Dont try to iterate over many polling strategies when they exist')
args = argp.parse_args()
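The new --quiet_success flag is a plain boolean; default=False with action='store_const'/const=True behaves exactly like the more common action='store_true'. A minimal reproduction:

    import argparse

    argp = argparse.ArgumentParser()
    argp.add_argument('--quiet_success', default=False,
                      action='store_const', const=True)
    print(argp.parse_args(['--quiet_success']).quiet_success)  # True
    print(argp.parse_args([]).quiet_success)                   # False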
@@ -1296,7 +1303,7 @@ def _start_port_server(port_server_port):
running = False
if running:
current_version = int(subprocess.check_output(
- [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
+ [sys.executable, os.path.abspath('tools/run_tests/python_utils/port_server.py'),
'dump_version']))
print('my port server is version %d' % current_version)
running = (version >= current_version)
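The port server helper also moves into python_utils/. The surrounding logic is a version handshake: the running server is asked to print its version, and it is reused only if it is at least as new as the script on disk. A portable sketch of the pattern, with the helper inlined via -c so the snippet runs anywhere:

    import subprocess
    import sys

    version = 7  # version of the script on disk (illustrative value)
    current_version = int(subprocess.check_output(
        [sys.executable, '-c', 'print(7)']))  # stands in for 'dump_version'
    print('my port server is version %d' % current_version)
    print('reusing' if version >= current_version else 'restarting')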
@@ -1308,7 +1315,7 @@ def _start_port_server(port_server_port):
fd, logfile = tempfile.mkstemp()
os.close(fd)
print('starting port_server, with log file %s' % logfile)
- args = [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
+ args = [sys.executable, os.path.abspath('tools/run_tests/python_utils/port_server.py'),
'-p', '%d' % port_server_port, '-l', logfile]
env = dict(os.environ)
env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
@@ -1414,7 +1421,7 @@ def _build_and_run(
return []
# start antagonists
- antagonists = [subprocess.Popen(['tools/run_tests/antagonist.py'])
+ antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)]
port_server_port = 32766
_start_port_server(port_server_port)
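antagonist.py likewise moves to python_utils/. Its job is to create CPU contention while the tests run: processes are started here and killed in the finally block at the end of _build_and_run. A self-contained sketch, with the antagonist inlined via -c (the real script is essentially a busy loop along these lines):

    import subprocess
    import sys

    # Two stand-in antagonists; the real invocation is
    # ['tools/run_tests/python_utils/antagonist.py'].
    antagonists = [subprocess.Popen([sys.executable, '-c', 'while True: pass'])
                   for _ in range(0, 2)]
    try:
        pass  # ... build and run tests under CPU contention ...
    finally:
        for antagonist in antagonists:
            antagonist.kill()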
@@ -1444,20 +1451,24 @@ def _build_and_run(
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
+ if args.quiet_success:
+ jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs, check_cancelled, newline_on_success=newline_on_success,
travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
stop_on_failure=args.stop_on_failure,
- add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+ add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port},
+ quiet_success=args.quiet_success)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
- if num_failures == num_runs: # what about infinite_runs???
- jobset.message('FAILED', k, do_newline=True)
- elif num_failures > 0:
- jobset.message(
- 'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
- do_newline=True)
+ if num_failures > 0:
+ if num_failures == num_runs: # what about infinite_runs???
+ jobset.message('FAILED', k, do_newline=True)
+ else:
+ jobset.message(
+ 'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
+ do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
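The restructured loop reports only result sets containing at least one failure, which is what keeps passing tests silent under --quiet_success while preserving the FAILED/FLAKE distinction. A self-contained sketch of the new control flow, with a local stand-in for _calculate_num_runs_failures:

    # Toy resultset: each value is the list of per-run outcomes for one test.
    resultset = {'core.flaky_test': ['PASSED', 'FAILED', 'PASSED'],
                 'core.broken_test': ['FAILED', 'FAILED']}
    for k, v in sorted(resultset.items()):
        num_runs, num_failures = len(v), v.count('FAILED')  # stand-in helper
        if num_failures > 0:
            if num_failures == num_runs:
                print('FAILED: %s' % k)
            else:
                print('FLAKE: %s [%d/%d runs flaked]'
                      % (k, num_failures, num_runs))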