author     murgatroid99 <mlumish@google.com>  2015-10-05 10:18:05 -0700
committer  murgatroid99 <mlumish@google.com>  2015-10-05 10:18:05 -0700
commit     b0f3bf16cee1d893fc0414fcfe1060baac4eaa9b (patch)
tree       747298bf60f8a0b1c519a33d6084fad42fc192de /tools/run_tests
parent     350835a9566a4b56bfd409108ec235ef2424b630 (diff)
parent     a9154a36fbc019b87f4d62ce16b346f3d44ec2de (diff)
Resolved merge conflicts with master
Diffstat (limited to 'tools/run_tests')
-rwxr-xr-x  tools/run_tests/dockerjob.py              115
-rwxr-xr-x  tools/run_tests/jobset.py                  11
-rwxr-xr-x  tools/run_tests/run_interop_tests.py      337
-rwxr-xr-x  tools/run_tests/run_node.sh                 8
-rwxr-xr-x  tools/run_tests/run_python.sh              10
-rw-r--r--  tools/run_tests/sources_and_headers.json    8
6 files changed, 384 insertions, 105 deletions
diff --git a/tools/run_tests/dockerjob.py b/tools/run_tests/dockerjob.py
new file mode 100755
index 0000000000..11686d46b0
--- /dev/null
+++ b/tools/run_tests/dockerjob.py
@@ -0,0 +1,115 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Helpers to run docker instances as jobs."""
+
+import jobset
+import tempfile
+import time
+import uuid
+import os
+import subprocess
+
+_DEVNULL = open(os.devnull, 'w')
+
+def wait_for_file(filepath, timeout_seconds=15):
+ """Wait until given file exists and returns its content."""
+ started = time.time()
+ while time.time() - started < timeout_seconds:
+ if os.path.isfile(filepath):
+ with open(filepath, 'r') as f:
+ content = f.read()
+ # make sure we don't return empty content
+ if content:
+ return content
+ time.sleep(1)
+ raise Exception('Failed to read file %s.' % filepath)
+
+
+def docker_mapped_port(cid, port):
+ """Get port mapped to internal given internal port for given container."""
+ output = subprocess.check_output('docker port %s %s' % (cid, port), shell=True)
+ return int(output.split(':', 2)[1])
+
+
+def finish_jobs(jobs):
+ """Kills given docker containers and waits for corresponding jobs to finish"""
+ for job in jobs:
+ job.kill(suppress_failure=True)
+
+ while any(job.is_running() for job in jobs):
+ time.sleep(1)
+
+
+def image_exists(image):
+ """Returns True if given docker image exists."""
+ return subprocess.call(['docker','inspect', image],
+ stdout=_DEVNULL,
+ stderr=_DEVNULL) == 0
+
+
+def remove_image(image, skip_nonexistent=False, max_retries=10):
+ """Attempts to remove docker image with retries."""
+ if skip_nonexistent and not image_exists(image):
+ return True
+ for attempt in range(0, max_retries):
+ if subprocess.call(['docker','rmi', '-f', image]) == 0:
+ return True
+ time.sleep(2)
+ print 'Failed to remove docker image %s' % image
+ return False
+
+
+class DockerJob:
+ """Encapsulates a job"""
+
+ def __init__(self, spec):
+ self._spec = spec
+ self._job = jobset.Job(spec, bin_hash=None, newline_on_success=True, travis=True, add_env={}, xml_report=None)
+ self._cidfile = spec.cidfile
+ self._cid = None
+
+ def cid(self):
+ """Gets cid of this container"""
+ if not self._cid:
+ self._cid = wait_for_file(self._cidfile)
+ return self._cid
+
+ def mapped_port(self, port):
+ return docker_mapped_port(self.cid(), port)
+
+ def kill(self, suppress_failure=False):
+ """Sends kill signal to the container."""
+ if suppress_failure:
+ self._job.suppress_failure_message()
+ return subprocess.call(['docker','kill', self.cid()]) == 0
+
+ def is_running(self):
+ """Polls a job and returns True if given job is still running."""
+ return self._job.state(jobset.NoCache()) == jobset._RUNNING
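
For context, the cidfile handshake that DockerJob builds on can be exercised directly: start a detached container with --cidfile, wait for docker to write the container id, then resolve the mapped host port. A minimal sketch using the two helpers above (the image name 'my_server_image' is hypothetical):

    import subprocess
    import tempfile
    from dockerjob import wait_for_file, docker_mapped_port

    cidfile = tempfile.mktemp()
    # -d detaches; '-p 8080' asks docker to map container port 8080 to a free host port
    subprocess.check_call(['docker', 'run', '-d', '-p', '8080',
                           '--cidfile', cidfile, 'my_server_image'])
    cid = wait_for_file(cidfile).strip()       # blocks until docker writes the id
    host_port = docker_mapped_port(cid, 8080)  # parses 'docker port <cid> 8080' output
    print 'container reachable on localhost:%d' % host_port
    subprocess.call(['docker', 'kill', cid])
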
diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py
index b9ec020f9e..87be703b4c 100755
--- a/tools/run_tests/jobset.py
+++ b/tools/run_tests/jobset.py
@@ -180,6 +180,7 @@ class Job(object):
name=self._spec.shortname) if xml_report is not None else None
self._retries = 0
self._timeout_retries = 0
+ self._suppress_failure_message = False
message('START', spec.shortname, do_newline=self._travis)
self.start()
@@ -220,9 +221,10 @@ class Job(object):
self.start()
else:
self._state = _FAILURE
- message('FAILED', '%s [ret=%d, pid=%d]' % (
- self._spec.shortname, self._process.returncode, self._process.pid),
- stdout, do_newline=True)
+ if not self._suppress_failure_message:
+ message('FAILED', '%s [ret=%d, pid=%d]' % (
+ self._spec.shortname, self._process.returncode, self._process.pid),
+ stdout, do_newline=True)
if self._xml_test is not None:
ET.SubElement(self._xml_test, 'failure', message='Failure').text
else:
@@ -254,6 +256,9 @@ class Job(object):
self._state = _KILLED
self._process.terminate()
+ def suppress_failure_message(self):
+ self._suppress_failure_message = True
+
class Jobset(object):
"""Manages one run of jobs."""
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index a2fb1243cc..46b34fea4c 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -31,14 +31,22 @@
"""Run interop (cross-language) tests in parallel."""
import argparse
+import dockerjob
import itertools
import xml.etree.cElementTree as ET
import jobset
+import multiprocessing
import os
import subprocess
import sys
+import tempfile
import time
+import uuid
+ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
+os.chdir(ROOT)
+
+_DEFAULT_SERVER_PORT=8080
_CLOUD_TO_PROD_BASE_ARGS = [
'--server_host_override=grpc-test.sandbox.google.com',
@@ -53,7 +61,8 @@ _CLOUD_TO_CLOUD_BASE_ARGS = [
# supported by C core SslCredentials instead.
_SSL_CERT_ENV = { 'SSL_CERT_FILE':'/usr/local/share/grpc/roots.pem' }
-# TODO(jtattermusch) unify usage of --enable_ssl, --use_tls and --use_tls=true
+# TODO(jtattermusch) unify usage of --use_tls and --use_tls=true
+# TODO(jtattermusch) unify usage of --use_prod_roots and --use_test_ca
class CXXLanguage:
@@ -61,18 +70,22 @@ class CXXLanguage:
def __init__(self):
self.client_cmdline_base = ['bins/opt/interop_client']
self.client_cwd = None
+ self.server_cwd = None
def cloud_to_prod_args(self):
return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
- ['--enable_ssl','--use_prod_roots'])
+ ['--use_tls=true','--use_prod_roots'])
def cloud_to_cloud_args(self):
return (self.client_cmdline_base + _CLOUD_TO_CLOUD_BASE_ARGS +
- ['--enable_ssl'])
+ ['--use_tls=true'])
def cloud_to_prod_env(self):
return None
+ def server_args(self):
+ return ['bins/opt/interop_server', '--use_tls=true']
+
def __str__(self):
return 'c++'
@@ -82,6 +95,7 @@ class CSharpLanguage:
def __init__(self):
self.client_cmdline_base = ['mono', 'Grpc.IntegrationTesting.Client.exe']
self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug'
+ self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug'
def cloud_to_prod_args(self):
return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
@@ -94,15 +108,44 @@ class CSharpLanguage:
def cloud_to_prod_env(self):
return _SSL_CERT_ENV
+ def server_args(self):
+ return ['mono', 'Grpc.IntegrationTesting.Server.exe', '--use_tls']
+
def __str__(self):
return 'csharp'
+class JavaLanguage:
+
+ def __init__(self):
+ self.client_cmdline_base = ['./run-test-client.sh']
+ self.client_cwd = '../grpc-java'
+ self.server_cwd = '../grpc-java'
+
+ def cloud_to_prod_args(self):
+ return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
+ ['--use_tls=true'])
+
+ def cloud_to_cloud_args(self):
+ return (self.client_cmdline_base + _CLOUD_TO_CLOUD_BASE_ARGS +
+ ['--use_tls=true', '--use_test_ca=true'])
+
+ def cloud_to_prod_env(self):
+ return None
+
+ def server_args(self):
+ return ['./run-test-server.sh', '--use_tls=true']
+
+ def __str__(self):
+ return 'java'
+
+
class NodeLanguage:
def __init__(self):
self.client_cmdline_base = ['node', 'src/node/interop/interop_client.js']
self.client_cwd = None
+ self.server_cwd = None
def cloud_to_prod_args(self):
return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
@@ -115,6 +158,9 @@ class NodeLanguage:
def cloud_to_prod_env(self):
return _SSL_CERT_ENV
+ def server_args(self):
+ return ['node', 'src/node/interop/interop_server.js', '--use_tls=true']
+
def __str__(self):
return 'node'
@@ -145,6 +191,7 @@ class RubyLanguage:
def __init__(self):
self.client_cmdline_base = ['ruby', 'src/ruby/bin/interop/interop_client.rb']
self.client_cwd = None
+ self.server_cwd = None
def cloud_to_prod_args(self):
return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
@@ -157,64 +204,151 @@ class RubyLanguage:
def cloud_to_prod_env(self):
return _SSL_CERT_ENV
+ def server_args(self):
+ return ['ruby', 'src/ruby/bin/interop/interop_server.rb', '--use_tls']
+
def __str__(self):
return 'ruby'
-# TODO(jtattermusch): add php and python once we get them working
+# TODO(jtattermusch): add python once we get it working
_LANGUAGES = {
'c++' : CXXLanguage(),
'csharp' : CSharpLanguage(),
+ 'java' : JavaLanguage(),
'node' : NodeLanguage(),
'php' : PHPLanguage(),
'ruby' : RubyLanguage(),
}
-# languages supported as cloud_to_cloud servers
+# languages supported as cloud_to_cloud servers
# TODO(jtattermusch): enable other languages as servers as well
-_SERVERS = { 'c++' : 8010, 'node' : 8040, 'csharp': 8070 }
+_SERVERS = ['c++', 'node', 'csharp', 'java']
-# TODO(jtattermusch): add empty_stream once C++ start supporting it.
+# TODO(jtattermusch): add empty_stream once PHP starts supporting it.
+# TODO(jtattermusch): add timeout_on_sleeping_server once java starts supporting it.
# TODO(jtattermusch): add support for auth tests.
_TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
'client_streaming', 'server_streaming',
- 'cancel_after_begin', 'cancel_after_first_response',
- 'timeout_on_sleeping_server']
+ 'cancel_after_begin', 'cancel_after_first_response']
-def cloud_to_prod_jobspec(language, test_case):
+def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
+ """Wraps given cmdline array to create 'docker run' cmdline from it."""
+ docker_cmdline = ['docker', 'run', '-i', '--rm=true']
+
+ # turn environ into -e docker args
+ if environ:
+ for k,v in environ.iteritems():
+ docker_cmdline += ['-e', '%s=%s' % (k,v)]
+
+ # set working directory
+ workdir = '/var/local/git/grpc'
+ if cwd:
+ workdir = os.path.join(workdir, cwd)
+ docker_cmdline += ['-w', workdir]
+
+ docker_cmdline += docker_args + [image] + cmdline
+ return docker_cmdline
+
+
+def bash_login_cmdline(cmdline):
+ """Creates bash -l -c cmdline from args list."""
+ # Use login shell:
+ # * rvm and nvm require it
+ # * makes error messages clearer if executables are missing
+ return ['bash', '-l', '-c', ' '.join(cmdline)]
+
+
+def cloud_to_prod_jobspec(language, test_case, docker_image=None):
"""Creates jobspec for cloud-to-prod interop test"""
- cmdline = language.cloud_to_prod_args() + ['--test_case=%s' % test_case]
+ cmdline = bash_login_cmdline(language.cloud_to_prod_args() +
+ ['--test_case=%s' % test_case])
+ cwd = language.client_cwd
+ environ = language.cloud_to_prod_env()
+ if docker_image:
+ cmdline = docker_run_cmdline(cmdline, image=docker_image, cwd=cwd, environ=environ)
+ cwd = None
+ environ = None
+
test_job = jobset.JobSpec(
cmdline=cmdline,
- cwd=language.client_cwd,
+ cwd=cwd,
+ environ=environ,
shortname="cloud_to_prod:%s:%s" % (language, test_case),
- environ=language.cloud_to_prod_env(),
- timeout_seconds=60)
+ timeout_seconds=2*60,
+ flake_retries=5 if args.allow_flakes else 0,
+ timeout_retries=2 if args.allow_flakes else 0)
return test_job
def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
- server_port):
+ server_port, docker_image=None):
"""Creates jobspec for cloud-to-cloud interop test"""
- cmdline = language.cloud_to_cloud_args() + ['--test_case=%s' % test_case,
- '--server_host=%s' % server_host,
- '--server_port=%s' % server_port ]
+ cmdline = bash_login_cmdline(language.cloud_to_cloud_args() +
+ ['--test_case=%s' % test_case,
+ '--server_host=%s' % server_host,
+ '--server_port=%s' % server_port ])
+ cwd = language.client_cwd
+ if docker_image:
+ cmdline = docker_run_cmdline(cmdline,
+ image=docker_image,
+ cwd=cwd,
+ docker_args=['--net=host'])
+ cwd = None
test_job = jobset.JobSpec(
cmdline=cmdline,
- cwd=language.client_cwd,
+ cwd=cwd,
shortname="cloud_to_cloud:%s:%s_server:%s" % (language, server_name,
test_case),
- timeout_seconds=60)
+ timeout_seconds=2*60,
+ flake_retries=5 if args.allow_flakes else 0,
+ timeout_retries=2 if args.allow_flakes else 0)
return test_job
+
+def server_jobspec(language, docker_image):
+ """Create jobspec for running a server"""
+ cidfile = tempfile.mktemp()
+ cmdline = bash_login_cmdline(language.server_args() +
+ ['--port=%s' % _DEFAULT_SERVER_PORT])
+ docker_cmdline = docker_run_cmdline(cmdline,
+ image=docker_image,
+ cwd=language.server_cwd,
+ docker_args=['-p', str(_DEFAULT_SERVER_PORT),
+ '--cidfile', cidfile])
+ server_job = jobset.JobSpec(
+ cmdline=docker_cmdline,
+ shortname="interop_server:%s" % language,
+ timeout_seconds=30*60)
+ server_job.cidfile = cidfile
+ return server_job
+
+
+def build_interop_image_jobspec(language, tag=None):
+ """Creates jobspec for building interop docker image for a language"""
+ safelang = str(language).replace("+", "x")
+ if not tag:
+ tag = 'grpc_interop_%s:%s' % (safelang, uuid.uuid4())
+ env = {'INTEROP_IMAGE': tag, 'BASE_NAME': 'grpc_interop_%s' % safelang}
+ if not args.travis:
+ env['TTY_FLAG'] = '-t'
+ build_job = jobset.JobSpec(
+ cmdline=['tools/jenkins/build_interop_image.sh'],
+ environ=env,
+ shortname="build_docker_%s" % (language),
+ timeout_seconds=30*60)
+ build_job.tag = tag
+ return build_job
+
+
argp = argparse.ArgumentParser(description='Run interop tests.')
argp.add_argument('-l', '--language',
choices=['all'] + sorted(_LANGUAGES),
nargs='+',
default=['all'],
help='Clients to run.')
-argp.add_argument('-j', '--jobs', default=24, type=int)
+argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('--cloud_to_prod',
default=False,
action='store_const',
@@ -243,9 +377,14 @@ argp.add_argument('--use_docker',
help='Run all the interop tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
+argp.add_argument('--allow_flakes',
+ default=False,
+ action='store_const',
+ const=True,
+ help="Allow flaky tests to show as passing (re-runs failed tests up to five times)")
args = argp.parse_args()
-servers = set(s for s in itertools.chain.from_iterable(_SERVERS.iterkeys()
+servers = set(s for s in itertools.chain.from_iterable(_SERVERS
if x == 'all' else [x]
for x in args.server))
@@ -258,73 +397,99 @@ if args.use_docker:
print 'copied to the docker environment.'
time.sleep(5)
- child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
- run_tests_cmd = ('tools/run_tests/run_interop_tests.py %s' %
- " ".join(child_argv[1:]))
-
- # cmdline args to pass to the container running servers.
- servers_extra_docker_args = ''
- server_port_tuples = ''
- for server in servers:
- port = _SERVERS[server]
- servers_extra_docker_args += ' -p %s' % port
- servers_extra_docker_args += ' -e SERVER_PORT_%s=%s' % (server.replace("+", "x"), port)
- server_port_tuples += ' %s:%s' % (server, port)
-
- env = os.environ.copy()
- env['RUN_TESTS_COMMAND'] = run_tests_cmd
- env['SERVERS_DOCKER_EXTRA_ARGS'] = servers_extra_docker_args
- env['SERVER_PORT_TUPLES'] = server_port_tuples
- if not args.travis:
- env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
-
- subprocess.check_call(['tools/jenkins/build_docker_and_run_interop_tests.sh'],
- shell=True,
- env=env)
- sys.exit(0)
+if not args.use_docker and servers:
+ print "Running interop servers is only supported with --use_docker option enabled."
+ sys.exit(1)
languages = set(_LANGUAGES[l]
for l in itertools.chain.from_iterable(
_LANGUAGES.iterkeys() if x == 'all' else [x]
for x in args.language))
-jobs = []
-if args.cloud_to_prod:
- for language in languages:
- for test_case in _TEST_CASES:
- test_job = cloud_to_prod_jobspec(language, test_case)
- jobs.append(test_job)
-
-# default servers to "localhost" and the default port
-server_addresses = dict((s, ("localhost", _SERVERS[s])) for s in servers)
-
-for server in args.override_server:
- server_name = server[0]
- (server_host, server_port) = server[1].split(":")
- server_addresses[server_name] = (server_host, server_port)
-
-for server_name, server_address in server_addresses.iteritems():
- (server_host, server_port) = server_address
- for language in languages:
- for test_case in _TEST_CASES:
- test_job = cloud_to_cloud_jobspec(language,
- test_case,
- server_name,
- server_host,
- server_port)
- jobs.append(test_job)
-
-if not jobs:
- print "No jobs to run."
- sys.exit(1)
-
-root = ET.Element('testsuites')
-testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc', name='tests')
-
-if jobset.run(jobs, newline_on_success=True, maxjobs=args.jobs, xml_report=testsuite):
- jobset.message('SUCCESS', 'All tests passed', do_newline=True)
-else:
- jobset.message('FAILED', 'Some tests failed', do_newline=True)
-
-tree = ET.ElementTree(root)
-tree.write('report.xml', encoding='UTF-8')
\ No newline at end of file
+docker_images={}
+if args.use_docker:
+ # languages for which to build docker images
+ languages_to_build = set(_LANGUAGES[k] for k in set([str(l) for l in languages] +
+ [s for s in servers]))
+
+ build_jobs = []
+ for l in languages_to_build:
+ job = build_interop_image_jobspec(l)
+ docker_images[str(l)] = job.tag
+ build_jobs.append(job)
+
+ if build_jobs:
+ jobset.message('START', 'Building interop docker images.', do_newline=True)
+ if jobset.run(build_jobs, newline_on_success=True, maxjobs=args.jobs):
+ jobset.message('SUCCESS', 'All docker images built successfully.', do_newline=True)
+ else:
+ jobset.message('FAILED', 'Failed to build interop docker images.', do_newline=True)
+ for image in docker_images.itervalues():
+ dockerjob.remove_image(image, skip_nonexistent=True)
+      exit(1)
+
+# Start interop servers.
+server_jobs={}
+server_addresses={}
+try:
+ for s in servers:
+ lang = str(s)
+ spec = server_jobspec(_LANGUAGES[lang], docker_images.get(lang))
+ job = dockerjob.DockerJob(spec)
+ server_jobs[lang] = job
+ server_addresses[lang] = ('localhost', job.mapped_port(_DEFAULT_SERVER_PORT))
+
+
+ jobs = []
+ if args.cloud_to_prod:
+ for language in languages:
+ for test_case in _TEST_CASES:
+ test_job = cloud_to_prod_jobspec(language, test_case,
+ docker_image=docker_images.get(str(language)))
+ jobs.append(test_job)
+
+ for server in args.override_server:
+ server_name = server[0]
+ (server_host, server_port) = server[1].split(':')
+ server_addresses[server_name] = (server_host, server_port)
+
+ for server_name, server_address in server_addresses.iteritems():
+ (server_host, server_port) = server_address
+ for language in languages:
+ for test_case in _TEST_CASES:
+ test_job = cloud_to_cloud_jobspec(language,
+ test_case,
+ server_name,
+ server_host,
+ server_port,
+ docker_image=docker_images.get(str(language)))
+ jobs.append(test_job)
+
+ if not jobs:
+ print "No jobs to run."
+ for image in docker_images.itervalues():
+ dockerjob.remove_image(image, skip_nonexistent=True)
+ sys.exit(1)
+
+ root = ET.Element('testsuites')
+ testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc', name='tests')
+
+ if jobset.run(jobs, newline_on_success=True, maxjobs=args.jobs, xml_report=testsuite):
+ jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+ else:
+ jobset.message('FAILED', 'Some tests failed', do_newline=True)
+
+ tree = ET.ElementTree(root)
+ tree.write('report.xml', encoding='UTF-8')
+
+finally:
+ # Check if servers are still running.
+ for server, job in server_jobs.iteritems():
+ if not job.is_running():
+ print 'Server "%s" has exited prematurely.' % server
+
+ dockerjob.finish_jobs([j for j in server_jobs.itervalues()])
+
+ for image in docker_images.itervalues():
+ print 'Removing docker image %s' % image
+ dockerjob.remove_image(image)
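
To make the command composition concrete, this is the shape that bash_login_cmdline plus docker_run_cmdline produce for a hypothetical node cloud-to-cloud client, assuming the two helpers defined above are in scope (the image tag and test arguments are illustrative):

    inner = bash_login_cmdline(['node', 'src/node/interop/interop_client.js',
                                '--use_tls=true', '--test_case=large_unary'])
    cmd = docker_run_cmdline(inner,
                             image='grpc_interop_node:1234',
                             docker_args=['--net=host'])
    # cmd is now:
    # ['docker', 'run', '-i', '--rm=true', '-w', '/var/local/git/grpc',
    #  '--net=host', 'grpc_interop_node:1234', 'bash', '-l', '-c',
    #  'node src/node/interop/interop_client.js --use_tls=true --test_case=large_unary']
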
diff --git a/tools/run_tests/run_node.sh b/tools/run_tests/run_node.sh
index eb68b20e83..780969089d 100755
--- a/tools/run_tests/run_node.sh
+++ b/tools/run_tests/run_node.sh
@@ -39,13 +39,13 @@ root=`pwd`
if [ "$CONFIG" = "gcov" ]
then
- ./node_modules/.bin/istanbul cover ./node_modules/.bin/_mocha -- \
- --timeout 8000 src/node/test
+ ./node_modules/.bin/istanbul cover --dir reports/node_coverage \
+ ./node_modules/.bin/_mocha -- --timeout 8000 src/node/test
cd build
gcov Release/obj.target/grpc/ext/*.o
lcov --base-directory . --directory . -c -o coverage.info
- genhtml -o ../ext_coverage --num-spaces 2 -t 'Node gRPC test coverage' \
- coverage.info
+ genhtml -o ../reports/node_ext_coverage --num-spaces 2 \
+ -t 'Node gRPC test coverage' coverage.info
else
./node_modules/mocha/bin/mocha --timeout 8000 src/node/test
fi
diff --git a/tools/run_tests/run_python.sh b/tools/run_tests/run_python.sh
index 977b02fd94..e2135be04c 100755
--- a/tools/run_tests/run_python.sh
+++ b/tools/run_tests/run_python.sh
@@ -40,14 +40,4 @@ export DYLD_LIBRARY_PATH=$ROOT/libs/$CONFIG
export PATH=$ROOT/bins/$CONFIG:$ROOT/bins/$CONFIG/protobuf:$PATH
source "python"$PYVER"_virtual_environment"/bin/activate
-# TODO(atash): These tests don't currently run under py.test and thus don't
-# appear under the coverage report. Find a way to get these tests to work with
-# py.test (or find another tool or *something*) that's acceptable to the rest of
-# the team...
-"python"$PYVER -m grpc_test._core_over_links_base_interface_test
-"python"$PYVER -m grpc_test._crust_over_core_over_links_face_interface_test
-"python"$PYVER -m grpc_test.beta._face_interface_test
-"python"$PYVER -m grpc_test.framework._crust_over_core_face_interface_test
-"python"$PYVER -m grpc_test.framework.core._base_interface_test
-
"python"$PYVER $GRPCIO_TEST/setup.py test -a "-n8 --cov=grpc --junitxml=./report.xml --timeout=300"
diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json
index 1e5dd11fcd..1ceff15a3b 100644
--- a/tools/run_tests/sources_and_headers.json
+++ b/tools/run_tests/sources_and_headers.json
@@ -12362,6 +12362,7 @@
"src/core/security/security_context.h",
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
+ "src/core/surface/api_trace.h",
"src/core/surface/byte_buffer_queue.h",
"src/core/surface/call.h",
"src/core/surface/channel.h",
@@ -12596,6 +12597,8 @@
"src/core/security/server_secure_chttp2.c",
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
+ "src/core/surface/api_trace.c",
+ "src/core/surface/api_trace.h",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_queue.c",
"src/core/surface/byte_buffer_queue.h",
@@ -12622,7 +12625,6 @@
"src/core/surface/server.h",
"src/core/surface/server_chttp2.c",
"src/core/surface/server_create.c",
- "src/core/surface/surface_trace.c",
"src/core/surface/surface_trace.h",
"src/core/surface/version.c",
"src/core/transport/chttp2/alpn.c",
@@ -12856,6 +12858,7 @@
"src/core/profiling/timers.h",
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
+ "src/core/surface/api_trace.h",
"src/core/surface/byte_buffer_queue.h",
"src/core/surface/call.h",
"src/core/surface/channel.h",
@@ -13060,6 +13063,8 @@
"src/core/profiling/timers.h",
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
+ "src/core/surface/api_trace.c",
+ "src/core/surface/api_trace.h",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_queue.c",
"src/core/surface/byte_buffer_queue.h",
@@ -13085,7 +13090,6 @@
"src/core/surface/server.h",
"src/core/surface/server_chttp2.c",
"src/core/surface/server_create.c",
- "src/core/surface/surface_trace.c",
"src/core/surface/surface_trace.h",
"src/core/surface/version.c",
"src/core/transport/chttp2/alpn.c",