path: root/tools/run_tests
Diffstat (limited to 'tools/run_tests')
-rw-r--r--  tools/run_tests/artifacts/artifact_targets.py                | 611
-rw-r--r--  tools/run_tests/artifacts/distribtest_targets.py             | 525
-rw-r--r--  tools/run_tests/artifacts/package_targets.py                 | 214
-rwxr-xr-x  tools/run_tests/performance/bq_upload_result.py              | 219
-rw-r--r--  tools/run_tests/performance/massage_qps_stats.py             | 630
-rw-r--r--  tools/run_tests/performance/massage_qps_stats_helpers.py     | 71
-rwxr-xr-x  tools/run_tests/performance/patch_scenario_results_schema.py | 35
-rw-r--r--  tools/run_tests/performance/scenario_config.py               | 1852
-rwxr-xr-x  tools/run_tests/python_utils/antagonist.py                   | 3
-rw-r--r--  tools/run_tests/python_utils/comment_on_pr.py                | 33
-rwxr-xr-x  tools/run_tests/python_utils/dockerjob.py                    | 162
-rw-r--r--  tools/run_tests/python_utils/filter_pull_request_tests.py    | 200
-rwxr-xr-x  tools/run_tests/python_utils/jobset.py                       | 825
-rwxr-xr-x  tools/run_tests/python_utils/port_server.py                  | 254
-rw-r--r--  tools/run_tests/python_utils/report_utils.py                 | 219
-rw-r--r--  tools/run_tests/python_utils/start_port_server.py            | 11
-rw-r--r--  tools/run_tests/python_utils/upload_test_results.py          | 231
-rwxr-xr-x  tools/run_tests/python_utils/watch_dirs.py                   | 79
-rwxr-xr-x  tools/run_tests/run_build_statistics.py                      | 302
-rwxr-xr-x  tools/run_tests/run_interop_tests.py                         | 1988
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py                        | 346
-rwxr-xr-x  tools/run_tests/run_performance_tests.py                     | 1143
-rwxr-xr-x  tools/run_tests/run_tests.py                                 | 2668
-rwxr-xr-x  tools/run_tests/run_tests_matrix.py                          | 922
-rwxr-xr-x  tools/run_tests/sanity/check_bazel_workspace.py              | 30
-rwxr-xr-x  tools/run_tests/sanity/check_sources_and_headers.py          | 142
-rwxr-xr-x  tools/run_tests/sanity/check_test_filtering.py               | 217
-rwxr-xr-x  tools/run_tests/sanity/check_tracer_sanity.py                | 21
-rwxr-xr-x  tools/run_tests/sanity/check_version.py                      | 76
-rwxr-xr-x  tools/run_tests/sanity/core_banned_functions.py              | 36
-rwxr-xr-x  tools/run_tests/start_port_server.py                         | 1
-rwxr-xr-x  tools/run_tests/task_runner.py                               | 100
32 files changed, 7713 insertions, 6453 deletions
diff --git a/tools/run_tests/artifacts/artifact_targets.py b/tools/run_tests/artifacts/artifact_targets.py
index ea202edb30..dc0803b3c7 100644
--- a/tools/run_tests/artifacts/artifact_targets.py
+++ b/tools/run_tests/artifacts/artifact_targets.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Definition of targets to build artifacts."""
import os.path
@@ -24,316 +23,350 @@ sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
-def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
- flake_retries=0, timeout_retries=0, timeout_seconds=30*60,
- docker_base_image=None, extra_docker_args=None):
- """Creates jobspec for a task running under docker."""
- environ = environ.copy()
- environ['RUN_COMMAND'] = shell_command
- environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
-
- docker_args=[]
- for k,v in environ.items():
- docker_args += ['-e', '%s=%s' % (k, v)]
- docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
- 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
- 'OUTPUT_DIR': 'artifacts'}
-
- if docker_base_image is not None:
- docker_env['DOCKER_BASE_IMAGE'] = docker_base_image
- if extra_docker_args is not None:
- docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
- jobspec = jobset.JobSpec(
- cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
- environ=docker_env,
- shortname='build_artifact.%s' % (name),
- timeout_seconds=timeout_seconds,
- flake_retries=flake_retries,
- timeout_retries=timeout_retries)
- return jobspec
-
-
-def create_jobspec(name, cmdline, environ={}, shell=False,
- flake_retries=0, timeout_retries=0, timeout_seconds=30*60,
+def create_docker_jobspec(name,
+ dockerfile_dir,
+ shell_command,
+ environ={},
+ flake_retries=0,
+ timeout_retries=0,
+ timeout_seconds=30 * 60,
+ docker_base_image=None,
+ extra_docker_args=None):
+ """Creates jobspec for a task running under docker."""
+ environ = environ.copy()
+ environ['RUN_COMMAND'] = shell_command
+ environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
+
+ docker_args = []
+ for k, v in environ.items():
+ docker_args += ['-e', '%s=%s' % (k, v)]
+ docker_env = {
+ 'DOCKERFILE_DIR': dockerfile_dir,
+ 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
+ 'OUTPUT_DIR': 'artifacts'
+ }
+
+ if docker_base_image is not None:
+ docker_env['DOCKER_BASE_IMAGE'] = docker_base_image
+ if extra_docker_args is not None:
+ docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
+ jobspec = jobset.JobSpec(
+ cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
+ docker_args,
+ environ=docker_env,
+ shortname='build_artifact.%s' % (name),
+ timeout_seconds=timeout_seconds,
+ flake_retries=flake_retries,
+ timeout_retries=timeout_retries)
+ return jobspec
+
+
+def create_jobspec(name,
+ cmdline,
+ environ={},
+ shell=False,
+ flake_retries=0,
+ timeout_retries=0,
+ timeout_seconds=30 * 60,
use_workspace=False,
cpu_cost=1.0):
- """Creates jobspec."""
- environ = environ.copy()
- if use_workspace:
- environ['WORKSPACE_NAME'] = 'workspace_%s' % name
- environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
- cmdline = ['bash',
- 'tools/run_tests/artifacts/run_in_workspace.sh'] + cmdline
- else:
- environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
-
- jobspec = jobset.JobSpec(
- cmdline=cmdline,
- environ=environ,
- shortname='build_artifact.%s' % (name),
- timeout_seconds=timeout_seconds,
- flake_retries=flake_retries,
- timeout_retries=timeout_retries,
- shell=shell,
- cpu_cost=cpu_cost)
- return jobspec
+ """Creates jobspec."""
+ environ = environ.copy()
+ if use_workspace:
+ environ['WORKSPACE_NAME'] = 'workspace_%s' % name
+ environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
+ cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
+ ] + cmdline
+ else:
+ environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
+
+ jobspec = jobset.JobSpec(
+ cmdline=cmdline,
+ environ=environ,
+ shortname='build_artifact.%s' % (name),
+ timeout_seconds=timeout_seconds,
+ flake_retries=flake_retries,
+ timeout_retries=timeout_retries,
+ shell=shell,
+ cpu_cost=cpu_cost)
+ return jobspec
_MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.7'
-_ARCH_FLAG_MAP = {
- 'x86': '-m32',
- 'x64': '-m64'
-}
+_ARCH_FLAG_MAP = {'x86': '-m32', 'x64': '-m64'}
class PythonArtifact:
- """Builds Python artifacts."""
-
- def __init__(self, platform, arch, py_version):
- self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
- self.platform = platform
- self.arch = arch
- self.labels = ['artifact', 'python', platform, arch, py_version]
- self.py_version = py_version
-
- def pre_build_jobspecs(self):
- return []
-
- def build_jobspec(self):
- environ = {}
- if self.platform == 'linux_extra':
- # Raspberry Pi build
- environ['PYTHON'] = '/usr/local/bin/python{}'.format(self.py_version)
- environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
- # https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
- # A QEMU bug causes submodule update to hang, so we copy directly
- environ['RELATIVE_COPY_PATH'] = '.'
- extra_args = ' --entrypoint=/usr/bin/qemu-arm-static '
- return create_docker_jobspec(self.name,
- 'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
- 'tools/run_tests/artifacts/build_artifact_python.sh',
- environ=environ,
- timeout_seconds=60*60*5,
- docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch),
- extra_docker_args=extra_args)
- elif self.platform == 'linux':
- if self.arch == 'x86':
- environ['SETARCH_CMD'] = 'linux32'
- # Inside the manylinux container, the python installations are located in
- # special places...
- environ['PYTHON'] = '/opt/python/{}/bin/python'.format(self.py_version)
- environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
- # Platform autodetection for the manylinux1 image breaks so we set the
- # defines ourselves.
- # TODO(atash) get better platform-detection support in core so we don't
- # need to do this manually...
- environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
- environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
- environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE'
- return create_docker_jobspec(self.name,
- 'tools/dockerfile/grpc_artifact_python_manylinux_%s' % self.arch,
- 'tools/run_tests/artifacts/build_artifact_python.sh',
- environ=environ,
- timeout_seconds=60*60,
- docker_base_image='quay.io/pypa/manylinux1_i686' if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64')
- elif self.platform == 'windows':
- if 'Python27' in self.py_version or 'Python34' in self.py_version:
- environ['EXT_COMPILER'] = 'mingw32'
- else:
- environ['EXT_COMPILER'] = 'msvc'
- # For some reason, the batch script %random% always runs with the same
- # seed. We create a random temp-dir here
- dir = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
- return create_jobspec(self.name,
- ['tools\\run_tests\\artifacts\\build_artifact_python.bat',
- self.py_version,
- '32' if self.arch == 'x86' else '64'],
- environ=environ,
- timeout_seconds=45*60,
- use_workspace=True)
- else:
- environ['PYTHON'] = self.py_version
- environ['SKIP_PIP_INSTALL'] = 'TRUE'
- return create_jobspec(self.name,
- ['tools/run_tests/artifacts/build_artifact_python.sh'],
- environ=environ,
- timeout_seconds=60*60,
- use_workspace=True)
-
- def __str__(self):
- return self.name
+ """Builds Python artifacts."""
+
+ def __init__(self, platform, arch, py_version):
+ self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
+ self.platform = platform
+ self.arch = arch
+ self.labels = ['artifact', 'python', platform, arch, py_version]
+ self.py_version = py_version
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ environ = {}
+ if self.platform == 'linux_extra':
+ # Raspberry Pi build
+ environ['PYTHON'] = '/usr/local/bin/python{}'.format(
+ self.py_version)
+ environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
+ # https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
+ # A QEMU bug causes submodule update to hang, so we copy directly
+ environ['RELATIVE_COPY_PATH'] = '.'
+ extra_args = ' --entrypoint=/usr/bin/qemu-arm-static '
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
+ 'tools/run_tests/artifacts/build_artifact_python.sh',
+ environ=environ,
+ timeout_seconds=60 * 60 * 5,
+ docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch),
+ extra_docker_args=extra_args)
+ elif self.platform == 'linux':
+ if self.arch == 'x86':
+ environ['SETARCH_CMD'] = 'linux32'
+ # Inside the manylinux container, the python installations are located in
+ # special places...
+ environ['PYTHON'] = '/opt/python/{}/bin/python'.format(
+ self.py_version)
+ environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
+ # Platform autodetection for the manylinux1 image breaks so we set the
+ # defines ourselves.
+ # TODO(atash) get better platform-detection support in core so we don't
+ # need to do this manually...
+ environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
+ environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
+ environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE'
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/grpc_artifact_python_manylinux_%s' %
+ self.arch,
+ 'tools/run_tests/artifacts/build_artifact_python.sh',
+ environ=environ,
+ timeout_seconds=60 * 60,
+ docker_base_image='quay.io/pypa/manylinux1_i686'
+ if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64')
+ elif self.platform == 'windows':
+ if 'Python27' in self.py_version or 'Python34' in self.py_version:
+ environ['EXT_COMPILER'] = 'mingw32'
+ else:
+ environ['EXT_COMPILER'] = 'msvc'
+ # For some reason, the batch script %random% always runs with the same
+ # seed. We create a random temp-dir here
+ dir = ''.join(
+ random.choice(string.ascii_uppercase) for _ in range(10))
+ return create_jobspec(
+ self.name, [
+ 'tools\\run_tests\\artifacts\\build_artifact_python.bat',
+ self.py_version, '32' if self.arch == 'x86' else '64'
+ ],
+ environ=environ,
+ timeout_seconds=45 * 60,
+ use_workspace=True)
+ else:
+ environ['PYTHON'] = self.py_version
+ environ['SKIP_PIP_INSTALL'] = 'TRUE'
+ return create_jobspec(
+ self.name,
+ ['tools/run_tests/artifacts/build_artifact_python.sh'],
+ environ=environ,
+ timeout_seconds=60 * 60,
+ use_workspace=True)
+
+ def __str__(self):
+ return self.name
class RubyArtifact:
- """Builds ruby native gem."""
+ """Builds ruby native gem."""
- def __init__(self, platform, arch):
- self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
- self.platform = platform
- self.arch = arch
- self.labels = ['artifact', 'ruby', platform, arch]
+ def __init__(self, platform, arch):
+ self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
+ self.platform = platform
+ self.arch = arch
+ self.labels = ['artifact', 'ruby', platform, arch]
- def pre_build_jobspecs(self):
- return []
+ def pre_build_jobspecs(self):
+ return []
- def build_jobspec(self):
- # Ruby build uses docker internally and docker cannot be nested.
- # We are using a custom workspace instead.
- return create_jobspec(self.name,
- ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
- use_workspace=True,
- timeout_seconds=45*60)
+ def build_jobspec(self):
+ # Ruby build uses docker internally and docker cannot be nested.
+ # We are using a custom workspace instead.
+ return create_jobspec(
+ self.name, ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
+ use_workspace=True,
+ timeout_seconds=45 * 60)
class CSharpExtArtifact:
- """Builds C# native extension library"""
-
- def __init__(self, platform, arch):
- self.name = 'csharp_ext_%s_%s' % (platform, arch)
- self.platform = platform
- self.arch = arch
- self.labels = ['artifact', 'csharp', platform, arch]
-
- def pre_build_jobspecs(self):
- return []
-
- def build_jobspec(self):
- if self.platform == 'windows':
- cmake_arch_option = 'Win32' if self.arch == 'x86' else self.arch
- return create_jobspec(self.name,
- ['tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
- cmake_arch_option],
- use_workspace=True)
- else:
- environ = {'CONFIG': 'opt',
- 'EMBED_OPENSSL': 'true',
- 'EMBED_ZLIB': 'true',
- 'CFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
- 'CXXFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
- 'LDFLAGS': ''}
- if self.platform == 'linux':
- return create_docker_jobspec(self.name,
- 'tools/dockerfile/grpc_artifact_linux_%s' % self.arch,
- 'tools/run_tests/artifacts/build_artifact_csharp.sh',
- environ=environ)
- else:
- archflag = _ARCH_FLAG_MAP[self.arch]
- environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
- environ['CXXFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
- environ['LDFLAGS'] += ' %s' % archflag
- return create_jobspec(self.name,
- ['tools/run_tests/artifacts/build_artifact_csharp.sh'],
- environ=environ,
- use_workspace=True)
-
- def __str__(self):
- return self.name
+ """Builds C# native extension library"""
+
+ def __init__(self, platform, arch):
+ self.name = 'csharp_ext_%s_%s' % (platform, arch)
+ self.platform = platform
+ self.arch = arch
+ self.labels = ['artifact', 'csharp', platform, arch]
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if self.platform == 'windows':
+ cmake_arch_option = 'Win32' if self.arch == 'x86' else self.arch
+ return create_jobspec(
+ self.name, [
+ 'tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
+ cmake_arch_option
+ ],
+ use_workspace=True)
+ else:
+ environ = {
+ 'CONFIG': 'opt',
+ 'EMBED_OPENSSL': 'true',
+ 'EMBED_ZLIB': 'true',
+ 'CFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
+ 'CXXFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
+ 'LDFLAGS': ''
+ }
+ if self.platform == 'linux':
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/grpc_artifact_linux_%s' % self.arch,
+ 'tools/run_tests/artifacts/build_artifact_csharp.sh',
+ environ=environ)
+ else:
+ archflag = _ARCH_FLAG_MAP[self.arch]
+ environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
+ environ['CXXFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
+ environ['LDFLAGS'] += ' %s' % archflag
+ return create_jobspec(
+ self.name,
+ ['tools/run_tests/artifacts/build_artifact_csharp.sh'],
+ environ=environ,
+ use_workspace=True)
+
+ def __str__(self):
+ return self.name
+
class PHPArtifact:
- """Builds PHP PECL package"""
-
- def __init__(self, platform, arch):
- self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
- self.platform = platform
- self.arch = arch
- self.labels = ['artifact', 'php', platform, arch]
-
- def pre_build_jobspecs(self):
- return []
-
- def build_jobspec(self):
- if self.platform == 'linux':
- return create_docker_jobspec(
- self.name,
- 'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
- 'tools/run_tests/artifacts/build_artifact_php.sh')
- else:
- return create_jobspec(self.name,
- ['tools/run_tests/artifacts/build_artifact_php.sh'],
- use_workspace=True)
+ """Builds PHP PECL package"""
-class ProtocArtifact:
- """Builds protoc and protoc-plugin artifacts"""
-
- def __init__(self, platform, arch):
- self.name = 'protoc_%s_%s' % (platform, arch)
- self.platform = platform
- self.arch = arch
- self.labels = ['artifact', 'protoc', platform, arch]
-
- def pre_build_jobspecs(self):
- return []
-
- def build_jobspec(self):
- if self.platform != 'windows':
- cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch]
- ldflags = '%s' % _ARCH_FLAG_MAP[self.arch]
- if self.platform != 'macos':
- ldflags += ' -static-libgcc -static-libstdc++ -s'
- environ={'CONFIG': 'opt',
- 'CXXFLAGS': cxxflags,
- 'LDFLAGS': ldflags,
- 'PROTOBUF_LDFLAGS_EXTRA': ldflags}
- if self.platform == 'linux':
- return create_docker_jobspec(self.name,
- 'tools/dockerfile/grpc_artifact_protoc',
- 'tools/run_tests/artifacts/build_artifact_protoc.sh',
- environ=environ)
- else:
- environ['CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
- return create_jobspec(self.name,
- ['tools/run_tests/artifacts/build_artifact_protoc.sh'],
- environ=environ,
- use_workspace=True)
- else:
- generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
- return create_jobspec(self.name,
- ['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
- environ={'generator': generator},
- use_workspace=True)
+ def __init__(self, platform, arch):
+ self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
+ self.platform = platform
+ self.arch = arch
+ self.labels = ['artifact', 'php', platform, arch]
+
+ def pre_build_jobspecs(self):
+ return []
- def __str__(self):
- return self.name
+ def build_jobspec(self):
+ if self.platform == 'linux':
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
+ 'tools/run_tests/artifacts/build_artifact_php.sh')
+ else:
+ return create_jobspec(
+ self.name, ['tools/run_tests/artifacts/build_artifact_php.sh'],
+ use_workspace=True)
+
+
+class ProtocArtifact:
+ """Builds protoc and protoc-plugin artifacts"""
+
+ def __init__(self, platform, arch):
+ self.name = 'protoc_%s_%s' % (platform, arch)
+ self.platform = platform
+ self.arch = arch
+ self.labels = ['artifact', 'protoc', platform, arch]
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if self.platform != 'windows':
+ cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch]
+ ldflags = '%s' % _ARCH_FLAG_MAP[self.arch]
+ if self.platform != 'macos':
+ ldflags += ' -static-libgcc -static-libstdc++ -s'
+ environ = {
+ 'CONFIG': 'opt',
+ 'CXXFLAGS': cxxflags,
+ 'LDFLAGS': ldflags,
+ 'PROTOBUF_LDFLAGS_EXTRA': ldflags
+ }
+ if self.platform == 'linux':
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/grpc_artifact_protoc',
+ 'tools/run_tests/artifacts/build_artifact_protoc.sh',
+ environ=environ)
+ else:
+ environ[
+ 'CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
+ return create_jobspec(
+ self.name,
+ ['tools/run_tests/artifacts/build_artifact_protoc.sh'],
+ environ=environ,
+ use_workspace=True)
+ else:
+ generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
+ return create_jobspec(
+ self.name,
+ ['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
+ environ={'generator': generator},
+ use_workspace=True)
+
+ def __str__(self):
+ return self.name
def targets():
- """Gets list of supported targets"""
- return ([Cls(platform, arch)
- for Cls in (CSharpExtArtifact, ProtocArtifact)
- for platform in ('linux', 'macos', 'windows')
- for arch in ('x86', 'x64')] +
- [PythonArtifact('linux', 'x86', 'cp27-cp27m'),
- PythonArtifact('linux', 'x86', 'cp27-cp27mu'),
- PythonArtifact('linux', 'x86', 'cp34-cp34m'),
- PythonArtifact('linux', 'x86', 'cp35-cp35m'),
- PythonArtifact('linux', 'x86', 'cp36-cp36m'),
- PythonArtifact('linux_extra', 'armv7', '2.7'),
- PythonArtifact('linux_extra', 'armv7', '3.4'),
- PythonArtifact('linux_extra', 'armv7', '3.5'),
- PythonArtifact('linux_extra', 'armv7', '3.6'),
- PythonArtifact('linux_extra', 'armv6', '2.7'),
- PythonArtifact('linux_extra', 'armv6', '3.4'),
- PythonArtifact('linux_extra', 'armv6', '3.5'),
- PythonArtifact('linux_extra', 'armv6', '3.6'),
- PythonArtifact('linux', 'x64', 'cp27-cp27m'),
- PythonArtifact('linux', 'x64', 'cp27-cp27mu'),
- PythonArtifact('linux', 'x64', 'cp34-cp34m'),
- PythonArtifact('linux', 'x64', 'cp35-cp35m'),
- PythonArtifact('linux', 'x64', 'cp36-cp36m'),
- PythonArtifact('macos', 'x64', 'python2.7'),
- PythonArtifact('macos', 'x64', 'python3.4'),
- PythonArtifact('macos', 'x64', 'python3.5'),
- PythonArtifact('macos', 'x64', 'python3.6'),
- PythonArtifact('windows', 'x86', 'Python27_32bits'),
- PythonArtifact('windows', 'x86', 'Python34_32bits'),
- PythonArtifact('windows', 'x86', 'Python35_32bits'),
- PythonArtifact('windows', 'x86', 'Python36_32bits'),
- PythonArtifact('windows', 'x64', 'Python27'),
- PythonArtifact('windows', 'x64', 'Python34'),
- PythonArtifact('windows', 'x64', 'Python35'),
- PythonArtifact('windows', 'x64', 'Python36'),
- RubyArtifact('linux', 'x64'),
- RubyArtifact('macos', 'x64'),
- PHPArtifact('linux', 'x64'),
- PHPArtifact('macos', 'x64')])
+ """Gets list of supported targets"""
+ return ([
+ Cls(platform, arch)
+ for Cls in (CSharpExtArtifact, ProtocArtifact)
+ for platform in ('linux', 'macos', 'windows') for arch in ('x86', 'x64')
+ ] + [
+ PythonArtifact('linux', 'x86', 'cp27-cp27m'), PythonArtifact(
+ 'linux', 'x86', 'cp27-cp27mu'), PythonArtifact(
+ 'linux', 'x86', 'cp34-cp34m'), PythonArtifact(
+ 'linux', 'x86', 'cp35-cp35m'), PythonArtifact(
+ 'linux', 'x86', 'cp36-cp36m'), PythonArtifact(
+ 'linux_extra', 'armv7', '2.7'), PythonArtifact(
+ 'linux_extra', 'armv7', '3.4'), PythonArtifact(
+ 'linux_extra', 'armv7', '3.5'),
+ PythonArtifact('linux_extra', 'armv7', '3.6'), PythonArtifact(
+ 'linux_extra', 'armv6', '2.7'), PythonArtifact(
+ 'linux_extra', 'armv6', '3.4'), PythonArtifact(
+ 'linux_extra', 'armv6', '3.5'), PythonArtifact(
+ 'linux_extra', 'armv6', '3.6'), PythonArtifact(
+ 'linux', 'x64', 'cp27-cp27m'), PythonArtifact(
+ 'linux', 'x64', 'cp27-cp27mu'), PythonArtifact(
+ 'linux', 'x64', 'cp34-cp34m'),
+ PythonArtifact('linux', 'x64', 'cp35-cp35m'), PythonArtifact(
+ 'linux', 'x64', 'cp36-cp36m'), PythonArtifact(
+ 'macos', 'x64', 'python2.7'), PythonArtifact(
+ 'macos', 'x64', 'python3.4'), PythonArtifact('macos', 'x64',
+ 'python3.5'),
+ PythonArtifact('macos', 'x64', 'python3.6'), PythonArtifact(
+ 'windows', 'x86', 'Python27_32bits'), PythonArtifact(
+ 'windows', 'x86', 'Python34_32bits'), PythonArtifact(
+ 'windows', 'x86', 'Python35_32bits'), PythonArtifact(
+ 'windows', 'x86', 'Python36_32bits'), PythonArtifact(
+ 'windows', 'x64', 'Python27'),
+ PythonArtifact('windows', 'x64', 'Python34'), PythonArtifact(
+ 'windows', 'x64', 'Python35'), PythonArtifact(
+ 'windows', 'x64', 'Python36'), RubyArtifact(
+ 'linux', 'x64'), RubyArtifact('macos', 'x64'), PHPArtifact(
+ 'linux', 'x64'), PHPArtifact('macos', 'x64')
+ ])
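
For orientation: every artifact target above funnels into create_docker_jobspec (or create_jobspec), which packs the build command and environment into a jobset.JobSpec driven by tools/run_tests/dockerize/build_and_run_docker.sh. The standalone sketch below mirrors that assembly with a plain dict standing in for JobSpec, so it runs without the jobset module; the target name, Dockerfile directory and PYTHON path are illustrative values taken from the code above, not output of the real function.

import pprint


def sketch_docker_jobspec(name, dockerfile_dir, shell_command, environ=None):
    # Mirrors create_docker_jobspec above: every environment variable is
    # forwarded into the container via `-e`, and the docker driver script
    # receives the Dockerfile directory and run script through docker_env.
    environ = dict(environ or {})
    environ['RUN_COMMAND'] = shell_command
    environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name

    docker_args = []
    for k, v in environ.items():
        docker_args += ['-e', '%s=%s' % (k, v)]
    docker_env = {
        'DOCKERFILE_DIR': dockerfile_dir,
        'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
        'OUTPUT_DIR': 'artifacts'
    }
    # The real function wraps these fields in jobset.JobSpec; a dict stands
    # in for it here so the sketch is self-contained.
    return {
        'cmdline': ['tools/run_tests/dockerize/build_and_run_docker.sh'] +
        docker_args,
        'environ': docker_env,
        'shortname': 'build_artifact.%s' % name,
    }


if __name__ == '__main__':
    pprint.pprint(
        sketch_docker_jobspec(
            'python_linux_x64_cp36-cp36m',
            'tools/dockerfile/grpc_artifact_python_manylinux_x64',
            'tools/run_tests/artifacts/build_artifact_python.sh',
            environ={'PYTHON': '/opt/python/cp36-cp36m/bin/python'}))
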
diff --git a/tools/run_tests/artifacts/distribtest_targets.py b/tools/run_tests/artifacts/distribtest_targets.py
index 7ba0e0ebc9..83f656b433 100644
--- a/tools/run_tests/artifacts/distribtest_targets.py
+++ b/tools/run_tests/artifacts/distribtest_targets.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Definition of targets run distribution package tests."""
import os.path
@@ -22,280 +21,306 @@ sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
-def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
- flake_retries=0, timeout_retries=0,
- copy_rel_path=None):
- """Creates jobspec for a task running under docker."""
- environ = environ.copy()
- environ['RUN_COMMAND'] = shell_command
- # the entire repo will be cloned if copy_rel_path is not set.
- if copy_rel_path:
- environ['RELATIVE_COPY_PATH'] = copy_rel_path
-
- docker_args=[]
- for k,v in environ.items():
- docker_args += ['-e', '%s=%s' % (k, v)]
- docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
- 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'}
- jobspec = jobset.JobSpec(
- cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
- environ=docker_env,
- shortname='distribtest.%s' % (name),
- timeout_seconds=30*60,
- flake_retries=flake_retries,
- timeout_retries=timeout_retries)
- return jobspec
-
-
-def create_jobspec(name, cmdline, environ=None, shell=False,
- flake_retries=0, timeout_retries=0,
+def create_docker_jobspec(name,
+ dockerfile_dir,
+ shell_command,
+ environ={},
+ flake_retries=0,
+ timeout_retries=0,
+ copy_rel_path=None):
+ """Creates jobspec for a task running under docker."""
+ environ = environ.copy()
+ environ['RUN_COMMAND'] = shell_command
+ # the entire repo will be cloned if copy_rel_path is not set.
+ if copy_rel_path:
+ environ['RELATIVE_COPY_PATH'] = copy_rel_path
+
+ docker_args = []
+ for k, v in environ.items():
+ docker_args += ['-e', '%s=%s' % (k, v)]
+ docker_env = {
+ 'DOCKERFILE_DIR': dockerfile_dir,
+ 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'
+ }
+ jobspec = jobset.JobSpec(
+ cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
+ docker_args,
+ environ=docker_env,
+ shortname='distribtest.%s' % (name),
+ timeout_seconds=30 * 60,
+ flake_retries=flake_retries,
+ timeout_retries=timeout_retries)
+ return jobspec
+
+
+def create_jobspec(name,
+ cmdline,
+ environ=None,
+ shell=False,
+ flake_retries=0,
+ timeout_retries=0,
use_workspace=False,
- timeout_seconds=10*60):
- """Creates jobspec."""
- environ = environ.copy()
- if use_workspace:
- environ['WORKSPACE_NAME'] = 'workspace_%s' % name
- cmdline = ['bash',
- 'tools/run_tests/artifacts/run_in_workspace.sh'] + cmdline
- jobspec = jobset.JobSpec(
- cmdline=cmdline,
- environ=environ,
- shortname='distribtest.%s' % (name),
- timeout_seconds=timeout_seconds,
- flake_retries=flake_retries,
- timeout_retries=timeout_retries,
- shell=shell)
- return jobspec
+ timeout_seconds=10 * 60):
+ """Creates jobspec."""
+ environ = environ.copy()
+ if use_workspace:
+ environ['WORKSPACE_NAME'] = 'workspace_%s' % name
+ cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
+ ] + cmdline
+ jobspec = jobset.JobSpec(
+ cmdline=cmdline,
+ environ=environ,
+ shortname='distribtest.%s' % (name),
+ timeout_seconds=timeout_seconds,
+ flake_retries=flake_retries,
+ timeout_retries=timeout_retries,
+ shell=shell)
+ return jobspec
class CSharpDistribTest(object):
- """Tests C# NuGet package"""
-
- def __init__(self, platform, arch, docker_suffix=None, use_dotnet_cli=False):
- self.name = 'csharp_%s_%s' % (platform, arch)
- self.platform = platform
- self.arch = arch
- self.docker_suffix = docker_suffix
- self.labels = ['distribtest', 'csharp', platform, arch]
- self.script_suffix = ''
- if docker_suffix:
- self.name += '_%s' % docker_suffix
- self.labels.append(docker_suffix)
- if use_dotnet_cli:
- self.name += '_dotnetcli'
- self.script_suffix = '_dotnetcli'
- self.labels.append('dotnetcli')
- else:
- self.labels.append('olddotnet')
-
- def pre_build_jobspecs(self):
- return []
-
- def build_jobspec(self):
- if self.platform == 'linux':
- return create_docker_jobspec(self.name,
- 'tools/dockerfile/distribtest/csharp_%s_%s' % (
- self.docker_suffix,
- self.arch),
- 'test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix,
- copy_rel_path='test/distrib')
- elif self.platform == 'macos':
- return create_jobspec(self.name,
- ['test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix],
- environ={'EXTERNAL_GIT_ROOT': '../../../..'},
- use_workspace=True)
- elif self.platform == 'windows':
- if self.arch == 'x64':
- # Use double leading / as the first occurence gets removed by msys bash
- # when invoking the .bat file (side-effect of posix path conversion)
- environ={'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
- 'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'}
- else:
- environ={'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
- return create_jobspec(self.name,
- ['test\\distrib\\csharp\\run_distrib_test%s.bat' % self.script_suffix],
- environ=environ,
- use_workspace=True)
- else:
- raise Exception("Not supported yet.")
-
- def __str__(self):
- return self.name
+ """Tests C# NuGet package"""
+
+ def __init__(self, platform, arch, docker_suffix=None,
+ use_dotnet_cli=False):
+ self.name = 'csharp_%s_%s' % (platform, arch)
+ self.platform = platform
+ self.arch = arch
+ self.docker_suffix = docker_suffix
+ self.labels = ['distribtest', 'csharp', platform, arch]
+ self.script_suffix = ''
+ if docker_suffix:
+ self.name += '_%s' % docker_suffix
+ self.labels.append(docker_suffix)
+ if use_dotnet_cli:
+ self.name += '_dotnetcli'
+ self.script_suffix = '_dotnetcli'
+ self.labels.append('dotnetcli')
+ else:
+ self.labels.append('olddotnet')
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if self.platform == 'linux':
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/distribtest/csharp_%s_%s' % (
+ self.docker_suffix, self.arch),
+ 'test/distrib/csharp/run_distrib_test%s.sh' %
+ self.script_suffix,
+ copy_rel_path='test/distrib')
+ elif self.platform == 'macos':
+ return create_jobspec(
+ self.name, [
+ 'test/distrib/csharp/run_distrib_test%s.sh' %
+ self.script_suffix
+ ],
+ environ={'EXTERNAL_GIT_ROOT': '../../../..'},
+ use_workspace=True)
+ elif self.platform == 'windows':
+ if self.arch == 'x64':
+ # Use double leading / as the first occurence gets removed by msys bash
+ # when invoking the .bat file (side-effect of posix path conversion)
+ environ = {
+ 'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
+ 'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'
+ }
+ else:
+ environ = {'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
+ return create_jobspec(
+ self.name, [
+ 'test\\distrib\\csharp\\run_distrib_test%s.bat' %
+ self.script_suffix
+ ],
+ environ=environ,
+ use_workspace=True)
+ else:
+ raise Exception("Not supported yet.")
+
+ def __str__(self):
+ return self.name
+
class PythonDistribTest(object):
- """Tests Python package"""
+ """Tests Python package"""
- def __init__(self, platform, arch, docker_suffix):
- self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
- self.platform = platform
- self.arch = arch
- self.docker_suffix = docker_suffix
- self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
+ def __init__(self, platform, arch, docker_suffix):
+ self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
+ self.platform = platform
+ self.arch = arch
+ self.docker_suffix = docker_suffix
+ self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
- def pre_build_jobspecs(self):
- return []
+ def pre_build_jobspecs(self):
+ return []
- def build_jobspec(self):
- if not self.platform == 'linux':
- raise Exception("Not supported yet.")
+ def build_jobspec(self):
+ if not self.platform == 'linux':
+ raise Exception("Not supported yet.")
- return create_docker_jobspec(self.name,
- 'tools/dockerfile/distribtest/python_%s_%s' % (
- self.docker_suffix,
- self.arch),
- 'test/distrib/python/run_distrib_test.sh',
- copy_rel_path='test/distrib')
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/distribtest/python_%s_%s' % (self.docker_suffix,
+ self.arch),
+ 'test/distrib/python/run_distrib_test.sh',
+ copy_rel_path='test/distrib')
- def __str__(self):
- return self.name
+ def __str__(self):
+ return self.name
class RubyDistribTest(object):
- """Tests Ruby package"""
+ """Tests Ruby package"""
- def __init__(self, platform, arch, docker_suffix):
- self.name = 'ruby_%s_%s_%s' % (platform, arch, docker_suffix)
- self.platform = platform
- self.arch = arch
- self.docker_suffix = docker_suffix
- self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
+ def __init__(self, platform, arch, docker_suffix):
+ self.name = 'ruby_%s_%s_%s' % (platform, arch, docker_suffix)
+ self.platform = platform
+ self.arch = arch
+ self.docker_suffix = docker_suffix
+ self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
- def pre_build_jobspecs(self):
- return []
+ def pre_build_jobspecs(self):
+ return []
- def build_jobspec(self):
- if not self.platform == 'linux':
- raise Exception("Not supported yet.")
+ def build_jobspec(self):
+ if not self.platform == 'linux':
+ raise Exception("Not supported yet.")
- return create_docker_jobspec(self.name,
- 'tools/dockerfile/distribtest/ruby_%s_%s' % (
- self.docker_suffix,
- self.arch),
- 'test/distrib/ruby/run_distrib_test.sh',
- copy_rel_path='test/distrib')
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/distribtest/ruby_%s_%s' % (self.docker_suffix,
+ self.arch),
+ 'test/distrib/ruby/run_distrib_test.sh',
+ copy_rel_path='test/distrib')
- def __str__(self):
- return self.name
+ def __str__(self):
+ return self.name
class PHPDistribTest(object):
- """Tests PHP package"""
-
- def __init__(self, platform, arch, docker_suffix=None):
- self.name = 'php_%s_%s_%s' % (platform, arch, docker_suffix)
- self.platform = platform
- self.arch = arch
- self.docker_suffix = docker_suffix
- self.labels = ['distribtest', 'php', platform, arch, docker_suffix]
-
- def pre_build_jobspecs(self):
- return []
-
- def build_jobspec(self):
- if self.platform == 'linux':
- return create_docker_jobspec(self.name,
- 'tools/dockerfile/distribtest/php_%s_%s' % (
- self.docker_suffix,
- self.arch),
- 'test/distrib/php/run_distrib_test.sh',
- copy_rel_path='test/distrib')
- elif self.platform == 'macos':
- return create_jobspec(self.name,
- ['test/distrib/php/run_distrib_test.sh'],
- environ={'EXTERNAL_GIT_ROOT': '../../../..'},
- use_workspace=True)
- else:
- raise Exception("Not supported yet.")
-
- def __str__(self):
- return self.name
+ """Tests PHP package"""
+
+ def __init__(self, platform, arch, docker_suffix=None):
+ self.name = 'php_%s_%s_%s' % (platform, arch, docker_suffix)
+ self.platform = platform
+ self.arch = arch
+ self.docker_suffix = docker_suffix
+ self.labels = ['distribtest', 'php', platform, arch, docker_suffix]
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if self.platform == 'linux':
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/distribtest/php_%s_%s' % (self.docker_suffix,
+ self.arch),
+ 'test/distrib/php/run_distrib_test.sh',
+ copy_rel_path='test/distrib')
+ elif self.platform == 'macos':
+ return create_jobspec(
+ self.name, ['test/distrib/php/run_distrib_test.sh'],
+ environ={'EXTERNAL_GIT_ROOT': '../../../..'},
+ use_workspace=True)
+ else:
+ raise Exception("Not supported yet.")
+
+ def __str__(self):
+ return self.name
class CppDistribTest(object):
- """Tests Cpp make intall by building examples."""
-
- def __init__(self, platform, arch, docker_suffix=None, testcase=None):
- if platform == 'linux':
- self.name = 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix, testcase)
- else:
- self.name = 'cpp_%s_%s_%s' % (platform, arch, testcase)
- self.platform = platform
- self.arch = arch
- self.docker_suffix = docker_suffix
- self.testcase = testcase
- self.labels = ['distribtest', 'cpp', platform, arch, docker_suffix, testcase]
-
- def pre_build_jobspecs(self):
- return []
-
- def build_jobspec(self):
- if self.platform == 'linux':
- return create_docker_jobspec(self.name,
- 'tools/dockerfile/distribtest/cpp_%s_%s' % (
- self.docker_suffix,
- self.arch),
- 'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase)
- elif self.platform == 'windows':
- return create_jobspec(self.name,
- ['test\\distrib\\cpp\\run_distrib_test_%s.bat' % self.testcase],
- environ={},
- timeout_seconds=30*60,
- use_workspace=True)
- else:
- raise Exception("Not supported yet.")
-
- def __str__(self):
- return self.name
+ """Tests Cpp make intall by building examples."""
+
+ def __init__(self, platform, arch, docker_suffix=None, testcase=None):
+ if platform == 'linux':
+ self.name = 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix,
+ testcase)
+ else:
+ self.name = 'cpp_%s_%s_%s' % (platform, arch, testcase)
+ self.platform = platform
+ self.arch = arch
+ self.docker_suffix = docker_suffix
+ self.testcase = testcase
+ self.labels = [
+ 'distribtest', 'cpp', platform, arch, docker_suffix, testcase
+ ]
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if self.platform == 'linux':
+ return create_docker_jobspec(
+ self.name, 'tools/dockerfile/distribtest/cpp_%s_%s' % (
+ self.docker_suffix, self.arch),
+ 'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase)
+ elif self.platform == 'windows':
+ return create_jobspec(
+ self.name,
+ ['test\\distrib\\cpp\\run_distrib_test_%s.bat' % self.testcase],
+ environ={},
+ timeout_seconds=30 * 60,
+ use_workspace=True)
+ else:
+ raise Exception("Not supported yet.")
+
+ def __str__(self):
+ return self.name
def targets():
- """Gets list of supported targets"""
- return [CppDistribTest('linux', 'x64', 'jessie', 'routeguide'),
- CppDistribTest('linux', 'x64', 'jessie', 'cmake'),
- CppDistribTest('windows', 'x86', testcase='cmake'),
- CSharpDistribTest('linux', 'x64', 'wheezy'),
- CSharpDistribTest('linux', 'x64', 'jessie'),
- CSharpDistribTest('linux', 'x86', 'jessie'),
- CSharpDistribTest('linux', 'x64', 'centos7'),
- CSharpDistribTest('linux', 'x64', 'ubuntu1404'),
- CSharpDistribTest('linux', 'x64', 'ubuntu1504'),
- CSharpDistribTest('linux', 'x64', 'ubuntu1510'),
- CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
- CSharpDistribTest('linux', 'x64', 'ubuntu1404', use_dotnet_cli=True),
- CSharpDistribTest('macos', 'x86'),
- CSharpDistribTest('windows', 'x86'),
- CSharpDistribTest('windows', 'x64'),
- PythonDistribTest('linux', 'x64', 'wheezy'),
- PythonDistribTest('linux', 'x64', 'jessie'),
- PythonDistribTest('linux', 'x86', 'jessie'),
- PythonDistribTest('linux', 'x64', 'centos6'),
- PythonDistribTest('linux', 'x64', 'centos7'),
- PythonDistribTest('linux', 'x64', 'fedora20'),
- PythonDistribTest('linux', 'x64', 'fedora21'),
- PythonDistribTest('linux', 'x64', 'fedora22'),
- PythonDistribTest('linux', 'x64', 'fedora23'),
- PythonDistribTest('linux', 'x64', 'opensuse'),
- PythonDistribTest('linux', 'x64', 'arch'),
- PythonDistribTest('linux', 'x64', 'ubuntu1204'),
- PythonDistribTest('linux', 'x64', 'ubuntu1404'),
- PythonDistribTest('linux', 'x64', 'ubuntu1504'),
- PythonDistribTest('linux', 'x64', 'ubuntu1510'),
- PythonDistribTest('linux', 'x64', 'ubuntu1604'),
- RubyDistribTest('linux', 'x64', 'wheezy'),
- RubyDistribTest('linux', 'x64', 'jessie'),
- RubyDistribTest('linux', 'x86', 'jessie'),
- RubyDistribTest('linux', 'x64', 'centos6'),
- RubyDistribTest('linux', 'x64', 'centos7'),
- RubyDistribTest('linux', 'x64', 'fedora20'),
- RubyDistribTest('linux', 'x64', 'fedora21'),
- RubyDistribTest('linux', 'x64', 'fedora22'),
- RubyDistribTest('linux', 'x64', 'fedora23'),
- RubyDistribTest('linux', 'x64', 'opensuse'),
- RubyDistribTest('linux', 'x64', 'ubuntu1204'),
- RubyDistribTest('linux', 'x64', 'ubuntu1404'),
- RubyDistribTest('linux', 'x64', 'ubuntu1504'),
- RubyDistribTest('linux', 'x64', 'ubuntu1510'),
- RubyDistribTest('linux', 'x64', 'ubuntu1604'),
- PHPDistribTest('linux', 'x64', 'jessie'),
- PHPDistribTest('macos', 'x64'),
- ]
+ """Gets list of supported targets"""
+ return [
+ CppDistribTest('linux', 'x64', 'jessie', 'routeguide'),
+ CppDistribTest('linux', 'x64', 'jessie', 'cmake'),
+ CppDistribTest('windows', 'x86', testcase='cmake'),
+ CSharpDistribTest('linux', 'x64', 'wheezy'),
+ CSharpDistribTest('linux', 'x64', 'jessie'),
+ CSharpDistribTest('linux', 'x86', 'jessie'),
+ CSharpDistribTest('linux', 'x64', 'centos7'),
+ CSharpDistribTest('linux', 'x64', 'ubuntu1404'),
+ CSharpDistribTest('linux', 'x64', 'ubuntu1504'),
+ CSharpDistribTest('linux', 'x64', 'ubuntu1510'),
+ CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
+ CSharpDistribTest('linux', 'x64', 'ubuntu1404', use_dotnet_cli=True),
+ CSharpDistribTest('macos', 'x86'),
+ CSharpDistribTest('windows', 'x86'),
+ CSharpDistribTest('windows', 'x64'),
+ PythonDistribTest('linux', 'x64', 'wheezy'),
+ PythonDistribTest('linux', 'x64', 'jessie'),
+ PythonDistribTest('linux', 'x86', 'jessie'),
+ PythonDistribTest('linux', 'x64', 'centos6'),
+ PythonDistribTest('linux', 'x64', 'centos7'),
+ PythonDistribTest('linux', 'x64', 'fedora20'),
+ PythonDistribTest('linux', 'x64', 'fedora21'),
+ PythonDistribTest('linux', 'x64', 'fedora22'),
+ PythonDistribTest('linux', 'x64', 'fedora23'),
+ PythonDistribTest('linux', 'x64', 'opensuse'),
+ PythonDistribTest('linux', 'x64', 'arch'),
+ PythonDistribTest('linux', 'x64', 'ubuntu1204'),
+ PythonDistribTest('linux', 'x64', 'ubuntu1404'),
+ PythonDistribTest('linux', 'x64', 'ubuntu1504'),
+ PythonDistribTest('linux', 'x64', 'ubuntu1510'),
+ PythonDistribTest('linux', 'x64', 'ubuntu1604'),
+ RubyDistribTest('linux', 'x64', 'wheezy'),
+ RubyDistribTest('linux', 'x64', 'jessie'),
+ RubyDistribTest('linux', 'x86', 'jessie'),
+ RubyDistribTest('linux', 'x64', 'centos6'),
+ RubyDistribTest('linux', 'x64', 'centos7'),
+ RubyDistribTest('linux', 'x64', 'fedora20'),
+ RubyDistribTest('linux', 'x64', 'fedora21'),
+ RubyDistribTest('linux', 'x64', 'fedora22'),
+ RubyDistribTest('linux', 'x64', 'fedora23'),
+ RubyDistribTest('linux', 'x64', 'opensuse'),
+ RubyDistribTest('linux', 'x64', 'ubuntu1204'),
+ RubyDistribTest('linux', 'x64', 'ubuntu1404'),
+ RubyDistribTest('linux', 'x64', 'ubuntu1504'),
+ RubyDistribTest('linux', 'x64', 'ubuntu1510'),
+ RubyDistribTest('linux', 'x64', 'ubuntu1604'),
+ PHPDistribTest('linux', 'x64', 'jessie'),
+ PHPDistribTest('macos', 'x64'),
+ ]
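
All of the distribtest classes above expose the same duck-typed interface: a name, a labels list, pre_build_jobspecs() and build_jobspec(). A driver can therefore pick targets by label before building jobspecs. The helper below is only an assumed sketch of that selection (the real driver is not shown in this section), and the sys.path entries assume the snippet is run from the repository root.

import os
import sys

# Assumption: run from the grpc repository root. distribtest_targets imports
# python_utils.jobset, so tools/run_tests must be importable as well.
sys.path.insert(0, os.path.abspath('tools/run_tests'))
sys.path.insert(0, os.path.abspath('tools/run_tests/artifacts'))
import distribtest_targets


def select_by_labels(targets, wanted):
    """Keep only targets whose label list contains every wanted label."""
    wanted = set(wanted)
    return [t for t in targets if wanted.issubset(t.labels)]


for target in select_by_labels(distribtest_targets.targets(),
                               ['ruby', 'linux', 'x64']):
    print(target)                     # e.g. ruby_linux_x64_jessie
    jobspec = target.build_jobspec()  # a jobset.JobSpec, ready for scheduling
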
diff --git a/tools/run_tests/artifacts/package_targets.py b/tools/run_tests/artifacts/package_targets.py
index ff93bb30e8..52908454f2 100644
--- a/tools/run_tests/artifacts/package_targets.py
+++ b/tools/run_tests/artifacts/package_targets.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Definition of targets to build distribution packages."""
import os.path
@@ -22,128 +21,137 @@ sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
-def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
- flake_retries=0, timeout_retries=0):
- """Creates jobspec for a task running under docker."""
- environ = environ.copy()
- environ['RUN_COMMAND'] = shell_command
-
- docker_args=[]
- for k,v in environ.items():
- docker_args += ['-e', '%s=%s' % (k, v)]
- docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
- 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
- 'OUTPUT_DIR': 'artifacts'}
- jobspec = jobset.JobSpec(
- cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
- environ=docker_env,
- shortname='build_package.%s' % (name),
- timeout_seconds=30*60,
- flake_retries=flake_retries,
- timeout_retries=timeout_retries)
- return jobspec
-
-def create_jobspec(name, cmdline, environ=None, cwd=None, shell=False,
- flake_retries=0, timeout_retries=0):
- """Creates jobspec."""
- jobspec = jobset.JobSpec(
- cmdline=cmdline,
- environ=environ,
- cwd=cwd,
- shortname='build_package.%s' % (name),
- timeout_seconds=10*60,
- flake_retries=flake_retries,
- timeout_retries=timeout_retries,
- shell=shell)
- return jobspec
+def create_docker_jobspec(name,
+ dockerfile_dir,
+ shell_command,
+ environ={},
+ flake_retries=0,
+ timeout_retries=0):
+ """Creates jobspec for a task running under docker."""
+ environ = environ.copy()
+ environ['RUN_COMMAND'] = shell_command
+
+ docker_args = []
+ for k, v in environ.items():
+ docker_args += ['-e', '%s=%s' % (k, v)]
+ docker_env = {
+ 'DOCKERFILE_DIR': dockerfile_dir,
+ 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
+ 'OUTPUT_DIR': 'artifacts'
+ }
+ jobspec = jobset.JobSpec(
+ cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
+ docker_args,
+ environ=docker_env,
+ shortname='build_package.%s' % (name),
+ timeout_seconds=30 * 60,
+ flake_retries=flake_retries,
+ timeout_retries=timeout_retries)
+ return jobspec
+
+
+def create_jobspec(name,
+ cmdline,
+ environ=None,
+ cwd=None,
+ shell=False,
+ flake_retries=0,
+ timeout_retries=0):
+ """Creates jobspec."""
+ jobspec = jobset.JobSpec(
+ cmdline=cmdline,
+ environ=environ,
+ cwd=cwd,
+ shortname='build_package.%s' % (name),
+ timeout_seconds=10 * 60,
+ flake_retries=flake_retries,
+ timeout_retries=timeout_retries,
+ shell=shell)
+ return jobspec
class CSharpPackage:
- """Builds C# nuget packages."""
-
- def __init__(self, linux=False):
- self.linux = linux
- self.labels = ['package', 'csharp']
- if linux:
- self.name = 'csharp_package_dotnetcli_linux'
- self.labels += ['linux']
- else:
- self.name = 'csharp_package_dotnetcli_windows'
- self.labels += ['windows']
-
- def pre_build_jobspecs(self):
- return []
-
- def build_jobspec(self):
- if self.linux:
- return create_docker_jobspec(
- self.name,
- 'tools/dockerfile/test/csharp_jessie_x64',
- 'src/csharp/build_packages_dotnetcli.sh')
- else:
- return create_jobspec(self.name,
- ['build_packages_dotnetcli.bat'],
- cwd='src\\csharp',
- shell=True)
-
- def __str__(self):
- return self.name
+ """Builds C# nuget packages."""
+
+ def __init__(self, linux=False):
+ self.linux = linux
+ self.labels = ['package', 'csharp']
+ if linux:
+ self.name = 'csharp_package_dotnetcli_linux'
+ self.labels += ['linux']
+ else:
+ self.name = 'csharp_package_dotnetcli_windows'
+ self.labels += ['windows']
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if self.linux:
+ return create_docker_jobspec(
+ self.name, 'tools/dockerfile/test/csharp_jessie_x64',
+ 'src/csharp/build_packages_dotnetcli.sh')
+ else:
+ return create_jobspec(
+ self.name, ['build_packages_dotnetcli.bat'],
+ cwd='src\\csharp',
+ shell=True)
+
+ def __str__(self):
+ return self.name
+
class RubyPackage:
- """Collects ruby gems created in the artifact phase"""
+ """Collects ruby gems created in the artifact phase"""
- def __init__(self):
- self.name = 'ruby_package'
- self.labels = ['package', 'ruby', 'linux']
+ def __init__(self):
+ self.name = 'ruby_package'
+ self.labels = ['package', 'ruby', 'linux']
- def pre_build_jobspecs(self):
- return []
+ def pre_build_jobspecs(self):
+ return []
- def build_jobspec(self):
- return create_docker_jobspec(
- self.name,
- 'tools/dockerfile/grpc_artifact_linux_x64',
- 'tools/run_tests/artifacts/build_package_ruby.sh')
+ def build_jobspec(self):
+ return create_docker_jobspec(
+ self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
+ 'tools/run_tests/artifacts/build_package_ruby.sh')
class PythonPackage:
- """Collects python eggs and wheels created in the artifact phase"""
+ """Collects python eggs and wheels created in the artifact phase"""
- def __init__(self):
- self.name = 'python_package'
- self.labels = ['package', 'python', 'linux']
+ def __init__(self):
+ self.name = 'python_package'
+ self.labels = ['package', 'python', 'linux']
- def pre_build_jobspecs(self):
- return []
+ def pre_build_jobspecs(self):
+ return []
- def build_jobspec(self):
- return create_docker_jobspec(
- self.name,
- 'tools/dockerfile/grpc_artifact_linux_x64',
- 'tools/run_tests/artifacts/build_package_python.sh')
+ def build_jobspec(self):
+ return create_docker_jobspec(
+ self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
+ 'tools/run_tests/artifacts/build_package_python.sh')
class PHPPackage:
- """Copy PHP PECL package artifact"""
+ """Copy PHP PECL package artifact"""
- def __init__(self):
- self.name = 'php_package'
- self.labels = ['package', 'php', 'linux']
+ def __init__(self):
+ self.name = 'php_package'
+ self.labels = ['package', 'php', 'linux']
- def pre_build_jobspecs(self):
- return []
+ def pre_build_jobspecs(self):
+ return []
- def build_jobspec(self):
- return create_docker_jobspec(
- self.name,
- 'tools/dockerfile/grpc_artifact_linux_x64',
- 'tools/run_tests/artifacts/build_package_php.sh')
+ def build_jobspec(self):
+ return create_docker_jobspec(
+ self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
+ 'tools/run_tests/artifacts/build_package_php.sh')
def targets():
- """Gets list of supported targets"""
- return [CSharpPackage(),
- CSharpPackage(linux=True),
- RubyPackage(),
- PythonPackage(),
- PHPPackage()]
+ """Gets list of supported targets"""
+ return [
+ CSharpPackage(), CSharpPackage(linux=True), RubyPackage(),
+ PythonPackage(), PHPPackage()
+ ]
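
The package targets follow the same pattern as the artifact and distribtest targets: each object yields zero or more pre-build jobspecs plus one build jobspec, so a runner can collect them uniformly. task_runner.py appears in the diffstat above but is not shown here, so the loop below is only an assumed sketch of that pattern, with the same repository-root path assumption as before.

import os
import sys

# Assumption: run from the grpc repository root so that python_utils.jobset
# (imported by package_targets) resolves.
sys.path.insert(0, os.path.abspath('tools/run_tests'))
sys.path.insert(0, os.path.abspath('tools/run_tests/artifacts'))
import package_targets

pre_build_jobs = []
build_jobs = []
for target in package_targets.targets():
    pre_build_jobs += target.pre_build_jobspecs()  # currently always empty
    build_jobs.append(target.build_jobspec())      # one jobset.JobSpec each

print('collected %d package build jobs' % len(build_jobs))
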
diff --git a/tools/run_tests/performance/bq_upload_result.py b/tools/run_tests/performance/bq_upload_result.py
index 31819d6159..6702587557 100755
--- a/tools/run_tests/performance/bq_upload_result.py
+++ b/tools/run_tests/performance/bq_upload_result.py
@@ -26,146 +26,161 @@ import time
import uuid
import massage_qps_stats
-
-gcp_utils_dir = os.path.abspath(os.path.join(
- os.path.dirname(__file__), '../../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils
-
-_PROJECT_ID='grpc-testing'
+_PROJECT_ID = 'grpc-testing'
def _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id, result_file):
- with open(result_file, 'r') as f:
- (col1, col2, col3) = f.read().split(',')
- latency50 = float(col1.strip()) * 1000
- latency90 = float(col2.strip()) * 1000
- latency99 = float(col3.strip()) * 1000
-
- scenario_result = {
- 'scenario': {
- 'name': 'netperf_tcp_rr'
- },
- 'summary': {
- 'latency50': latency50,
- 'latency90': latency90,
- 'latency99': latency99
+ with open(result_file, 'r') as f:
+ (col1, col2, col3) = f.read().split(',')
+ latency50 = float(col1.strip()) * 1000
+ latency90 = float(col2.strip()) * 1000
+ latency99 = float(col3.strip()) * 1000
+
+ scenario_result = {
+ 'scenario': {
+ 'name': 'netperf_tcp_rr'
+ },
+ 'summary': {
+ 'latency50': latency50,
+ 'latency90': latency90,
+ 'latency99': latency99
+ }
}
- }
- bq = big_query_utils.create_big_query()
- _create_results_table(bq, dataset_id, table_id)
+ bq = big_query_utils.create_big_query()
+ _create_results_table(bq, dataset_id, table_id)
- if not _insert_result(bq, dataset_id, table_id, scenario_result, flatten=False):
- print('Error uploading result to bigquery.')
- sys.exit(1)
+ if not _insert_result(
+ bq, dataset_id, table_id, scenario_result, flatten=False):
+ print('Error uploading result to bigquery.')
+ sys.exit(1)
def _upload_scenario_result_to_bigquery(dataset_id, table_id, result_file):
- with open(result_file, 'r') as f:
- scenario_result = json.loads(f.read())
+ with open(result_file, 'r') as f:
+ scenario_result = json.loads(f.read())
- bq = big_query_utils.create_big_query()
- _create_results_table(bq, dataset_id, table_id)
+ bq = big_query_utils.create_big_query()
+ _create_results_table(bq, dataset_id, table_id)
- if not _insert_result(bq, dataset_id, table_id, scenario_result):
- print('Error uploading result to bigquery.')
- sys.exit(1)
+ if not _insert_result(bq, dataset_id, table_id, scenario_result):
+ print('Error uploading result to bigquery.')
+ sys.exit(1)
def _insert_result(bq, dataset_id, table_id, scenario_result, flatten=True):
- if flatten:
- _flatten_result_inplace(scenario_result)
- _populate_metadata_inplace(scenario_result)
- row = big_query_utils.make_row(str(uuid.uuid4()), scenario_result)
- return big_query_utils.insert_rows(bq,
- _PROJECT_ID,
- dataset_id,
- table_id,
- [row])
+ if flatten:
+ _flatten_result_inplace(scenario_result)
+ _populate_metadata_inplace(scenario_result)
+ row = big_query_utils.make_row(str(uuid.uuid4()), scenario_result)
+ return big_query_utils.insert_rows(bq, _PROJECT_ID, dataset_id, table_id,
+ [row])
def _create_results_table(bq, dataset_id, table_id):
- with open(os.path.dirname(__file__) + '/scenario_result_schema.json', 'r') as f:
- table_schema = json.loads(f.read())
- desc = 'Results of performance benchmarks.'
- return big_query_utils.create_table2(bq, _PROJECT_ID, dataset_id,
- table_id, table_schema, desc)
+ with open(os.path.dirname(__file__) + '/scenario_result_schema.json',
+ 'r') as f:
+ table_schema = json.loads(f.read())
+ desc = 'Results of performance benchmarks.'
+ return big_query_utils.create_table2(bq, _PROJECT_ID, dataset_id, table_id,
+ table_schema, desc)
def _flatten_result_inplace(scenario_result):
- """Bigquery is not really great for handling deeply nested data
+ """Bigquery is not really great for handling deeply nested data
and repeated fields. To maintain values of some fields while keeping
the schema relatively simple, we artificially leave some of the fields
as JSON strings.
"""
- scenario_result['scenario']['clientConfig'] = json.dumps(scenario_result['scenario']['clientConfig'])
- scenario_result['scenario']['serverConfig'] = json.dumps(scenario_result['scenario']['serverConfig'])
- scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
- scenario_result['serverCpuStats'] = []
- for stats in scenario_result['serverStats']:
- scenario_result['serverCpuStats'].append(dict())
- scenario_result['serverCpuStats'][-1]['totalCpuTime'] = stats.pop('totalCpuTime', None)
- scenario_result['serverCpuStats'][-1]['idleCpuTime'] = stats.pop('idleCpuTime', None)
- for stats in scenario_result['clientStats']:
- stats['latencies'] = json.dumps(stats['latencies'])
- stats.pop('requestResults', None)
- scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
- scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
- scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
- scenario_result['requestResults'] = json.dumps(scenario_result.get('requestResults', []))
- scenario_result['serverCpuUsage'] = scenario_result['summary'].pop('serverCpuUsage', None)
- scenario_result['summary'].pop('successfulRequestsPerSecond', None)
- scenario_result['summary'].pop('failedRequestsPerSecond', None)
- massage_qps_stats.massage_qps_stats(scenario_result)
+ scenario_result['scenario']['clientConfig'] = json.dumps(
+ scenario_result['scenario']['clientConfig'])
+ scenario_result['scenario']['serverConfig'] = json.dumps(
+ scenario_result['scenario']['serverConfig'])
+ scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
+ scenario_result['serverCpuStats'] = []
+ for stats in scenario_result['serverStats']:
+ scenario_result['serverCpuStats'].append(dict())
+ scenario_result['serverCpuStats'][-1]['totalCpuTime'] = stats.pop(
+ 'totalCpuTime', None)
+ scenario_result['serverCpuStats'][-1]['idleCpuTime'] = stats.pop(
+ 'idleCpuTime', None)
+ for stats in scenario_result['clientStats']:
+ stats['latencies'] = json.dumps(stats['latencies'])
+ stats.pop('requestResults', None)
+ scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
+ scenario_result['clientSuccess'] = json.dumps(
+ scenario_result['clientSuccess'])
+ scenario_result['serverSuccess'] = json.dumps(
+ scenario_result['serverSuccess'])
+ scenario_result['requestResults'] = json.dumps(
+ scenario_result.get('requestResults', []))
+ scenario_result['serverCpuUsage'] = scenario_result['summary'].pop(
+ 'serverCpuUsage', None)
+ scenario_result['summary'].pop('successfulRequestsPerSecond', None)
+ scenario_result['summary'].pop('failedRequestsPerSecond', None)
+ massage_qps_stats.massage_qps_stats(scenario_result)
def _populate_metadata_inplace(scenario_result):
- """Populates metadata based on environment variables set by Jenkins."""
- # NOTE: Grabbing the Jenkins environment variables will only work if the
- # driver is running locally on the same machine where Jenkins has started
- # the job. For our setup, this is currently the case, so just assume that.
- build_number = os.getenv('BUILD_NUMBER')
- build_url = os.getenv('BUILD_URL')
- job_name = os.getenv('JOB_NAME')
- git_commit = os.getenv('GIT_COMMIT')
- # actual commit is the actual head of PR that is getting tested
- git_actual_commit = os.getenv('ghprbActualCommit')
-
- utc_timestamp = str(calendar.timegm(time.gmtime()))
- metadata = {'created': utc_timestamp}
-
- if build_number:
- metadata['buildNumber'] = build_number
- if build_url:
- metadata['buildUrl'] = build_url
- if job_name:
- metadata['jobName'] = job_name
- if git_commit:
- metadata['gitCommit'] = git_commit
- if git_actual_commit:
- metadata['gitActualCommit'] = git_actual_commit
-
- scenario_result['metadata'] = metadata
+ """Populates metadata based on environment variables set by Jenkins."""
+ # NOTE: Grabbing the Jenkins environment variables will only work if the
+ # driver is running locally on the same machine where Jenkins has started
+ # the job. For our setup, this is currently the case, so just assume that.
+ build_number = os.getenv('BUILD_NUMBER')
+ build_url = os.getenv('BUILD_URL')
+ job_name = os.getenv('JOB_NAME')
+ git_commit = os.getenv('GIT_COMMIT')
+    # actual commit is the head of the PR that is being tested
+ git_actual_commit = os.getenv('ghprbActualCommit')
+
+ utc_timestamp = str(calendar.timegm(time.gmtime()))
+ metadata = {'created': utc_timestamp}
+
+ if build_number:
+ metadata['buildNumber'] = build_number
+ if build_url:
+ metadata['buildUrl'] = build_url
+ if job_name:
+ metadata['jobName'] = job_name
+ if git_commit:
+ metadata['gitCommit'] = git_commit
+ if git_actual_commit:
+ metadata['gitActualCommit'] = git_actual_commit
+
+ scenario_result['metadata'] = metadata
argp = argparse.ArgumentParser(description='Upload result to big query.')
-argp.add_argument('--bq_result_table', required=True, default=None, type=str,
- help='Bigquery "dataset.table" to upload results to.')
-argp.add_argument('--file_to_upload', default='scenario_result.json', type=str,
- help='Report file to upload.')
-argp.add_argument('--file_format',
- choices=['scenario_result','netperf_latency_csv'],
- default='scenario_result',
- help='Format of the file to upload.')
+argp.add_argument(
+ '--bq_result_table',
+ required=True,
+ default=None,
+ type=str,
+ help='Bigquery "dataset.table" to upload results to.')
+argp.add_argument(
+ '--file_to_upload',
+ default='scenario_result.json',
+ type=str,
+ help='Report file to upload.')
+argp.add_argument(
+ '--file_format',
+ choices=['scenario_result', 'netperf_latency_csv'],
+ default='scenario_result',
+ help='Format of the file to upload.')
args = argp.parse_args()
dataset_id, table_id = args.bq_result_table.split('.', 2)
if args.file_format == 'netperf_latency_csv':
- _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id, args.file_to_upload)
+ _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id,
+ args.file_to_upload)
else:
- _upload_scenario_result_to_bigquery(dataset_id, table_id, args.file_to_upload)
+ _upload_scenario_result_to_bigquery(dataset_id, table_id,
+ args.file_to_upload)
print('Successfully uploaded %s to BigQuery.\n' % args.file_to_upload)
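
As an illustration of what the flattening above produces, here is a minimal sketch with invented field values (only a subset of the real result fields is shown); it assumes _flatten_result_inplace from this file is in scope:

    scenario_result = {
        'scenario': {'clientConfig': {'rpc_type': 'UNARY'}, 'serverConfig': {}},
        'latencies': {'bucket': [1, 2, 3]},
        'serverStats': [{'timeElapsed': 30.0, 'totalCpuTime': 12, 'idleCpuTime': 3}],
        'clientStats': [{'latencies': {'bucket': [4, 5]}}],
        'serverCores': [8],
        'clientSuccess': [True],
        'serverSuccess': [True],
        'summary': {'qps': 1000.0, 'serverCpuUsage': 75.0},
    }
    _flatten_result_inplace(scenario_result)
    # Nested sub-objects are now JSON strings, so each one maps to a single
    # BigQuery column instead of a deeply nested/repeated schema:
    #   scenario_result['scenario']['clientConfig'] == '{"rpc_type": "UNARY"}'
    #   scenario_result['serverCpuStats'] == [{'totalCpuTime': 12, 'idleCpuTime': 3}]
    #   scenario_result['serverCpuUsage'] == 75.0  (hoisted out of 'summary')
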
diff --git a/tools/run_tests/performance/massage_qps_stats.py b/tools/run_tests/performance/massage_qps_stats.py
index 48c57581a5..37f6e7aae0 100644
--- a/tools/run_tests/performance/massage_qps_stats.py
+++ b/tools/run_tests/performance/massage_qps_stats.py
@@ -15,182 +15,456 @@
# Autogenerated by tools/codegen/core/gen_stats_data.py
import massage_qps_stats_helpers
+
+
def massage_qps_stats(scenario_result):
- for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
- if "coreStats" not in stats: return
- core_stats = stats["coreStats"]
- del stats["coreStats"]
- stats["core_client_calls_created"] = massage_qps_stats_helpers.counter(core_stats, "client_calls_created")
- stats["core_server_calls_created"] = massage_qps_stats_helpers.counter(core_stats, "server_calls_created")
- stats["core_cqs_created"] = massage_qps_stats_helpers.counter(core_stats, "cqs_created")
- stats["core_client_channels_created"] = massage_qps_stats_helpers.counter(core_stats, "client_channels_created")
- stats["core_client_subchannels_created"] = massage_qps_stats_helpers.counter(core_stats, "client_subchannels_created")
- stats["core_server_channels_created"] = massage_qps_stats_helpers.counter(core_stats, "server_channels_created")
- stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(core_stats, "syscall_poll")
- stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(core_stats, "syscall_wait")
- stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick")
- stats["core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kicked_without_poller")
- stats["core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kicked_again")
- stats["core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick_wakeup_fd")
- stats["core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick_wakeup_cv")
- stats["core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick_own_thread")
- stats["core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(core_stats, "histogram_slow_lookups")
- stats["core_syscall_write"] = massage_qps_stats_helpers.counter(core_stats, "syscall_write")
- stats["core_syscall_read"] = massage_qps_stats_helpers.counter(core_stats, "syscall_read")
- stats["core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(core_stats, "tcp_backup_pollers_created")
- stats["core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(core_stats, "tcp_backup_poller_polls")
- stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_batches")
- stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_cancel")
- stats["core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_send_initial_metadata")
- stats["core_http2_op_send_message"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_send_message")
- stats["core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_send_trailing_metadata")
- stats["core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_recv_initial_metadata")
- stats["core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_recv_message")
- stats["core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_recv_trailing_metadata")
- stats["core_http2_settings_writes"] = massage_qps_stats_helpers.counter(core_stats, "http2_settings_writes")
- stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(core_stats, "http2_pings_sent")
- stats["core_http2_writes_begun"] = massage_qps_stats_helpers.counter(core_stats, "http2_writes_begun")
- stats["core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(core_stats, "http2_writes_offloaded")
- stats["core_http2_writes_continued"] = massage_qps_stats_helpers.counter(core_stats, "http2_writes_continued")
- stats["core_http2_partial_writes"] = massage_qps_stats_helpers.counter(core_stats, "http2_partial_writes")
- stats["core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_initial_write")
- stats["core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_start_new_stream")
- stats["core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_message")
- stats["core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_initial_metadata")
- stats["core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_trailing_metadata")
- stats["core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_retry_send_ping")
- stats["core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_continue_pings")
- stats["core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_goaway_sent")
- stats["core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_rst_stream")
- stats["core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_close_from_api")
- stats["core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_stream_flow_control")
- stats["core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_transport_flow_control")
- stats["core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_settings")
- stats["core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_bdp_estimator_ping")
- stats["core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_flow_control_unstalled_by_setting")
- stats["core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_flow_control_unstalled_by_update")
- stats["core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_application_ping")
- stats["core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_keepalive_ping")
- stats["core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_transport_flow_control_unstalled")
- stats["core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_ping_response")
- stats["core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_force_rst_stream")
- stats["core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(core_stats, "http2_spurious_writes_begun")
- stats["core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_indexed")
- stats["core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_incidx")
- stats["core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_incidx_v")
- stats["core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_notidx")
- stats["core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_notidx_v")
- stats["core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_nvridx")
- stats["core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_nvridx_v")
- stats["core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_uncompressed")
- stats["core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_huffman")
- stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_binary")
- stats["core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_binary_base64")
- stats["core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_indexed")
- stats["core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_incidx")
- stats["core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_incidx_v")
- stats["core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_notidx")
- stats["core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_notidx_v")
- stats["core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_nvridx")
- stats["core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_nvridx_v")
- stats["core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_uncompressed")
- stats["core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_huffman")
- stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_binary")
- stats["core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_binary_base64")
- stats["core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_initiated")
- stats["core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_scheduled_items")
- stats["core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_scheduled_final_items")
- stats["core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_offloaded")
- stats["core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(core_stats, "call_combiner_locks_initiated")
- stats["core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(core_stats, "call_combiner_locks_scheduled_items")
- stats["core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(core_stats, "call_combiner_set_notify_on_cancel")
- stats["core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(core_stats, "call_combiner_cancelled")
- stats["core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(core_stats, "executor_scheduled_short_items")
- stats["core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(core_stats, "executor_scheduled_long_items")
- stats["core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(core_stats, "executor_scheduled_to_self")
- stats["core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(core_stats, "executor_wakeup_initiated")
- stats["core_executor_queue_drained"] = massage_qps_stats_helpers.counter(core_stats, "executor_queue_drained")
- stats["core_executor_push_retries"] = massage_qps_stats_helpers.counter(core_stats, "executor_push_retries")
- stats["core_server_requested_calls"] = massage_qps_stats_helpers.counter(core_stats, "server_requested_calls")
- stats["core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(core_stats, "server_slowpath_requests_queued")
- stats["core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(core_stats, "cq_ev_queue_trylock_failures")
- stats["core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(core_stats, "cq_ev_queue_trylock_successes")
- stats["core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(core_stats, "cq_ev_queue_transient_pop_failures")
- h = massage_qps_stats_helpers.histogram(core_stats, "call_initial_size")
- stats["core_call_initial_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_call_initial_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "poll_events_returned")
- stats["core_poll_events_returned"] = ",".join("%f" % x for x in h.buckets)
- stats["core_poll_events_returned_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_size")
- stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_write_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_iov_size")
- stats["core_tcp_write_iov_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_write_iov_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
- stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_read_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer")
- stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_read_offer_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer_iov_size")
- stats["core_tcp_read_offer_iov_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_read_offer_iov_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_message_size")
- stats["core_http2_send_message_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_http2_send_message_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_initial_metadata_per_write")
- stats["core_http2_send_initial_metadata_per_write"] = ",".join("%f" % x for x in h.buckets)
- stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_message_per_write")
- stats["core_http2_send_message_per_write"] = ",".join("%f" % x for x in h.buckets)
- stats["core_http2_send_message_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_trailing_metadata_per_write")
- stats["core_http2_send_trailing_metadata_per_write"] = ",".join("%f" % x for x in h.buckets)
- stats["core_http2_send_trailing_metadata_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_flowctl_per_write")
- stats["core_http2_send_flowctl_per_write"] = ",".join("%f" % x for x in h.buckets)
- stats["core_http2_send_flowctl_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "server_cqs_checked")
- stats["core_server_cqs_checked"] = ",".join("%f" % x for x in h.buckets)
- stats["core_server_cqs_checked_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
+ for stats in scenario_result["serverStats"] + scenario_result[
+ "clientStats"]:
+ if "coreStats" not in stats: return
+ core_stats = stats["coreStats"]
+ del stats["coreStats"]
+ stats["core_client_calls_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "client_calls_created")
+ stats["core_server_calls_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "server_calls_created")
+ stats["core_cqs_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "cqs_created")
+ stats[
+ "core_client_channels_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "client_channels_created")
+ stats[
+ "core_client_subchannels_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "client_subchannels_created")
+ stats[
+ "core_server_channels_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "server_channels_created")
+ stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_poll")
+ stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_wait")
+ stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kick")
+ stats[
+ "core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kicked_without_poller")
+ stats["core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kicked_again")
+ stats[
+ "core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kick_wakeup_fd")
+ stats[
+ "core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kick_wakeup_cv")
+ stats[
+ "core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kick_own_thread")
+ stats[
+ "core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(
+ core_stats, "histogram_slow_lookups")
+ stats["core_syscall_write"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_write")
+ stats["core_syscall_read"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_read")
+ stats[
+ "core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "tcp_backup_pollers_created")
+ stats[
+ "core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(
+ core_stats, "tcp_backup_poller_polls")
+ stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_batches")
+ stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_cancel")
+ stats[
+ "core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_send_initial_metadata")
+ stats["core_http2_op_send_message"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_send_message")
+ stats[
+ "core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_send_trailing_metadata")
+ stats[
+ "core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_recv_initial_metadata")
+ stats["core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_recv_message")
+ stats[
+ "core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_recv_trailing_metadata")
+ stats["core_http2_settings_writes"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_settings_writes")
+ stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_pings_sent")
+ stats["core_http2_writes_begun"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_writes_begun")
+ stats[
+ "core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_writes_offloaded")
+ stats[
+ "core_http2_writes_continued"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_writes_continued")
+ stats["core_http2_partial_writes"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_partial_writes")
+ stats[
+ "core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_initial_write")
+ stats[
+ "core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_start_new_stream")
+ stats[
+ "core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_send_message")
+ stats[
+ "core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_send_initial_metadata")
+ stats[
+ "core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_send_trailing_metadata")
+ stats[
+ "core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_retry_send_ping")
+ stats[
+ "core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_continue_pings")
+ stats[
+ "core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_goaway_sent")
+ stats[
+ "core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_rst_stream")
+ stats[
+ "core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_close_from_api")
+ stats[
+ "core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_stream_flow_control")
+ stats[
+ "core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_transport_flow_control")
+ stats[
+ "core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_send_settings")
+ stats[
+ "core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_bdp_estimator_ping")
+ stats[
+ "core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_flow_control_unstalled_by_setting")
+ stats[
+ "core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_flow_control_unstalled_by_update")
+ stats[
+ "core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_application_ping")
+ stats[
+ "core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_keepalive_ping")
+ stats[
+ "core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_transport_flow_control_unstalled")
+ stats[
+ "core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_ping_response")
+ stats[
+ "core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_force_rst_stream")
+ stats[
+ "core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_spurious_writes_begun")
+ stats["core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_indexed")
+ stats[
+ "core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_incidx")
+ stats[
+ "core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_incidx_v")
+ stats[
+ "core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_notidx")
+ stats[
+ "core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_notidx_v")
+ stats[
+ "core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_nvridx")
+ stats[
+ "core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_nvridx_v")
+ stats[
+ "core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_uncompressed")
+ stats["core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_huffman")
+ stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_binary")
+ stats[
+ "core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_binary_base64")
+ stats["core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_indexed")
+ stats[
+ "core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_incidx")
+ stats[
+ "core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_incidx_v")
+ stats[
+ "core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_notidx")
+ stats[
+ "core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_notidx_v")
+ stats[
+ "core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_nvridx")
+ stats[
+ "core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_nvridx_v")
+ stats[
+ "core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_uncompressed")
+ stats["core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_huffman")
+ stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_binary")
+ stats[
+ "core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_binary_base64")
+ stats[
+ "core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
+ core_stats, "combiner_locks_initiated")
+ stats[
+ "core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "combiner_locks_scheduled_items")
+ stats[
+ "core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "combiner_locks_scheduled_final_items")
+ stats[
+ "core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(
+ core_stats, "combiner_locks_offloaded")
+ stats[
+ "core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
+ core_stats, "call_combiner_locks_initiated")
+ stats[
+ "core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "call_combiner_locks_scheduled_items")
+ stats[
+ "core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(
+ core_stats, "call_combiner_set_notify_on_cancel")
+ stats[
+ "core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(
+ core_stats, "call_combiner_cancelled")
+ stats[
+ "core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_scheduled_short_items")
+ stats[
+ "core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_scheduled_long_items")
+ stats[
+ "core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_scheduled_to_self")
+ stats[
+ "core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_wakeup_initiated")
+ stats[
+ "core_executor_queue_drained"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_queue_drained")
+ stats["core_executor_push_retries"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_push_retries")
+ stats[
+ "core_server_requested_calls"] = massage_qps_stats_helpers.counter(
+ core_stats, "server_requested_calls")
+ stats[
+ "core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(
+ core_stats, "server_slowpath_requests_queued")
+ stats[
+ "core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(
+ core_stats, "cq_ev_queue_trylock_failures")
+ stats[
+ "core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(
+ core_stats, "cq_ev_queue_trylock_successes")
+ stats[
+ "core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(
+ core_stats, "cq_ev_queue_transient_pop_failures")
+ h = massage_qps_stats_helpers.histogram(core_stats, "call_initial_size")
+ stats["core_call_initial_size"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_call_initial_size_bkts"] = ",".join("%f" % x
+ for x in h.boundaries)
+ stats[
+ "core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "poll_events_returned")
+ stats["core_poll_events_returned"] = ",".join("%f" % x
+ for x in h.buckets)
+ stats["core_poll_events_returned_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_size")
+ stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_tcp_write_size_bkts"] = ",".join("%f" % x
+ for x in h.boundaries)
+ stats["core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats["core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats["core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "tcp_write_iov_size")
+ stats["core_tcp_write_iov_size"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_tcp_write_iov_size_bkts"] = ",".join("%f" % x
+ for x in h.boundaries)
+ stats[
+ "core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
+ stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_tcp_read_size_bkts"] = ",".join("%f" % x
+ for x in h.boundaries)
+ stats["core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats["core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats["core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer")
+ stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_tcp_read_offer_bkts"] = ",".join("%f" % x
+ for x in h.boundaries)
+ stats["core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats["core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats["core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "tcp_read_offer_iov_size")
+ stats["core_tcp_read_offer_iov_size"] = ",".join("%f" % x
+ for x in h.buckets)
+ stats["core_tcp_read_offer_iov_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "http2_send_message_size")
+ stats["core_http2_send_message_size"] = ",".join("%f" % x
+ for x in h.buckets)
+ stats["core_http2_send_message_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(
+ core_stats, "http2_send_initial_metadata_per_write")
+ stats["core_http2_send_initial_metadata_per_write"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "http2_send_message_per_write")
+ stats["core_http2_send_message_per_write"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_message_per_write_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(
+ core_stats, "http2_send_trailing_metadata_per_write")
+ stats["core_http2_send_trailing_metadata_per_write"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_trailing_metadata_per_write_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "http2_send_flowctl_per_write")
+ stats["core_http2_send_flowctl_per_write"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_flowctl_per_write_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "server_cqs_checked")
+ stats["core_server_cqs_checked"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_server_cqs_checked_bkts"] = ",".join("%f" % x
+ for x in h.boundaries)
+ stats[
+ "core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
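
The autogenerated block above applies one pattern throughout: every core counter is copied into a flat core_<name> field, and every core histogram is expanded into a bucket CSV, a boundary CSV, and 50/95/99th percentiles. A minimal sketch of that shape with invented numbers, assuming massage_qps_stats_helpers is importable as above:

    import massage_qps_stats_helpers

    stats = {"coreStats": {"metrics": [
        {"name": "client_calls_created", "count": "42"},
        {"name": "tcp_write_size", "histogram": {"buckets": [
            {"start": "0", "count": "10"}, {"start": "64", "count": "5"}]}},
    ]}}
    core_stats = stats.pop("coreStats")  # same effect as the del above
    stats["core_client_calls_created"] = massage_qps_stats_helpers.counter(
        core_stats, "client_calls_created")  # -> 42
    h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_size")
    stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
    # -> "10.000000,5.000000"
    stats["core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(
        h.buckets, 50, h.boundaries)  # -> 48.0
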
diff --git a/tools/run_tests/performance/massage_qps_stats_helpers.py b/tools/run_tests/performance/massage_qps_stats_helpers.py
index a2fe4ae6c3..108451cd55 100644
--- a/tools/run_tests/performance/massage_qps_stats_helpers.py
+++ b/tools/run_tests/performance/massage_qps_stats_helpers.py
@@ -14,44 +14,49 @@
import collections
+
def _threshold_for_count_below(buckets, boundaries, count_below):
- count_so_far = 0
- for lower_idx in range(0, len(buckets)):
- count_so_far += buckets[lower_idx]
- if count_so_far >= count_below:
- break
- if count_so_far == count_below:
- # this bucket hits the threshold exactly... we should be midway through
- # any run of zero values following the bucket
- for upper_idx in range(lower_idx + 1, len(buckets)):
- if buckets[upper_idx] != 0:
- break
- return (boundaries[lower_idx] + boundaries[upper_idx]) / 2.0
- else:
- # treat values as uniform throughout the bucket, and find where this value
- # should lie
- lower_bound = boundaries[lower_idx]
- upper_bound = boundaries[lower_idx + 1]
- return (upper_bound -
- (upper_bound - lower_bound) * (count_so_far - count_below) /
- float(buckets[lower_idx]))
+ count_so_far = 0
+ for lower_idx in range(0, len(buckets)):
+ count_so_far += buckets[lower_idx]
+ if count_so_far >= count_below:
+ break
+ if count_so_far == count_below:
+ # this bucket hits the threshold exactly... we should be midway through
+ # any run of zero values following the bucket
+ for upper_idx in range(lower_idx + 1, len(buckets)):
+ if buckets[upper_idx] != 0:
+ break
+ return (boundaries[lower_idx] + boundaries[upper_idx]) / 2.0
+ else:
+ # treat values as uniform throughout the bucket, and find where this value
+ # should lie
+ lower_bound = boundaries[lower_idx]
+ upper_bound = boundaries[lower_idx + 1]
+ return (upper_bound - (upper_bound - lower_bound) *
+ (count_so_far - count_below) / float(buckets[lower_idx]))
+
def percentile(buckets, pctl, boundaries):
- return _threshold_for_count_below(
- buckets, boundaries, sum(buckets) * pctl / 100.0)
+ return _threshold_for_count_below(buckets, boundaries,
+ sum(buckets) * pctl / 100.0)
+
def counter(core_stats, name):
- for stat in core_stats['metrics']:
- if stat['name'] == name:
- return int(stat.get('count', 0))
+ for stat in core_stats['metrics']:
+ if stat['name'] == name:
+ return int(stat.get('count', 0))
+
Histogram = collections.namedtuple('Histogram', 'buckets boundaries')
+
+
def histogram(core_stats, name):
- for stat in core_stats['metrics']:
- if stat['name'] == name:
- buckets = []
- boundaries = []
- for b in stat['histogram']['buckets']:
- buckets.append(int(b.get('count', 0)))
- boundaries.append(int(b.get('start', 0)))
- return Histogram(buckets=buckets, boundaries=boundaries)
+ for stat in core_stats['metrics']:
+ if stat['name'] == name:
+ buckets = []
+ boundaries = []
+ for b in stat['histogram']['buckets']:
+ buckets.append(int(b.get('count', 0)))
+ boundaries.append(int(b.get('start', 0)))
+ return Histogram(buckets=buckets, boundaries=boundaries)
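
As a worked example of the interpolation in _threshold_for_count_below above (invented numbers, assuming these helpers are in scope):

    buckets = [3, 7, 0]       # 3 samples starting at 0, 7 starting at 10, none at 20+
    boundaries = [0, 10, 20]  # per-bucket start values, as built by histogram()
    p50 = percentile(buckets, 50, boundaries)
    # target count = sum(buckets) * 50 / 100.0 = 5.0; the first bucket holds only
    # 3 samples, so the threshold is interpolated 2/7 of the way into the
    # [10, 20) bucket: 20 - (20 - 10) * (10 - 5.0) / 7.0 ~= 12.86
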
diff --git a/tools/run_tests/performance/patch_scenario_results_schema.py b/tools/run_tests/performance/patch_scenario_results_schema.py
index 81ba5381b3..2a2aadc242 100755
--- a/tools/run_tests/performance/patch_scenario_results_schema.py
+++ b/tools/run_tests/performance/patch_scenario_results_schema.py
@@ -25,27 +25,32 @@ import sys
import time
import uuid
-
-gcp_utils_dir = os.path.abspath(os.path.join(
- os.path.dirname(__file__), '../../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils
+_PROJECT_ID = 'grpc-testing'
-_PROJECT_ID='grpc-testing'
def _patch_results_table(dataset_id, table_id):
- bq = big_query_utils.create_big_query()
- with open(os.path.dirname(__file__) + '/scenario_result_schema.json', 'r') as f:
- table_schema = json.loads(f.read())
- desc = 'Results of performance benchmarks.'
- return big_query_utils.patch_table(bq, _PROJECT_ID, dataset_id,
- table_id, table_schema)
-
-
-argp = argparse.ArgumentParser(description='Patch schema of scenario results table.')
-argp.add_argument('--bq_result_table', required=True, default=None, type=str,
- help='Bigquery "dataset.table" to patch.')
+ bq = big_query_utils.create_big_query()
+ with open(os.path.dirname(__file__) + '/scenario_result_schema.json',
+ 'r') as f:
+ table_schema = json.loads(f.read())
+ desc = 'Results of performance benchmarks.'
+ return big_query_utils.patch_table(bq, _PROJECT_ID, dataset_id, table_id,
+ table_schema)
+
+
+argp = argparse.ArgumentParser(
+ description='Patch schema of scenario results table.')
+argp.add_argument(
+ '--bq_result_table',
+ required=True,
+ default=None,
+ type=str,
+ help='Bigquery "dataset.table" to patch.')
args = argp.parse_args()
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index cafac3dfad..e92de5898c 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -16,66 +16,64 @@
import math
-WARMUP_SECONDS=5
-JAVA_WARMUP_SECONDS=15 # Java needs more warmup time for JIT to kick in.
-BENCHMARK_SECONDS=30
-
-SMOKETEST='smoketest'
-SCALABLE='scalable'
-INPROC='inproc'
-SWEEP='sweep'
-DEFAULT_CATEGORIES=[SCALABLE, SMOKETEST]
-
-SECURE_SECARGS = {'use_test_ca': True,
- 'server_host_override': 'foo.test.google.fr'}
+WARMUP_SECONDS = 5
+JAVA_WARMUP_SECONDS = 15 # Java needs more warmup time for JIT to kick in.
+BENCHMARK_SECONDS = 30
+
+SMOKETEST = 'smoketest'
+SCALABLE = 'scalable'
+INPROC = 'inproc'
+SWEEP = 'sweep'
+DEFAULT_CATEGORIES = [SCALABLE, SMOKETEST]
+
+SECURE_SECARGS = {
+ 'use_test_ca': True,
+ 'server_host_override': 'foo.test.google.fr'
+}
HISTOGRAM_PARAMS = {
- 'resolution': 0.01,
- 'max_possible': 60e9,
+ 'resolution': 0.01,
+ 'max_possible': 60e9,
}
# target number of RPCs outstanding across all client channels in
# non-ping-pong tests (since we can only specify per-channel numbers, the
# actual target will be slightly higher)
-OUTSTANDING_REQUESTS={
- 'async': 6400,
- 'async-limited': 800,
- 'sync': 1000
-}
+OUTSTANDING_REQUESTS = {'async': 6400, 'async-limited': 800, 'sync': 1000}
# wide is the number of client channels in multi-channel tests (1 otherwise)
-WIDE=64
+WIDE = 64
def _get_secargs(is_secure):
- if is_secure:
- return SECURE_SECARGS
- else:
- return None
+ if is_secure:
+ return SECURE_SECARGS
+ else:
+ return None
def remove_nonproto_fields(scenario):
- """Remove special-purpose that contains some extra info about the scenario
+    """Remove special-purpose fields that contain some extra info about the scenario
but don't belong to the ScenarioConfig protobuf message"""
- scenario.pop('CATEGORIES', None)
- scenario.pop('CLIENT_LANGUAGE', None)
- scenario.pop('SERVER_LANGUAGE', None)
- scenario.pop('EXCLUDED_POLL_ENGINES', None)
- return scenario
+ scenario.pop('CATEGORIES', None)
+ scenario.pop('CLIENT_LANGUAGE', None)
+ scenario.pop('SERVER_LANGUAGE', None)
+ scenario.pop('EXCLUDED_POLL_ENGINES', None)
+ return scenario
def geometric_progression(start, stop, step):
- n = start
- while n < stop:
- yield int(round(n))
- n *= step
+ n = start
+ while n < stop:
+ yield int(round(n))
+ n *= step
def _payload_type(use_generic_payload, req_size, resp_size):
r = {}
sizes = {
- 'req_size': req_size,
- 'resp_size': resp_size,
+ 'req_size': req_size,
+ 'resp_size': resp_size,
}
if use_generic_payload:
r['bytebuf_params'] = sizes
@@ -83,6 +81,7 @@ def _payload_type(use_generic_payload, req_size, resp_size):
r['simple_params'] = sizes
return r
+
def _load_params(offered_load):
r = {}
if offered_load is None:
@@ -93,21 +92,25 @@ def _load_params(offered_load):
r['poisson'] = load
return r
+
def _add_channel_arg(config, key, value):
- if 'channel_args' in config:
- channel_args = config['channel_args']
- else:
- channel_args = []
- config['channel_args'] = channel_args
- arg = {'name': key}
- if isinstance(value, int):
- arg['int_value'] = value
- else:
- arg['str_value'] = value
- channel_args.append(arg)
-
-def _ping_pong_scenario(name, rpc_type,
- client_type, server_type,
+ if 'channel_args' in config:
+ channel_args = config['channel_args']
+ else:
+ channel_args = []
+ config['channel_args'] = channel_args
+ arg = {'name': key}
+ if isinstance(value, int):
+ arg['int_value'] = value
+ else:
+ arg['str_value'] = value
+ channel_args.append(arg)
+
+
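
A small sketch of how _add_channel_arg above chooses between int_value and str_value (the config dict here is invented):

    cfg = {}
    _add_channel_arg(cfg, 'grpc.minimal_stack', 1)
    _add_channel_arg(cfg, 'grpc.optimization_target', 'latency')
    # cfg == {'channel_args': [
    #     {'name': 'grpc.minimal_stack', 'int_value': 1},
    #     {'name': 'grpc.optimization_target', 'str_value': 'latency'}]}
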
+def _ping_pong_scenario(name,
+ rpc_type,
+ client_type,
+ server_type,
secure=True,
use_generic_payload=False,
req_size=0,
@@ -128,824 +131,1029 @@ def _ping_pong_scenario(name, rpc_type,
excluded_poll_engines=[],
minimal_stack=False,
offered_load=None):
- """Creates a basic ping pong scenario."""
- scenario = {
- 'name': name,
- 'num_servers': 1,
- 'num_clients': 1,
- 'client_config': {
- 'client_type': client_type,
- 'security_params': _get_secargs(secure),
- 'outstanding_rpcs_per_channel': 1,
- 'client_channels': 1,
- 'async_client_threads': 1,
- 'threads_per_cq': client_threads_per_cq,
- 'rpc_type': rpc_type,
- 'histogram_params': HISTOGRAM_PARAMS,
- 'channel_args': [],
- },
- 'server_config': {
- 'server_type': server_type,
- 'security_params': _get_secargs(secure),
- 'async_server_threads': async_server_threads,
- 'threads_per_cq': server_threads_per_cq,
- 'channel_args': [],
- },
- 'warmup_seconds': warmup_seconds,
- 'benchmark_seconds': BENCHMARK_SECONDS
- }
- if resource_quota_size:
- scenario['server_config']['resource_quota_size'] = resource_quota_size
- if use_generic_payload:
- if server_type != 'ASYNC_GENERIC_SERVER':
- raise Exception('Use ASYNC_GENERIC_SERVER for generic payload.')
- scenario['server_config']['payload_config'] = _payload_type(use_generic_payload, req_size, resp_size)
-
- scenario['client_config']['payload_config'] = _payload_type(use_generic_payload, req_size, resp_size)
-
- # Optimization target of 'throughput' does not work well with epoll1 polling
- # engine. Use the default value of 'blend'
- optimization_target = 'throughput'
-
- if unconstrained_client:
- outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[unconstrained_client]
- # clamp buffer usage to something reasonable (16 gig for now)
- MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
- if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
- outstanding_calls = max(1, MAX_MEMORY_USE / max(req_size, resp_size))
- wide = channels if channels is not None else WIDE
- deep = int(math.ceil(1.0 * outstanding_calls / wide))
-
- scenario['num_clients'] = num_clients if num_clients is not None else 0 # use as many clients as available.
- scenario['client_config']['outstanding_rpcs_per_channel'] = deep
- scenario['client_config']['client_channels'] = wide
- scenario['client_config']['async_client_threads'] = 0
- if offered_load is not None:
+ """Creates a basic ping pong scenario."""
+ scenario = {
+ 'name': name,
+ 'num_servers': 1,
+ 'num_clients': 1,
+ 'client_config': {
+ 'client_type': client_type,
+ 'security_params': _get_secargs(secure),
+ 'outstanding_rpcs_per_channel': 1,
+ 'client_channels': 1,
+ 'async_client_threads': 1,
+ 'threads_per_cq': client_threads_per_cq,
+ 'rpc_type': rpc_type,
+ 'histogram_params': HISTOGRAM_PARAMS,
+ 'channel_args': [],
+ },
+ 'server_config': {
+ 'server_type': server_type,
+ 'security_params': _get_secargs(secure),
+ 'async_server_threads': async_server_threads,
+ 'threads_per_cq': server_threads_per_cq,
+ 'channel_args': [],
+ },
+ 'warmup_seconds': warmup_seconds,
+ 'benchmark_seconds': BENCHMARK_SECONDS
+ }
+ if resource_quota_size:
+ scenario['server_config']['resource_quota_size'] = resource_quota_size
+ if use_generic_payload:
+ if server_type != 'ASYNC_GENERIC_SERVER':
+ raise Exception('Use ASYNC_GENERIC_SERVER for generic payload.')
+ scenario['server_config']['payload_config'] = _payload_type(
+ use_generic_payload, req_size, resp_size)
+
+ scenario['client_config']['payload_config'] = _payload_type(
+ use_generic_payload, req_size, resp_size)
+
+ # Optimization target of 'throughput' does not work well with epoll1 polling
+ # engine. Use the default value of 'blend'
+ optimization_target = 'throughput'
+
+ if unconstrained_client:
+ outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[
+ unconstrained_client]
+ # clamp buffer usage to something reasonable (16 gig for now)
+ MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
+ if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
+ outstanding_calls = max(1, MAX_MEMORY_USE / max(req_size,
+ resp_size))
+ wide = channels if channels is not None else WIDE
+ deep = int(math.ceil(1.0 * outstanding_calls / wide))
+
+ scenario[
+ 'num_clients'] = num_clients if num_clients is not None else 0 # use as many clients as available.
+ scenario['client_config']['outstanding_rpcs_per_channel'] = deep
+ scenario['client_config']['client_channels'] = wide
+ scenario['client_config']['async_client_threads'] = 0
+ if offered_load is not None:
+ optimization_target = 'latency'
+ else:
+ scenario['client_config']['outstanding_rpcs_per_channel'] = 1
+ scenario['client_config']['client_channels'] = 1
+ scenario['client_config']['async_client_threads'] = 1
optimization_target = 'latency'
- else:
- scenario['client_config']['outstanding_rpcs_per_channel'] = 1
- scenario['client_config']['client_channels'] = 1
- scenario['client_config']['async_client_threads'] = 1
- optimization_target = 'latency'
-
- scenario['client_config']['load_params'] = _load_params(offered_load)
-
- optimization_channel_arg = {
- 'name': 'grpc.optimization_target',
- 'str_value': optimization_target
- }
- scenario['client_config']['channel_args'].append(optimization_channel_arg)
- scenario['server_config']['channel_args'].append(optimization_channel_arg)
-
- if minimal_stack:
- _add_channel_arg(scenario['client_config'], 'grpc.minimal_stack', 1)
- _add_channel_arg(scenario['server_config'], 'grpc.minimal_stack', 1)
-
- if messages_per_stream:
- scenario['client_config']['messages_per_stream'] = messages_per_stream
- if client_language:
- # the CLIENT_LANGUAGE field is recognized by run_performance_tests.py
- scenario['CLIENT_LANGUAGE'] = client_language
- if server_language:
- # the SERVER_LANGUAGE field is recognized by run_performance_tests.py
- scenario['SERVER_LANGUAGE'] = server_language
- if categories:
- scenario['CATEGORIES'] = categories
- if len(excluded_poll_engines):
- # The polling engines for which this scenario is excluded
- scenario['EXCLUDED_POLL_ENGINES'] = excluded_poll_engines
- return scenario
+
+ scenario['client_config']['load_params'] = _load_params(offered_load)
+
+ optimization_channel_arg = {
+ 'name': 'grpc.optimization_target',
+ 'str_value': optimization_target
+ }
+ scenario['client_config']['channel_args'].append(optimization_channel_arg)
+ scenario['server_config']['channel_args'].append(optimization_channel_arg)
+
+ if minimal_stack:
+ _add_channel_arg(scenario['client_config'], 'grpc.minimal_stack', 1)
+ _add_channel_arg(scenario['server_config'], 'grpc.minimal_stack', 1)
+
+ if messages_per_stream:
+ scenario['client_config']['messages_per_stream'] = messages_per_stream
+ if client_language:
+ # the CLIENT_LANGUAGE field is recognized by run_performance_tests.py
+ scenario['CLIENT_LANGUAGE'] = client_language
+ if server_language:
+ # the SERVER_LANGUAGE field is recognized by run_performance_tests.py
+ scenario['SERVER_LANGUAGE'] = server_language
+ if categories:
+ scenario['CATEGORIES'] = categories
+ if len(excluded_poll_engines):
+ # The polling engines for which this scenario is excluded
+ scenario['EXCLUDED_POLL_ENGINES'] = excluded_poll_engines
+ return scenario
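
To make the unconstrained-client sizing above concrete, here is a small worked sketch of the clamp-and-split arithmetic with illustrative values (floor division is used for clarity; the Python 2 code above relies on integer `/`):

    import math

    MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024   # the 16 GiB clamp used above
    req_size = resp_size = 1024 * 1024         # assume 1 MB request/response payloads
    outstanding_calls = 30000                  # assume the scenario asked for 30k outstanding RPCs
    channels = 300                             # assume 300 client channels

    if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
        # keep total in-flight payload buffers under ~16 GiB
        outstanding_calls = max(1, MAX_MEMORY_USE // max(req_size, resp_size))  # -> 16384

    wide = channels                                        # becomes client_channels
    deep = int(math.ceil(1.0 * outstanding_calls / wide))  # becomes outstanding_rpcs_per_channel -> 55
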
class CXXLanguage:
- def __init__(self):
- self.safename = 'cxx'
-
- def worker_cmdline(self):
- return ['bins/opt/qps_worker']
-
- def worker_port_offset(self):
- return 0
-
- def scenarios(self):
- # TODO(ctiller): add 70% load latency test
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_unary_1channel_100rpcs_1MB', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- unconstrained_client='async', outstanding=100, channels=1,
- num_clients=1,
- secure=False,
- categories=[SMOKETEST] + [INPROC] + [SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_streaming_from_client_1channel_1MB', rpc_type='STREAMING_FROM_CLIENT',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- unconstrained_client='async', outstanding=1, channels=1,
- num_clients=1,
- secure=False,
- categories=[SMOKETEST] + [INPROC] + [SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_unary_75Kqps_600channel_60Krpcs_300Breq_50Bresp',
- rpc_type='UNARY', client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=300, resp_size=50,
- unconstrained_client='async', outstanding=30000, channels=300,
- offered_load=37500, secure=False,
- async_server_threads=16, server_threads_per_cq=1,
- categories=[SMOKETEST] + [SCALABLE])
-
- for secure in [True, False]:
- secstr = 'secure' if secure else 'insecure'
- smoketest_categories = ([SMOKETEST] if secure else [INPROC]) + [SCALABLE]
-
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_ping_pong_%s' % secstr,
- rpc_type='STREAMING',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- use_generic_payload=True, async_server_threads=1,
- secure=secure,
- categories=smoketest_categories)
-
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_unconstrained_%s' % secstr,
- rpc_type='STREAMING',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories+[SCALABLE])
-
- for mps in geometric_progression(1, 20, 10):
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' % (mps, secstr),
- rpc_type='STREAMING',
+ def __init__(self):
+ self.safename = 'cxx'
+
+ def worker_cmdline(self):
+ return ['bins/opt/qps_worker']
+
+ def worker_port_offset(self):
+ return 0
+
+ def scenarios(self):
+ # TODO(ctiller): add 70% load latency test
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_1channel_100rpcs_1MB',
+ rpc_type='UNARY',
client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure, messages_per_stream=mps,
- minimal_stack=not secure,
- categories=smoketest_categories+[SCALABLE])
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ unconstrained_client='async',
+ outstanding=100,
+ channels=1,
+ num_clients=1,
+ secure=False,
+ categories=[SMOKETEST] + [INPROC] + [SCALABLE])
- for mps in geometric_progression(1, 200, math.sqrt(10)):
yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' % (mps, secstr),
- rpc_type='STREAMING',
+ 'cpp_protobuf_async_streaming_from_client_1channel_1MB',
+ rpc_type='STREAMING_FROM_CLIENT',
client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure, messages_per_stream=mps,
- minimal_stack=not secure,
- categories=[SWEEP])
-
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_1channel_1MBmsg_%s' % secstr,
- rpc_type='STREAMING',
- req_size=1024*1024,
- resp_size=1024*1024,
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories+[SCALABLE],
- channels=1, outstanding=100)
-
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_unconstrained_64KBmsg_%s' % secstr,
- rpc_type='STREAMING',
- req_size=64*1024,
- resp_size=64*1024,
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories+[SCALABLE])
-
- # TODO(https://github.com/grpc/grpc/issues/11500) Re-enable this test
- #yield _ping_pong_scenario(
- # 'cpp_generic_async_streaming_qps_unconstrained_1cq_%s' % secstr,
- # rpc_type='STREAMING',
- # client_type='ASYNC_CLIENT',
- # server_type='ASYNC_GENERIC_SERVER',
- # unconstrained_client='async-limited', use_generic_payload=True,
- # secure=secure,
- # client_threads_per_cq=1000000, server_threads_per_cq=1000000,
- # categories=smoketest_categories+[SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_%s' % secstr,
- rpc_type='STREAMING',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure,
- client_threads_per_cq=2, server_threads_per_cq=2,
- categories=smoketest_categories+[SCALABLE])
-
- #yield _ping_pong_scenario(
- # 'cpp_protobuf_async_streaming_qps_unconstrained_1cq_%s' % secstr,
- # rpc_type='STREAMING',
- # client_type='ASYNC_CLIENT',
- # server_type='ASYNC_SERVER',
- # unconstrained_client='async-limited',
- # secure=secure,
- # client_threads_per_cq=1000000, server_threads_per_cq=1000000,
- # categories=smoketest_categories+[SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_%s' % secstr,
- rpc_type='STREAMING',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_SERVER',
- unconstrained_client='async',
- secure=secure,
- client_threads_per_cq=2, server_threads_per_cq=2,
- categories=smoketest_categories+[SCALABLE])
-
- #yield _ping_pong_scenario(
- # 'cpp_protobuf_async_unary_qps_unconstrained_1cq_%s' % secstr,
- # rpc_type='UNARY',
- # client_type='ASYNC_CLIENT',
- # server_type='ASYNC_SERVER',
- # unconstrained_client='async-limited',
- # secure=secure,
- # client_threads_per_cq=1000000, server_threads_per_cq=1000000,
- # categories=smoketest_categories+[SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_%s' % secstr,
- rpc_type='UNARY',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_SERVER',
- unconstrained_client='async',
- secure=secure,
- client_threads_per_cq=2, server_threads_per_cq=2,
- categories=smoketest_categories+[SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
- rpc_type='STREAMING',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async-limited', use_generic_payload=True,
- async_server_threads=1,
- minimal_stack=not secure,
- secure=secure)
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s' %
- (secstr),
- rpc_type='UNARY',
- client_type='ASYNC_CLIENT',
- server_type='SYNC_SERVER',
- unconstrained_client='async',
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories + [SCALABLE],
- excluded_poll_engines = ['poll-cv'])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_%s' %
- (secstr),
- rpc_type='UNARY',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_SERVER',
- channels=1,
- outstanding=64,
- req_size=128,
- resp_size=8*1024*1024,
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories + [SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s' % secstr,
- rpc_type='STREAMING',
- client_type='ASYNC_CLIENT',
- server_type='SYNC_SERVER',
- unconstrained_client='async',
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories+[SCALABLE],
- excluded_poll_engines = ['poll-cv'])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_unary_ping_pong_%s_1MB' % secstr, rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories + [SCALABLE])
-
- for rpc_type in ['unary', 'streaming', 'streaming_from_client', 'streaming_from_server']:
- for synchronicity in ['sync', 'async']:
- yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_ping_pong_%s' % (synchronicity, rpc_type, secstr),
- rpc_type=rpc_type.upper(),
- client_type='%s_CLIENT' % synchronicity.upper(),
- server_type='%s_SERVER' % synchronicity.upper(),
- async_server_threads=1,
- minimal_stack=not secure,
- secure=secure)
-
- for size in geometric_progression(1, 1024*1024*1024+1, 8):
- yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_qps_unconstrained_%s_%db' % (synchronicity, rpc_type, secstr, size),
- rpc_type=rpc_type.upper(),
- req_size=size,
- resp_size=size,
- client_type='%s_CLIENT' % synchronicity.upper(),
- server_type='%s_SERVER' % synchronicity.upper(),
- unconstrained_client=synchronicity,
- secure=secure,
- minimal_stack=not secure,
- categories=[SWEEP])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_qps_unconstrained_%s' % (synchronicity, rpc_type, secstr),
- rpc_type=rpc_type.upper(),
- client_type='%s_CLIENT' % synchronicity.upper(),
- server_type='%s_SERVER' % synchronicity.upper(),
- unconstrained_client=synchronicity,
- secure=secure,
- minimal_stack=not secure,
- server_threads_per_cq=3,
- client_threads_per_cq=3,
- categories=smoketest_categories+[SCALABLE])
-
- # TODO(vjpai): Re-enable this test. It has a lot of timeouts
- # and hasn't yet been conclusively identified as a test failure
- # or race in the library
- # yield _ping_pong_scenario(
- # 'cpp_protobuf_%s_%s_qps_unconstrained_%s_500kib_resource_quota' % (synchronicity, rpc_type, secstr),
- # rpc_type=rpc_type.upper(),
- # client_type='%s_CLIENT' % synchronicity.upper(),
- # server_type='%s_SERVER' % synchronicity.upper(),
- # unconstrained_client=synchronicity,
- # secure=secure,
- # categories=smoketest_categories+[SCALABLE],
- # resource_quota_size=500*1024)
-
- if rpc_type == 'streaming':
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ unconstrained_client='async',
+ outstanding=1,
+ channels=1,
+ num_clients=1,
+ secure=False,
+ categories=[SMOKETEST] + [INPROC] + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_75Kqps_600channel_60Krpcs_300Breq_50Bresp',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=300,
+ resp_size=50,
+ unconstrained_client='async',
+ outstanding=30000,
+ channels=300,
+ offered_load=37500,
+ secure=False,
+ async_server_threads=16,
+ server_threads_per_cq=1,
+ categories=[SMOKETEST] + [SCALABLE])
+
+ for secure in [True, False]:
+ secstr = 'secure' if secure else 'insecure'
+ smoketest_categories = ([SMOKETEST]
+ if secure else [INPROC]) + [SCALABLE]
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE])
+
for mps in geometric_progression(1, 20, 10):
- yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s' % (synchronicity, rpc_type, mps, secstr),
- rpc_type=rpc_type.upper(),
- client_type='%s_CLIENT' % synchronicity.upper(),
- server_type='%s_SERVER' % synchronicity.upper(),
- unconstrained_client=synchronicity,
- secure=secure, messages_per_stream=mps,
- minimal_stack=not secure,
- categories=smoketest_categories+[SCALABLE])
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' %
+ (mps, secstr),
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ messages_per_stream=mps,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE])
for mps in geometric_progression(1, 200, math.sqrt(10)):
- yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s' % (synchronicity, rpc_type, mps, secstr),
- rpc_type=rpc_type.upper(),
- client_type='%s_CLIENT' % synchronicity.upper(),
- server_type='%s_SERVER' % synchronicity.upper(),
- unconstrained_client=synchronicity,
- secure=secure, messages_per_stream=mps,
- minimal_stack=not secure,
- categories=[SWEEP])
-
- for channels in geometric_progression(1, 20000, math.sqrt(10)):
- for outstanding in geometric_progression(1, 200000, math.sqrt(10)):
- if synchronicity == 'sync' and outstanding > 1200: continue
- if outstanding < channels: continue
yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding' % (synchronicity, rpc_type, secstr, channels, outstanding),
- rpc_type=rpc_type.upper(),
- client_type='%s_CLIENT' % synchronicity.upper(),
- server_type='%s_SERVER' % synchronicity.upper(),
- unconstrained_client=synchronicity, secure=secure,
+ 'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' %
+ (mps, secstr),
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ messages_per_stream=mps,
minimal_stack=not secure,
- categories=[SWEEP], channels=channels, outstanding=outstanding)
-
- def __str__(self):
- return 'c++'
+ categories=[SWEEP])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_1channel_1MBmsg_%s' % secstr,
+ rpc_type='STREAMING',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE],
+ channels=1,
+ outstanding=100)
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_64KBmsg_%s' %
+ secstr,
+ rpc_type='STREAMING',
+ req_size=64 * 1024,
+ resp_size=64 * 1024,
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ # TODO(https://github.com/grpc/grpc/issues/11500) Re-enable this test
+ #yield _ping_pong_scenario(
+ # 'cpp_generic_async_streaming_qps_unconstrained_1cq_%s' % secstr,
+ # rpc_type='STREAMING',
+ # client_type='ASYNC_CLIENT',
+ # server_type='ASYNC_GENERIC_SERVER',
+ # unconstrained_client='async-limited', use_generic_payload=True,
+ # secure=secure,
+ # client_threads_per_cq=1000000, server_threads_per_cq=1000000,
+ # categories=smoketest_categories+[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_%s'
+ % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ client_threads_per_cq=2,
+ server_threads_per_cq=2,
+ categories=smoketest_categories + [SCALABLE])
+
+ #yield _ping_pong_scenario(
+ # 'cpp_protobuf_async_streaming_qps_unconstrained_1cq_%s' % secstr,
+ # rpc_type='STREAMING',
+ # client_type='ASYNC_CLIENT',
+ # server_type='ASYNC_SERVER',
+ # unconstrained_client='async-limited',
+ # secure=secure,
+ # client_threads_per_cq=1000000, server_threads_per_cq=1000000,
+ # categories=smoketest_categories+[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_%s'
+ % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ client_threads_per_cq=2,
+ server_threads_per_cq=2,
+ categories=smoketest_categories + [SCALABLE])
+
+ #yield _ping_pong_scenario(
+ # 'cpp_protobuf_async_unary_qps_unconstrained_1cq_%s' % secstr,
+ # rpc_type='UNARY',
+ # client_type='ASYNC_CLIENT',
+ # server_type='ASYNC_SERVER',
+ # unconstrained_client='async-limited',
+ # secure=secure,
+ # client_threads_per_cq=1000000, server_threads_per_cq=1000000,
+ # categories=smoketest_categories+[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_%s' %
+ secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ client_threads_per_cq=2,
+ server_threads_per_cq=2,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async-limited',
+ use_generic_payload=True,
+ async_server_threads=1,
+ minimal_stack=not secure,
+ secure=secure)
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s'
+ % (secstr),
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE],
+ excluded_poll_engines=['poll-cv'])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_%s'
+ % (secstr),
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ channels=1,
+ outstanding=64,
+ req_size=128,
+ resp_size=8 * 1024 * 1024,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s'
+ % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE],
+ excluded_poll_engines=['poll-cv'])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_ping_pong_%s_1MB' % secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ for rpc_type in [
+ 'unary', 'streaming', 'streaming_from_client',
+ 'streaming_from_server'
+ ]:
+ for synchronicity in ['sync', 'async']:
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_ping_pong_%s' %
+ (synchronicity, rpc_type, secstr),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ async_server_threads=1,
+ minimal_stack=not secure,
+ secure=secure)
+
+ for size in geometric_progression(1, 1024 * 1024 * 1024 + 1,
+ 8):
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%s_%db' %
+ (synchronicity, rpc_type, secstr, size),
+ rpc_type=rpc_type.upper(),
+ req_size=size,
+ resp_size=size,
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=[SWEEP])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%s' %
+ (synchronicity, rpc_type, secstr),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ minimal_stack=not secure,
+ server_threads_per_cq=3,
+ client_threads_per_cq=3,
+ categories=smoketest_categories + [SCALABLE])
+
+ # TODO(vjpai): Re-enable this test. It has a lot of timeouts
+ # and hasn't yet been conclusively identified as a test failure
+ # or race in the library
+ # yield _ping_pong_scenario(
+ # 'cpp_protobuf_%s_%s_qps_unconstrained_%s_500kib_resource_quota' % (synchronicity, rpc_type, secstr),
+ # rpc_type=rpc_type.upper(),
+ # client_type='%s_CLIENT' % synchronicity.upper(),
+ # server_type='%s_SERVER' % synchronicity.upper(),
+ # unconstrained_client=synchronicity,
+ # secure=secure,
+ # categories=smoketest_categories+[SCALABLE],
+ # resource_quota_size=500*1024)
+
+ if rpc_type == 'streaming':
+ for mps in geometric_progression(1, 20, 10):
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s'
+ % (synchronicity, rpc_type, mps, secstr),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ messages_per_stream=mps,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ for mps in geometric_progression(1, 200, math.sqrt(10)):
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s'
+ % (synchronicity, rpc_type, mps, secstr),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ messages_per_stream=mps,
+ minimal_stack=not secure,
+ categories=[SWEEP])
+
+ for channels in geometric_progression(1, 20000,
+ math.sqrt(10)):
+ for outstanding in geometric_progression(1, 200000,
+ math.sqrt(10)):
+ if synchronicity == 'sync' and outstanding > 1200:
+ continue
+ if outstanding < channels: continue
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding'
+ % (synchronicity, rpc_type, secstr, channels,
+ outstanding),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=[SWEEP],
+ channels=channels,
+ outstanding=outstanding)
+
+ def __str__(self):
+ return 'c++'
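
The C++ sweeps above lean on a geometric_progression() helper defined earlier in scenario_config.py (not shown in this hunk). A plausible sketch of that helper, for orientation only and not necessarily the exact definition:

    def geometric_progression(start, stop, step):
        # yield rounded values start, start*step, start*step**2, ... while below stop
        n = start
        while n < stop:
            yield int(round(n))
            n *= step

    # e.g. geometric_progression(1, 20, 10)             -> 1, 10
    #      geometric_progression(1, 200, math.sqrt(10)) -> roughly 1, 3, 10, 32, 100
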
class CSharpLanguage:
- def __init__(self):
- self.safename = str(self)
-
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_csharp.sh']
-
- def worker_port_offset(self):
- return 100
-
- def scenarios(self):
- yield _ping_pong_scenario(
- 'csharp_generic_async_streaming_ping_pong', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- use_generic_payload=True,
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_generic_async_streaming_ping_pong_insecure_1MB', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- use_generic_payload=True,
- secure=False,
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_generic_async_streaming_qps_unconstrained_insecure', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=False,
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
-
- yield _ping_pong_scenario(
- 'csharp_protobuf_async_unary_ping_pong', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_protobuf_sync_to_async_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')
-
- yield _ping_pong_scenario(
- 'csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async',
- categories=[SMOKETEST,SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_protobuf_async_streaming_qps_unconstrained', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async',
- categories=[SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- server_language='c++', async_server_threads=1,
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_to_cpp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- server_language='c++', async_server_threads=1)
-
- yield _ping_pong_scenario(
- 'csharp_to_cpp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async', server_language='c++',
- categories=[SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='sync', server_language='c++',
- categories=[SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_to_csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async', client_language='c++',
- categories=[SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_protobuf_async_unary_ping_pong_1MB', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- categories=[SMOKETEST, SCALABLE])
-
- def __str__(self):
- return 'csharp'
+ def __init__(self):
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_csharp.sh']
+
+ def worker_port_offset(self):
+ return 100
+
+ def scenarios(self):
+ yield _ping_pong_scenario(
+ 'csharp_generic_async_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_generic_async_streaming_ping_pong_insecure_1MB',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ use_generic_payload=True,
+ secure=False,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_generic_async_streaming_qps_unconstrained_insecure',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=False,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_async_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER')
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_async_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_sync_to_async_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER')
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_async_streaming_qps_unconstrained',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_to_cpp_protobuf_sync_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_to_cpp_protobuf_async_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+
+ yield _ping_pong_scenario(
+ 'csharp_to_cpp_protobuf_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ server_language='c++',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='sync',
+ server_language='c++',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_to_csharp_protobuf_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ client_language='c++',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_async_unary_ping_pong_1MB',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ categories=[SMOKETEST, SCALABLE])
+
+ def __str__(self):
+ return 'csharp'
+
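
Several of the C# scenarios above are cross-language (csharp_to_cpp_*, cpp_to_csharp_*) and carry the uppercase bookkeeping keys CLIENT_LANGUAGE, SERVER_LANGUAGE, CATEGORIES and EXCLUDED_POLL_ENGINES. A hedged sketch of how a driver could separate that metadata from the wire-level scenario config (the real handling lives in run_performance_tests.py and may differ):

    def split_driver_metadata(scenario):
        """Split driver bookkeeping keys from the ScenarioConfig-shaped dict."""
        scenario = dict(scenario)  # work on a copy of the generated dict
        meta = {
            'client_language': scenario.pop('CLIENT_LANGUAGE', None),
            'server_language': scenario.pop('SERVER_LANGUAGE', None),
            'categories': scenario.pop('CATEGORIES', []),
            'excluded_poll_engines': scenario.pop('EXCLUDED_POLL_ENGINES', []),
        }
        return scenario, meta
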
class PythonLanguage:
- def __init__(self):
- self.safename = 'python'
-
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_python.sh']
-
- def worker_port_offset(self):
- return 500
-
- def scenarios(self):
- yield _ping_pong_scenario(
- 'python_generic_sync_streaming_ping_pong', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- use_generic_payload=True,
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'python_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')
-
- yield _ping_pong_scenario(
- 'python_protobuf_async_unary_ping_pong', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
-
- yield _ping_pong_scenario(
- 'python_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'python_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='sync')
-
- yield _ping_pong_scenario(
- 'python_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='sync')
-
- yield _ping_pong_scenario(
- 'python_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- server_language='c++', async_server_threads=1,
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'python_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- server_language='c++', async_server_threads=1)
-
- yield _ping_pong_scenario(
- 'python_protobuf_sync_unary_ping_pong_1MB', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- categories=[SMOKETEST, SCALABLE])
-
- def __str__(self):
- return 'python'
+ def __init__(self):
+ self.safename = 'python'
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_python.sh']
+
+ def worker_port_offset(self):
+ return 500
+
+ def scenarios(self):
+ yield _ping_pong_scenario(
+ 'python_generic_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER')
+
+ yield _ping_pong_scenario(
+ 'python_protobuf_async_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER')
+
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='sync')
+
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_streaming_qps_unconstrained',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='sync')
+
+ yield _ping_pong_scenario(
+ 'python_to_cpp_protobuf_sync_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'python_to_cpp_protobuf_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_unary_ping_pong_1MB',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ categories=[SMOKETEST, SCALABLE])
+
+ def __str__(self):
+ return 'python'
+
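
Each language class reports a distinct worker_port_offset() (C++ 0, C# 100, Ruby 300, Java 400, Python 500, Go 600, PHP 800/900), which keeps concurrently started QPS workers on non-colliding ports. A small sketch of the assumed convention; the base port and exact formula here are hypothetical, with the real computation living in run_performance_tests.py:

    BASE_QPS_WORKER_PORT = 10000  # hypothetical base port

    def worker_port(language, worker_idx):
        # e.g. the second Python worker would land on 10000 + 500 + 1
        return BASE_QPS_WORKER_PORT + language.worker_port_offset() + worker_idx
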
class RubyLanguage:
- def __init__(self):
- pass
- self.safename = str(self)
+ def __init__(self):
+ pass
+ self.safename = str(self)
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_ruby.sh']
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_ruby.sh']
- def worker_port_offset(self):
- return 300
+ def worker_port_offset(self):
+ return 300
- def scenarios(self):
- yield _ping_pong_scenario(
- 'ruby_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- categories=[SMOKETEST, SCALABLE])
+ def scenarios(self):
+ yield _ping_pong_scenario(
+ 'ruby_protobuf_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
- yield _ping_pong_scenario(
- 'ruby_protobuf_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- categories=[SMOKETEST, SCALABLE])
+ yield _ping_pong_scenario(
+ 'ruby_protobuf_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
- yield _ping_pong_scenario(
- 'ruby_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- unconstrained_client='sync')
+ yield _ping_pong_scenario(
+ 'ruby_protobuf_sync_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='sync')
- yield _ping_pong_scenario(
- 'ruby_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- unconstrained_client='sync')
+ yield _ping_pong_scenario(
+ 'ruby_protobuf_sync_streaming_qps_unconstrained',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='sync')
- yield _ping_pong_scenario(
- 'ruby_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- server_language='c++', async_server_threads=1)
+ yield _ping_pong_scenario(
+ 'ruby_to_cpp_protobuf_sync_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
- yield _ping_pong_scenario(
- 'ruby_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- server_language='c++', async_server_threads=1)
+ yield _ping_pong_scenario(
+ 'ruby_to_cpp_protobuf_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
- yield _ping_pong_scenario(
- 'ruby_protobuf_unary_ping_pong_1MB', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- categories=[SMOKETEST, SCALABLE])
+ yield _ping_pong_scenario(
+ 'ruby_protobuf_unary_ping_pong_1MB',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ categories=[SMOKETEST, SCALABLE])
- def __str__(self):
- return 'ruby'
+ def __str__(self):
+ return 'ruby'
class Php7Language:
- def __init__(self, php7_protobuf_c=False):
- pass
- self.php7_protobuf_c=php7_protobuf_c
- self.safename = str(self)
-
- def worker_cmdline(self):
- if self.php7_protobuf_c:
- return ['tools/run_tests/performance/run_worker_php.sh', '--use_protobuf_c_extension']
- return ['tools/run_tests/performance/run_worker_php.sh']
-
- def worker_port_offset(self):
- if self.php7_protobuf_c:
- return 900
- return 800
-
- def scenarios(self):
- php7_extension_mode='php7_protobuf_php_extension'
- if self.php7_protobuf_c:
- php7_extension_mode='php7_protobuf_c_extension'
-
- yield _ping_pong_scenario(
- '%s_to_cpp_protobuf_sync_unary_ping_pong' % php7_extension_mode,
- rpc_type='UNARY', client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- server_language='c++', async_server_threads=1)
-
- yield _ping_pong_scenario(
- '%s_to_cpp_protobuf_sync_streaming_ping_pong' % php7_extension_mode,
- rpc_type='STREAMING', client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- server_language='c++', async_server_threads=1)
-
- # TODO(ddyihai): Investigate why when async_server_threads=1/CPU usage 340%, the QPS performs
- # better than async_server_threads=0/CPU usage 490%.
- yield _ping_pong_scenario(
- '%s_to_cpp_protobuf_sync_unary_qps_unconstrained' % php7_extension_mode,
- rpc_type='UNARY', client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- server_language='c++', outstanding=1, async_server_threads=1, unconstrained_client='sync')
-
- yield _ping_pong_scenario(
- '%s_to_cpp_protobuf_sync_streaming_qps_unconstrained' % php7_extension_mode,
- rpc_type='STREAMING', client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- server_language='c++', outstanding=1, async_server_threads=1, unconstrained_client='sync')
-
- def __str__(self):
- if self.php7_protobuf_c:
- return 'php7_protobuf_c'
- return 'php7'
+ def __init__(self, php7_protobuf_c=False):
+ pass
+ self.php7_protobuf_c = php7_protobuf_c
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ if self.php7_protobuf_c:
+ return [
+ 'tools/run_tests/performance/run_worker_php.sh',
+ '--use_protobuf_c_extension'
+ ]
+ return ['tools/run_tests/performance/run_worker_php.sh']
+
+ def worker_port_offset(self):
+ if self.php7_protobuf_c:
+ return 900
+ return 800
+
+ def scenarios(self):
+ php7_extension_mode = 'php7_protobuf_php_extension'
+ if self.php7_protobuf_c:
+ php7_extension_mode = 'php7_protobuf_c_extension'
+
+ yield _ping_pong_scenario(
+ '%s_to_cpp_protobuf_sync_unary_ping_pong' % php7_extension_mode,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+
+ yield _ping_pong_scenario(
+ '%s_to_cpp_protobuf_sync_streaming_ping_pong' % php7_extension_mode,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+
+ # TODO(ddyihai): Investigate why when async_server_threads=1/CPU usage 340%, the QPS performs
+ # better than async_server_threads=0/CPU usage 490%.
+ yield _ping_pong_scenario(
+ '%s_to_cpp_protobuf_sync_unary_qps_unconstrained' %
+ php7_extension_mode,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ outstanding=1,
+ async_server_threads=1,
+ unconstrained_client='sync')
+
+ yield _ping_pong_scenario(
+ '%s_to_cpp_protobuf_sync_streaming_qps_unconstrained' %
+ php7_extension_mode,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ outstanding=1,
+ async_server_threads=1,
+ unconstrained_client='sync')
+
+ def __str__(self):
+ if self.php7_protobuf_c:
+ return 'php7_protobuf_c'
+ return 'php7'
+
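
The PHP class is registered twice in the LANGUAGES table below, once per protobuf backend; the constructor flag only changes the worker command line and the port offset. A quick illustration based on the methods shown above:

    php = Php7Language()                        # registered as 'php7' (protobuf PHP extension)
    php_c = Php7Language(php7_protobuf_c=True)  # registered as 'php7_protobuf_c' (protobuf C extension)
    php.worker_port_offset()    # 800
    php_c.worker_port_offset()  # 900
    php_c.worker_cmdline()      # [... 'run_worker_php.sh', '--use_protobuf_c_extension']
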
class JavaLanguage:
- def __init__(self):
- pass
- self.safename = str(self)
-
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_java.sh']
-
- def worker_port_offset(self):
- return 400
-
- def scenarios(self):
- for secure in [True, False]:
- secstr = 'secure' if secure else 'insecure'
- smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
-
- yield _ping_pong_scenario(
- 'java_generic_async_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- use_generic_payload=True, async_server_threads=1,
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
- categories=smoketest_categories)
-
- yield _ping_pong_scenario(
- 'java_protobuf_async_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- async_server_threads=1,
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
-
- yield _ping_pong_scenario(
- 'java_protobuf_async_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- async_server_threads=1,
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
- categories=smoketest_categories)
-
- yield _ping_pong_scenario(
- 'java_protobuf_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- async_server_threads=1,
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
-
- yield _ping_pong_scenario(
- 'java_protobuf_async_unary_qps_unconstrained_%s' % secstr, rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async',
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
- categories=smoketest_categories+[SCALABLE])
-
- yield _ping_pong_scenario(
- 'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async',
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
- categories=[SCALABLE])
-
- yield _ping_pong_scenario(
- 'java_generic_async_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
- categories=[SCALABLE])
-
- yield _ping_pong_scenario(
- 'java_generic_async_streaming_qps_one_server_core_%s' % secstr, rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async-limited', use_generic_payload=True,
- async_server_threads=1,
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
-
- # TODO(jtattermusch): add scenarios java vs C++
-
- def __str__(self):
- return 'java'
+ def __init__(self):
+ pass
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_java.sh']
+
+ def worker_port_offset(self):
+ return 400
+
+ def scenarios(self):
+ for secure in [True, False]:
+ secstr = 'secure' if secure else 'insecure'
+ smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
+
+ yield _ping_pong_scenario(
+ 'java_generic_async_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_async_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS)
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_async_unary_ping_pong_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_unary_ping_pong_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS)
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_async_unary_qps_unconstrained_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'java_generic_async_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'java_generic_async_streaming_qps_one_server_core_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async-limited',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS)
+
+ # TODO(jtattermusch): add scenarios java vs C++
+
+ def __str__(self):
+ return 'java'
class GoLanguage:
- def __init__(self):
- pass
- self.safename = str(self)
-
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_go.sh']
-
- def worker_port_offset(self):
- return 600
-
- def scenarios(self):
- for secure in [True, False]:
- secstr = 'secure' if secure else 'insecure'
- smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
-
- # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
- # but that's mostly because of lack of better name of the enum value.
- yield _ping_pong_scenario(
- 'go_generic_sync_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- use_generic_payload=True, async_server_threads=1,
- secure=secure,
- categories=smoketest_categories)
-
- yield _ping_pong_scenario(
- 'go_protobuf_sync_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- async_server_threads=1,
- secure=secure)
-
- yield _ping_pong_scenario(
- 'go_protobuf_sync_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- async_server_threads=1,
- secure=secure,
- categories=smoketest_categories)
-
- # unconstrained_client='async' is intended (client uses goroutines)
- yield _ping_pong_scenario(
- 'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr, rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- unconstrained_client='async',
- secure=secure,
- categories=smoketest_categories+[SCALABLE])
-
- # unconstrained_client='async' is intended (client uses goroutines)
- yield _ping_pong_scenario(
- 'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- unconstrained_client='async',
- secure=secure,
- categories=[SCALABLE])
-
- # unconstrained_client='async' is intended (client uses goroutines)
- # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
- # but that's mostly because of lack of better name of the enum value.
- yield _ping_pong_scenario(
- 'go_generic_sync_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure,
- categories=[SCALABLE])
-
- # TODO(jtattermusch): add scenarios go vs C++
-
- def __str__(self):
- return 'go'
+ def __init__(self):
+ pass
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_go.sh']
+
+ def worker_port_offset(self):
+ return 600
+
+ def scenarios(self):
+ for secure in [True, False]:
+ secstr = 'secure' if secure else 'insecure'
+ smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
+
+ # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
+ # but that's mostly because of lack of better name of the enum value.
+ yield _ping_pong_scenario(
+ 'go_generic_sync_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario(
+ 'go_protobuf_sync_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ async_server_threads=1,
+ secure=secure)
+
+ yield _ping_pong_scenario(
+ 'go_protobuf_sync_unary_ping_pong_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ async_server_threads=1,
+ secure=secure,
+ categories=smoketest_categories)
+
+ # unconstrained_client='async' is intended (client uses goroutines)
+ yield _ping_pong_scenario(
+ 'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ # unconstrained_client='async' is intended (client uses goroutines)
+ yield _ping_pong_scenario(
+ 'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ categories=[SCALABLE])
+
+ # unconstrained_client='async' is intended (client uses goroutines)
+ # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
+ # but that's mostly because of lack of better name of the enum value.
+ yield _ping_pong_scenario(
+ 'go_generic_sync_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ categories=[SCALABLE])
+
+ # TODO(jtattermusch): add scenarios go vs C++
+
+ def __str__(self):
+ return 'go'
LANGUAGES = {
- 'c++' : CXXLanguage(),
- 'csharp' : CSharpLanguage(),
- 'ruby' : RubyLanguage(),
- 'php7' : Php7Language(),
- 'php7_protobuf_c' : Php7Language(php7_protobuf_c=True),
- 'java' : JavaLanguage(),
- 'python' : PythonLanguage(),
- 'go' : GoLanguage(),
+ 'c++': CXXLanguage(),
+ 'csharp': CSharpLanguage(),
+ 'ruby': RubyLanguage(),
+ 'php7': Php7Language(),
+ 'php7_protobuf_c': Php7Language(php7_protobuf_c=True),
+ 'java': JavaLanguage(),
+ 'python': PythonLanguage(),
+ 'go': GoLanguage(),
}
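
A minimal sketch of how the LANGUAGES registry is typically consumed, assuming scenario_config is importable and that the category constants (SMOKETEST, SCALABLE, ...) are plain strings; the real driver logic is in run_performance_tests.py and may differ:

    import json
    import scenario_config  # assumes tools/run_tests/performance is on sys.path

    def dump_scenarios(language_name, category='smoketest'):
        language = scenario_config.LANGUAGES[language_name]
        picked = [s for s in language.scenarios()
                  if category in s.get('CATEGORIES', [])]
        return json.dumps({'scenarios': picked}, indent=2)

    # dump_scenarios('c++') would emit only the C++ scenarios tagged SMOKETEST above
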
diff --git a/tools/run_tests/python_utils/antagonist.py b/tools/run_tests/python_utils/antagonist.py
index 0d79ce0986..a928a4cb00 100755
--- a/tools/run_tests/python_utils/antagonist.py
+++ b/tools/run_tests/python_utils/antagonist.py
@@ -12,8 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""This is used by run_tests.py to create cpu load on a machine"""
while True:
- pass
+ pass
diff --git a/tools/run_tests/python_utils/comment_on_pr.py b/tools/run_tests/python_utils/comment_on_pr.py
index 21b9bb7085..399c996d4d 100644
--- a/tools/run_tests/python_utils/comment_on_pr.py
+++ b/tools/run_tests/python_utils/comment_on_pr.py
@@ -16,19 +16,22 @@ import os
import json
import urllib2
+
def comment_on_pr(text):
- if 'JENKINS_OAUTH_TOKEN' not in os.environ:
- print 'Missing JENKINS_OAUTH_TOKEN env var: not commenting'
- return
- if 'ghprbPullId' not in os.environ:
- print 'Missing ghprbPullId env var: not commenting'
- return
- req = urllib2.Request(
- url = 'https://api.github.com/repos/grpc/grpc/issues/%s/comments' %
- os.environ['ghprbPullId'],
- data = json.dumps({'body': text}),
- headers = {
- 'Authorization': 'token %s' % os.environ['JENKINS_OAUTH_TOKEN'],
- 'Content-Type': 'application/json',
- })
- print urllib2.urlopen(req).read()
+ if 'JENKINS_OAUTH_TOKEN' not in os.environ:
+ print 'Missing JENKINS_OAUTH_TOKEN env var: not commenting'
+ return
+ if 'ghprbPullId' not in os.environ:
+ print 'Missing ghprbPullId env var: not commenting'
+ return
+ req = urllib2.Request(
+ url='https://api.github.com/repos/grpc/grpc/issues/%s/comments' %
+ os.environ['ghprbPullId'],
+ data=json.dumps({
+ 'body': text
+ }),
+ headers={
+ 'Authorization': 'token %s' % os.environ['JENKINS_OAUTH_TOKEN'],
+ 'Content-Type': 'application/json',
+ })
+ print urllib2.urlopen(req).read()
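
comment_on_pr.py targets Python 2 (urllib2 and print statements). For readers porting it, a rough Python 3 equivalent of the same request, offered as a sketch rather than a drop-in replacement:

    import json
    import os
    import urllib.request

    def comment_on_pr(text):
        if 'JENKINS_OAUTH_TOKEN' not in os.environ or 'ghprbPullId' not in os.environ:
            print('Missing JENKINS_OAUTH_TOKEN or ghprbPullId env var: not commenting')
            return
        req = urllib.request.Request(
            url='https://api.github.com/repos/grpc/grpc/issues/%s/comments' %
            os.environ['ghprbPullId'],
            data=json.dumps({'body': text}).encode('utf-8'),
            headers={
                'Authorization': 'token %s' % os.environ['JENKINS_OAUTH_TOKEN'],
                'Content-Type': 'application/json',
            })
        print(urllib.request.urlopen(req).read())
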
diff --git a/tools/run_tests/python_utils/dockerjob.py b/tools/run_tests/python_utils/dockerjob.py
index 2f5285b26c..d2941c0811 100755
--- a/tools/run_tests/python_utils/dockerjob.py
+++ b/tools/run_tests/python_utils/dockerjob.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Helpers to run docker instances as jobs."""
from __future__ import print_function
@@ -28,102 +27,109 @@ _DEVNULL = open(os.devnull, 'w')
def random_name(base_name):
- """Randomizes given base name."""
- return '%s_%s' % (base_name, uuid.uuid4())
+ """Randomizes given base name."""
+ return '%s_%s' % (base_name, uuid.uuid4())
def docker_kill(cid):
- """Kills a docker container. Returns True if successful."""
- return subprocess.call(['docker','kill', str(cid)],
- stdin=subprocess.PIPE,
- stdout=_DEVNULL,
- stderr=subprocess.STDOUT) == 0
+ """Kills a docker container. Returns True if successful."""
+ return subprocess.call(
+ ['docker', 'kill', str(cid)],
+ stdin=subprocess.PIPE,
+ stdout=_DEVNULL,
+ stderr=subprocess.STDOUT) == 0
def docker_mapped_port(cid, port, timeout_seconds=15):
- """Get port mapped to internal given internal port for given container."""
- started = time.time()
- while time.time() - started < timeout_seconds:
- try:
- output = subprocess.check_output('docker port %s %s' % (cid, port),
- stderr=_DEVNULL,
- shell=True)
- return int(output.split(':', 2)[1])
- except subprocess.CalledProcessError as e:
- pass
- raise Exception('Failed to get exposed port %s for container %s.' %
- (port, cid))
+ """Get port mapped to internal given internal port for given container."""
+ started = time.time()
+ while time.time() - started < timeout_seconds:
+ try:
+ output = subprocess.check_output(
+ 'docker port %s %s' % (cid, port), stderr=_DEVNULL, shell=True)
+ return int(output.split(':', 2)[1])
+ except subprocess.CalledProcessError as e:
+ pass
+ raise Exception('Failed to get exposed port %s for container %s.' %
+ (port, cid))
def wait_for_healthy(cid, shortname, timeout_seconds):
- """Wait timeout_seconds for the container to become healthy"""
- started = time.time()
- while time.time() - started < timeout_seconds:
- try:
- output = subprocess.check_output(
- ['docker', 'inspect', '--format="{{.State.Health.Status}}"', cid],
- stderr=_DEVNULL)
- if output.strip('\n') == 'healthy':
- return
- except subprocess.CalledProcessError as e:
- pass
- time.sleep(1)
- raise Exception('Timed out waiting for %s (%s) to pass health check' %
- (shortname, cid))
+ """Wait timeout_seconds for the container to become healthy"""
+ started = time.time()
+ while time.time() - started < timeout_seconds:
+ try:
+ output = subprocess.check_output(
+ [
+ 'docker', 'inspect', '--format="{{.State.Health.Status}}"',
+ cid
+ ],
+ stderr=_DEVNULL)
+ if output.strip('\n') == 'healthy':
+ return
+ except subprocess.CalledProcessError as e:
+ pass
+ time.sleep(1)
+ raise Exception('Timed out waiting for %s (%s) to pass health check' %
+ (shortname, cid))
def finish_jobs(jobs):
- """Kills given docker containers and waits for corresponding jobs to finish"""
- for job in jobs:
- job.kill(suppress_failure=True)
+ """Kills given docker containers and waits for corresponding jobs to finish"""
+ for job in jobs:
+ job.kill(suppress_failure=True)
- while any(job.is_running() for job in jobs):
- time.sleep(1)
+ while any(job.is_running() for job in jobs):
+ time.sleep(1)
def image_exists(image):
- """Returns True if given docker image exists."""
- return subprocess.call(['docker','inspect', image],
- stdin=subprocess.PIPE,
- stdout=_DEVNULL,
- stderr=subprocess.STDOUT) == 0
+ """Returns True if given docker image exists."""
+ return subprocess.call(
+ ['docker', 'inspect', image],
+ stdin=subprocess.PIPE,
+ stdout=_DEVNULL,
+ stderr=subprocess.STDOUT) == 0
def remove_image(image, skip_nonexistent=False, max_retries=10):
- """Attempts to remove docker image with retries."""
- if skip_nonexistent and not image_exists(image):
- return True
- for attempt in range(0, max_retries):
- if subprocess.call(['docker','rmi', '-f', image],
- stdin=subprocess.PIPE,
- stdout=_DEVNULL,
- stderr=subprocess.STDOUT) == 0:
- return True
- time.sleep(2)
- print('Failed to remove docker image %s' % image)
- return False
+ """Attempts to remove docker image with retries."""
+ if skip_nonexistent and not image_exists(image):
+ return True
+ for attempt in range(0, max_retries):
+ if subprocess.call(
+ ['docker', 'rmi', '-f', image],
+ stdin=subprocess.PIPE,
+ stdout=_DEVNULL,
+ stderr=subprocess.STDOUT) == 0:
+ return True
+ time.sleep(2)
+ print('Failed to remove docker image %s' % image)
+ return False
class DockerJob:
- """Encapsulates a job"""
-
- def __init__(self, spec):
- self._spec = spec
- self._job = jobset.Job(spec, newline_on_success=True, travis=True, add_env={})
- self._container_name = spec.container_name
-
- def mapped_port(self, port):
- return docker_mapped_port(self._container_name, port)
-
- def wait_for_healthy(self, timeout_seconds):
- wait_for_healthy(self._container_name, self._spec.shortname, timeout_seconds)
-
- def kill(self, suppress_failure=False):
- """Sends kill signal to the container."""
- if suppress_failure:
- self._job.suppress_failure_message()
- return docker_kill(self._container_name)
-
- def is_running(self):
- """Polls a job and returns True if given job is still running."""
- return self._job.state() == jobset._RUNNING
+ """Encapsulates a job"""
+
+ def __init__(self, spec):
+ self._spec = spec
+ self._job = jobset.Job(
+ spec, newline_on_success=True, travis=True, add_env={})
+ self._container_name = spec.container_name
+
+ def mapped_port(self, port):
+ return docker_mapped_port(self._container_name, port)
+
+ def wait_for_healthy(self, timeout_seconds):
+ wait_for_healthy(self._container_name, self._spec.shortname,
+ timeout_seconds)
+
+ def kill(self, suppress_failure=False):
+ """Sends kill signal to the container."""
+ if suppress_failure:
+ self._job.suppress_failure_message()
+ return docker_kill(self._container_name)
+
+ def is_running(self):
+ """Polls a job and returns True if given job is still running."""
+ return self._job.state() == jobset._RUNNING
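
The helpers above are driven by the interop and performance runners. A minimal sketch of exercising the image helpers (illustrative only; it assumes python_utils is importable, as the run_tests drivers arrange via sys.path, and uses a made-up image tag):

# Illustrative sketch, not part of the patch.
import python_utils.dockerjob as dockerjob

IMAGE = 'grpc_fake_interop_image'  # hypothetical tag
if dockerjob.image_exists(IMAGE):
    # remove_image retries internally and returns False if removal fails
    dockerjob.remove_image(IMAGE, skip_nonexistent=True)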
diff --git a/tools/run_tests/python_utils/filter_pull_request_tests.py b/tools/run_tests/python_utils/filter_pull_request_tests.py
index e880734651..8e0dc708dd 100644
--- a/tools/run_tests/python_utils/filter_pull_request_tests.py
+++ b/tools/run_tests/python_utils/filter_pull_request_tests.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Filter out tests based on file differences compared to merge target branch"""
from __future__ import print_function
@@ -23,24 +22,25 @@ from subprocess import check_output
class TestSuite:
- """
+ """
Contains label to identify job as belonging to this test suite and
triggers to identify if changed files are relevant
"""
- def __init__(self, labels):
- """
+
+ def __init__(self, labels):
+ """
Build TestSuite to group tests based on labeling
:param label: strings that should match a jobs's platform, config, language, or test group
"""
- self.triggers = []
- self.labels = labels
+ self.triggers = []
+ self.labels = labels
- def add_trigger(self, trigger):
- """
+ def add_trigger(self, trigger):
+ """
Add a regex to list of triggers that determine if a changed file should run tests
:param trigger: regex matching file relevant to tests
"""
- self.triggers.append(trigger)
+ self.triggers.append(trigger)
# Create test suites
@@ -55,10 +55,11 @@ _RUBY_TEST_SUITE = TestSuite(['ruby'])
_LINUX_TEST_SUITE = TestSuite(['linux'])
_WINDOWS_TEST_SUITE = TestSuite(['windows'])
_MACOS_TEST_SUITE = TestSuite(['macos'])
-_ALL_TEST_SUITES = [_CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE,
- _NODE_TEST_SUITE, _OBJC_TEST_SUITE, _PHP_TEST_SUITE,
- _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE, _LINUX_TEST_SUITE,
- _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE]
+_ALL_TEST_SUITES = [
+ _CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE, _NODE_TEST_SUITE,
+ _OBJC_TEST_SUITE, _PHP_TEST_SUITE, _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE,
+ _LINUX_TEST_SUITE, _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE
+]
# Dictionary of whitelistable files where the key is a regex matching changed files
# and the value is a list of tests that should be run. An empty list means that
@@ -66,46 +67,46 @@ _ALL_TEST_SUITES = [_CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE,
# match any of these regexes will trigger all tests
# DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING (be careful even if you do)
_WHITELIST_DICT = {
- '^doc/': [],
- '^examples/': [],
- '^include/grpc\+\+/': [_CPP_TEST_SUITE],
- '^summerofcode/': [],
- '^src/cpp/': [_CPP_TEST_SUITE],
- '^src/csharp/': [_CSHARP_TEST_SUITE],
- '^src/objective\-c/': [_OBJC_TEST_SUITE],
- '^src/php/': [_PHP_TEST_SUITE],
- '^src/python/': [_PYTHON_TEST_SUITE],
- '^src/ruby/': [_RUBY_TEST_SUITE],
- '^templates/': [],
- '^test/core/': [_CORE_TEST_SUITE, _CPP_TEST_SUITE],
- '^test/cpp/': [_CPP_TEST_SUITE],
- '^test/distrib/cpp/': [_CPP_TEST_SUITE],
- '^test/distrib/csharp/': [_CSHARP_TEST_SUITE],
- '^test/distrib/php/': [_PHP_TEST_SUITE],
- '^test/distrib/python/': [_PYTHON_TEST_SUITE],
- '^test/distrib/ruby/': [_RUBY_TEST_SUITE],
- '^vsprojects/': [_WINDOWS_TEST_SUITE],
- 'composer\.json$': [_PHP_TEST_SUITE],
- 'config\.m4$': [_PHP_TEST_SUITE],
- 'CONTRIBUTING\.md$': [],
- 'Gemfile$': [_RUBY_TEST_SUITE],
- 'grpc\.def$': [_WINDOWS_TEST_SUITE],
- 'grpc\.gemspec$': [_RUBY_TEST_SUITE],
- 'gRPC\.podspec$': [_OBJC_TEST_SUITE],
- 'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE],
- 'gRPC\-ProtoRPC\.podspec$': [_OBJC_TEST_SUITE],
- 'gRPC\-RxLibrary\.podspec$': [_OBJC_TEST_SUITE],
- 'INSTALL\.md$': [],
- 'LICENSE$': [],
- 'MANIFEST\.md$': [],
- 'package\.json$': [_PHP_TEST_SUITE],
- 'package\.xml$': [_PHP_TEST_SUITE],
- 'PATENTS$': [],
- 'PYTHON\-MANIFEST\.in$': [_PYTHON_TEST_SUITE],
- 'README\.md$': [],
- 'requirements\.txt$': [_PYTHON_TEST_SUITE],
- 'setup\.cfg$': [_PYTHON_TEST_SUITE],
- 'setup\.py$': [_PYTHON_TEST_SUITE]
+ '^doc/': [],
+ '^examples/': [],
+ '^include/grpc\+\+/': [_CPP_TEST_SUITE],
+ '^summerofcode/': [],
+ '^src/cpp/': [_CPP_TEST_SUITE],
+ '^src/csharp/': [_CSHARP_TEST_SUITE],
+ '^src/objective\-c/': [_OBJC_TEST_SUITE],
+ '^src/php/': [_PHP_TEST_SUITE],
+ '^src/python/': [_PYTHON_TEST_SUITE],
+ '^src/ruby/': [_RUBY_TEST_SUITE],
+ '^templates/': [],
+ '^test/core/': [_CORE_TEST_SUITE, _CPP_TEST_SUITE],
+ '^test/cpp/': [_CPP_TEST_SUITE],
+ '^test/distrib/cpp/': [_CPP_TEST_SUITE],
+ '^test/distrib/csharp/': [_CSHARP_TEST_SUITE],
+ '^test/distrib/php/': [_PHP_TEST_SUITE],
+ '^test/distrib/python/': [_PYTHON_TEST_SUITE],
+ '^test/distrib/ruby/': [_RUBY_TEST_SUITE],
+ '^vsprojects/': [_WINDOWS_TEST_SUITE],
+ 'composer\.json$': [_PHP_TEST_SUITE],
+ 'config\.m4$': [_PHP_TEST_SUITE],
+ 'CONTRIBUTING\.md$': [],
+ 'Gemfile$': [_RUBY_TEST_SUITE],
+ 'grpc\.def$': [_WINDOWS_TEST_SUITE],
+ 'grpc\.gemspec$': [_RUBY_TEST_SUITE],
+ 'gRPC\.podspec$': [_OBJC_TEST_SUITE],
+ 'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE],
+ 'gRPC\-ProtoRPC\.podspec$': [_OBJC_TEST_SUITE],
+ 'gRPC\-RxLibrary\.podspec$': [_OBJC_TEST_SUITE],
+ 'INSTALL\.md$': [],
+ 'LICENSE$': [],
+ 'MANIFEST\.md$': [],
+ 'package\.json$': [_PHP_TEST_SUITE],
+ 'package\.xml$': [_PHP_TEST_SUITE],
+ 'PATENTS$': [],
+ 'PYTHON\-MANIFEST\.in$': [_PYTHON_TEST_SUITE],
+ 'README\.md$': [],
+ 'requirements\.txt$': [_PYTHON_TEST_SUITE],
+ 'setup\.cfg$': [_PYTHON_TEST_SUITE],
+ 'setup\.py$': [_PYTHON_TEST_SUITE]
}
# Regex that combines all keys in _WHITELIST_DICT
@@ -113,83 +114,88 @@ _ALL_TRIGGERS = "(" + ")|(".join(_WHITELIST_DICT.keys()) + ")"
# Add all triggers to their respective test suites
for trigger, test_suites in six.iteritems(_WHITELIST_DICT):
- for test_suite in test_suites:
- test_suite.add_trigger(trigger)
+ for test_suite in test_suites:
+ test_suite.add_trigger(trigger)
def _get_changed_files(base_branch):
- """
+ """
Get list of changed files between current branch and base of target merge branch
"""
- # Get file changes between branch and merge-base of specified branch
- # Not combined to be Windows friendly
- base_commit = check_output(["git", "merge-base", base_branch, "HEAD"]).rstrip()
- return check_output(["git", "diff", base_commit, "--name-only", "HEAD"]).splitlines()
+ # Get file changes between branch and merge-base of specified branch
+ # Not combined to be Windows friendly
+ base_commit = check_output(
+ ["git", "merge-base", base_branch, "HEAD"]).rstrip()
+ return check_output(
+ ["git", "diff", base_commit, "--name-only", "HEAD"]).splitlines()
def _can_skip_tests(file_names, triggers):
- """
+ """
Determines if tests are skippable based on if all files do not match list of regexes
:param file_names: list of changed files generated by _get_changed_files()
:param triggers: list of regexes matching file name that indicates tests should be run
:return: safe to skip tests
"""
- for file_name in file_names:
- if any(re.match(trigger, file_name) for trigger in triggers):
- return False
- return True
+ for file_name in file_names:
+ if any(re.match(trigger, file_name) for trigger in triggers):
+ return False
+ return True
def _remove_irrelevant_tests(tests, skippable_labels):
- """
+ """
Filters out tests by config or language - will not remove sanitizer tests
:param tests: list of all tests generated by run_tests_matrix.py
:param skippable_labels: list of languages and platforms with skippable tests
:return: list of relevant tests
"""
- # test.labels[0] is platform and test.labels[2] is language
- # We skip a test if both are considered safe to skip
- return [test for test in tests if test.labels[0] not in skippable_labels or \
- test.labels[2] not in skippable_labels]
+ # test.labels[0] is platform and test.labels[2] is language
+ # We skip a test if both are considered safe to skip
+ return [test for test in tests if test.labels[0] not in skippable_labels or \
+ test.labels[2] not in skippable_labels]
def affects_c_cpp(base_branch):
- """
+ """
Determines if a pull request's changes affect C/C++. This function exists because
there are pull request tests that only test C/C++ code
:param base_branch: branch that a pull request is requesting to merge into
:return: boolean indicating whether C/C++ changes are made in pull request
"""
- changed_files = _get_changed_files(base_branch)
- # Run all tests if any changed file is not in the whitelist dictionary
- for changed_file in changed_files:
- if not re.match(_ALL_TRIGGERS, changed_file):
- return True
- return not _can_skip_tests(changed_files, _CPP_TEST_SUITE.triggers + _CORE_TEST_SUITE.triggers)
+ changed_files = _get_changed_files(base_branch)
+ # Run all tests if any changed file is not in the whitelist dictionary
+ for changed_file in changed_files:
+ if not re.match(_ALL_TRIGGERS, changed_file):
+ return True
+ return not _can_skip_tests(
+ changed_files, _CPP_TEST_SUITE.triggers + _CORE_TEST_SUITE.triggers)
def filter_tests(tests, base_branch):
- """
+ """
Filters out tests that are safe to ignore
:param tests: list of all tests generated by run_tests_matrix.py
:return: list of relevant tests
"""
- print('Finding file differences between gRPC %s branch and pull request...\n' % base_branch)
- changed_files = _get_changed_files(base_branch)
- for changed_file in changed_files:
- print(' %s' % changed_file)
- print('')
-
- # Run all tests if any changed file is not in the whitelist dictionary
- for changed_file in changed_files:
- if not re.match(_ALL_TRIGGERS, changed_file):
- return(tests)
- # Figure out which language and platform tests to run
- skippable_labels = []
- for test_suite in _ALL_TEST_SUITES:
- if _can_skip_tests(changed_files, test_suite.triggers):
- for label in test_suite.labels:
- print(' %s tests safe to skip' % label)
- skippable_labels.append(label)
- tests = _remove_irrelevant_tests(tests, skippable_labels)
- return tests
+ print(
+ 'Finding file differences between gRPC %s branch and pull request...\n'
+ % base_branch)
+ changed_files = _get_changed_files(base_branch)
+ for changed_file in changed_files:
+ print(' %s' % changed_file)
+ print('')
+
+ # Run all tests if any changed file is not in the whitelist dictionary
+ for changed_file in changed_files:
+ if not re.match(_ALL_TRIGGERS, changed_file):
+ return (tests)
+ # Figure out which language and platform tests to run
+ skippable_labels = []
+ for test_suite in _ALL_TEST_SUITES:
+ if _can_skip_tests(changed_files, test_suite.triggers):
+ for label in test_suite.labels:
+ print(' %s tests safe to skip' % label)
+ skippable_labels.append(label)
+ tests = _remove_irrelevant_tests(tests, skippable_labels)
+ return tests
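
The whitelist logic above reduces to one rule: a suite may be skipped only if no changed file matches any of its trigger regexes (and every changed file matches some whitelist entry). A self-contained sketch of that rule with made-up inputs (illustrative only, not part of the patch):

# Illustrative sketch, not part of the patch.
import re

ruby_triggers = [r'^src/ruby/', r'Gemfile$', r'grpc\.gemspec$']
changed_files = ['doc/statuscodes.md', 'src/ruby/spec/client_server_spec.rb']

can_skip_ruby = not any(
    re.match(trigger, name)
    for name in changed_files for trigger in ruby_triggers)
print('ruby tests skippable: %s' % can_skip_ruby)  # False: a ruby file changed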
diff --git a/tools/run_tests/python_utils/jobset.py b/tools/run_tests/python_utils/jobset.py
index 85eef444ef..454d09bf0d 100755
--- a/tools/run_tests/python_utils/jobset.py
+++ b/tools/run_tests/python_utils/jobset.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Run a group of subprocesses and then finish."""
from __future__ import print_function
@@ -28,11 +27,9 @@ import tempfile
import time
import errno
-
# cpu cost measurement
measure_cpu_costs = False
-
_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
_MAX_RESULT_SIZE = 8192
@@ -42,63 +39,60 @@ _MAX_RESULT_SIZE = 8192
# characters to the PR description, which leak into the environment here
# and cause failures.
def strip_non_ascii_chars(s):
- return ''.join(c for c in s if ord(c) < 128)
+ return ''.join(c for c in s if ord(c) < 128)
def sanitized_environment(env):
- sanitized = {}
- for key, value in env.items():
- sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
- return sanitized
+ sanitized = {}
+ for key, value in env.items():
+ sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
+ return sanitized
def platform_string():
- if platform.system() == 'Windows':
- return 'windows'
- elif platform.system()[:7] == 'MSYS_NT':
- return 'windows'
- elif platform.system() == 'Darwin':
- return 'mac'
- elif platform.system() == 'Linux':
- return 'linux'
- else:
- return 'posix'
+ if platform.system() == 'Windows':
+ return 'windows'
+ elif platform.system()[:7] == 'MSYS_NT':
+ return 'windows'
+ elif platform.system() == 'Darwin':
+ return 'mac'
+ elif platform.system() == 'Linux':
+ return 'linux'
+ else:
+ return 'posix'
# setup a signal handler so that signal.pause registers 'something'
# when a child finishes
# not using futures and threading to avoid a dependency on subprocess32
if platform_string() == 'windows':
- pass
-else:
- def alarm_handler(unused_signum, unused_frame):
pass
+else:
- signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
- signal.signal(signal.SIGALRM, alarm_handler)
+ def alarm_handler(unused_signum, unused_frame):
+ pass
+ signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
+ signal.signal(signal.SIGALRM, alarm_handler)
_SUCCESS = object()
_FAILURE = object()
_RUNNING = object()
_KILLED = object()
-
_COLORS = {
- 'red': [ 31, 0 ],
- 'green': [ 32, 0 ],
- 'yellow': [ 33, 0 ],
- 'lightgray': [ 37, 0],
- 'gray': [ 30, 1 ],
- 'purple': [ 35, 0 ],
- 'cyan': [ 36, 0 ]
- }
-
+ 'red': [31, 0],
+ 'green': [32, 0],
+ 'yellow': [33, 0],
+ 'lightgray': [37, 0],
+ 'gray': [30, 1],
+ 'purple': [35, 0],
+ 'cyan': [36, 0]
+}
_BEGINNING_OF_LINE = '\x1b[0G'
_CLEAR_LINE = '\x1b[2K'
-
_TAG_COLOR = {
'FAILED': 'red',
'FLAKE': 'purple',
@@ -111,392 +105,435 @@ _TAG_COLOR = {
'SUCCESS': 'green',
'IDLE': 'gray',
'SKIPPED': 'cyan'
- }
+}
_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=_FORMAT)
def eintr_be_gone(fn):
- """Run fn until it doesn't stop because of EINTR"""
- while True:
- try:
- return fn()
- except IOError, e:
- if e.errno != errno.EINTR:
- raise
-
+ """Run fn until it doesn't stop because of EINTR"""
+ while True:
+ try:
+ return fn()
+ except IOError, e:
+ if e.errno != errno.EINTR:
+ raise
def message(tag, msg, explanatory_text=None, do_newline=False):
- if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
- return
- message.old_tag = tag
- message.old_msg = msg
- while True:
- try:
- if platform_string() == 'windows' or not sys.stdout.isatty():
- if explanatory_text:
- logging.info(explanatory_text)
- logging.info('%s: %s', tag, msg)
- else:
- sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
- _BEGINNING_OF_LINE,
- _CLEAR_LINE,
- '\n%s' % explanatory_text if explanatory_text is not None else '',
- _COLORS[_TAG_COLOR[tag]][1],
- _COLORS[_TAG_COLOR[tag]][0],
- tag,
- msg,
- '\n' if do_newline or explanatory_text is not None else ''))
- sys.stdout.flush()
- return
- except IOError, e:
- if e.errno != errno.EINTR:
- raise
+ if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
+ return
+ message.old_tag = tag
+ message.old_msg = msg
+ while True:
+ try:
+ if platform_string() == 'windows' or not sys.stdout.isatty():
+ if explanatory_text:
+ logging.info(explanatory_text)
+ logging.info('%s: %s', tag, msg)
+ else:
+ sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
+ _BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' % explanatory_text
+ if explanatory_text is not None else '',
+ _COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
+ tag, msg, '\n'
+ if do_newline or explanatory_text is not None else ''))
+ sys.stdout.flush()
+ return
+ except IOError, e:
+ if e.errno != errno.EINTR:
+ raise
+
message.old_tag = ''
message.old_msg = ''
+
def which(filename):
- if '/' in filename:
- return filename
- for path in os.environ['PATH'].split(os.pathsep):
- if os.path.exists(os.path.join(path, filename)):
- return os.path.join(path, filename)
- raise Exception('%s not found' % filename)
+ if '/' in filename:
+ return filename
+ for path in os.environ['PATH'].split(os.pathsep):
+ if os.path.exists(os.path.join(path, filename)):
+ return os.path.join(path, filename)
+ raise Exception('%s not found' % filename)
class JobSpec(object):
- """Specifies what to run for a job."""
-
- def __init__(self, cmdline, shortname=None, environ=None,
- cwd=None, shell=False, timeout_seconds=5*60, flake_retries=0,
- timeout_retries=0, kill_handler=None, cpu_cost=1.0,
- verbose_success=False):
- """
+ """Specifies what to run for a job."""
+
+ def __init__(self,
+ cmdline,
+ shortname=None,
+ environ=None,
+ cwd=None,
+ shell=False,
+ timeout_seconds=5 * 60,
+ flake_retries=0,
+ timeout_retries=0,
+ kill_handler=None,
+ cpu_cost=1.0,
+ verbose_success=False):
+ """
Arguments:
cmdline: a list of arguments to pass as the command line
environ: a dictionary of environment variables to set in the child process
kill_handler: a handler that will be called whenever job.kill() is invoked
cpu_cost: number of cores per second this job needs
"""
- if environ is None:
- environ = {}
- self.cmdline = cmdline
- self.environ = environ
- self.shortname = cmdline[0] if shortname is None else shortname
- self.cwd = cwd
- self.shell = shell
- self.timeout_seconds = timeout_seconds
- self.flake_retries = flake_retries
- self.timeout_retries = timeout_retries
- self.kill_handler = kill_handler
- self.cpu_cost = cpu_cost
- self.verbose_success = verbose_success
-
- def identity(self):
- return '%r %r' % (self.cmdline, self.environ)
-
- def __hash__(self):
- return hash(self.identity())
-
- def __cmp__(self, other):
- return self.identity() == other.identity()
-
- def __repr__(self):
- return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname, self.cmdline)
-
- def __str__(self):
- return '%s: %s %s' % (self.shortname,
- ' '.join('%s=%s' % kv for kv in self.environ.items()),
- ' '.join(self.cmdline))
+ if environ is None:
+ environ = {}
+ self.cmdline = cmdline
+ self.environ = environ
+ self.shortname = cmdline[0] if shortname is None else shortname
+ self.cwd = cwd
+ self.shell = shell
+ self.timeout_seconds = timeout_seconds
+ self.flake_retries = flake_retries
+ self.timeout_retries = timeout_retries
+ self.kill_handler = kill_handler
+ self.cpu_cost = cpu_cost
+ self.verbose_success = verbose_success
+
+ def identity(self):
+ return '%r %r' % (self.cmdline, self.environ)
+
+ def __hash__(self):
+ return hash(self.identity())
+
+ def __cmp__(self, other):
+ return self.identity() == other.identity()
+
+ def __repr__(self):
+ return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname,
+ self.cmdline)
+
+ def __str__(self):
+ return '%s: %s %s' % (self.shortname, ' '.join(
+ '%s=%s' % kv
+ for kv in self.environ.items()), ' '.join(self.cmdline))
class JobResult(object):
- def __init__(self):
- self.state = 'UNKNOWN'
- self.returncode = -1
- self.elapsed_time = 0
- self.num_failures = 0
- self.retries = 0
- self.message = ''
- self.cpu_estimated = 1
- self.cpu_measured = 1
+
+ def __init__(self):
+ self.state = 'UNKNOWN'
+ self.returncode = -1
+ self.elapsed_time = 0
+ self.num_failures = 0
+ self.retries = 0
+ self.message = ''
+ self.cpu_estimated = 1
+ self.cpu_measured = 1
def read_from_start(f):
- f.seek(0)
- return f.read()
+ f.seek(0)
+ return f.read()
class Job(object):
- """Manages one job."""
-
- def __init__(self, spec, newline_on_success, travis, add_env,
- quiet_success=False):
- self._spec = spec
- self._newline_on_success = newline_on_success
- self._travis = travis
- self._add_env = add_env.copy()
- self._retries = 0
- self._timeout_retries = 0
- self._suppress_failure_message = False
- self._quiet_success = quiet_success
- if not self._quiet_success:
- message('START', spec.shortname, do_newline=self._travis)
- self.result = JobResult()
- self.start()
-
- def GetSpec(self):
- return self._spec
-
- def start(self):
- self._tempfile = tempfile.TemporaryFile()
- env = dict(os.environ)
- env.update(self._spec.environ)
- env.update(self._add_env)
- env = sanitized_environment(env)
- self._start = time.time()
- cmdline = self._spec.cmdline
- # The Unix time command is finicky when used with MSBuild, so we don't use it
- # with jobs that run MSBuild.
- global measure_cpu_costs
- if measure_cpu_costs and not 'vsprojects\\build' in cmdline[0]:
- cmdline = ['time', '-p'] + cmdline
- else:
- measure_cpu_costs = False
- try_start = lambda: subprocess.Popen(args=cmdline,
- stderr=subprocess.STDOUT,
- stdout=self._tempfile,
- cwd=self._spec.cwd,
- shell=self._spec.shell,
- env=env)
- delay = 0.3
- for i in range(0, 4):
- try:
- self._process = try_start()
- break
- except OSError:
- message('WARNING', 'Failed to start %s, retrying in %f seconds' % (self._spec.shortname, delay))
- time.sleep(delay)
- delay *= 2
- else:
- self._process = try_start()
- self._state = _RUNNING
-
- def state(self):
- """Poll current state of the job. Prints messages at completion."""
- def stdout(self=self):
- stdout = read_from_start(self._tempfile)
- self.result.message = stdout[-_MAX_RESULT_SIZE:]
- return stdout
- if self._state == _RUNNING and self._process.poll() is not None:
- elapsed = time.time() - self._start
- self.result.elapsed_time = elapsed
- if self._process.returncode != 0:
- if self._retries < self._spec.flake_retries:
- message('FLAKE', '%s [ret=%d, pid=%d]' % (
- self._spec.shortname, self._process.returncode, self._process.pid),
- stdout(), do_newline=True)
- self._retries += 1
- self.result.num_failures += 1
- self.result.retries = self._timeout_retries + self._retries
- # NOTE: job is restarted regardless of jobset's max_time setting
- self.start()
- else:
- self._state = _FAILURE
- if not self._suppress_failure_message:
- message('FAILED', '%s [ret=%d, pid=%d, time=%.1fsec]' % (
- self._spec.shortname, self._process.returncode, self._process.pid, elapsed),
- stdout(), do_newline=True)
- self.result.state = 'FAILED'
- self.result.num_failures += 1
- self.result.returncode = self._process.returncode
- else:
- self._state = _SUCCESS
- measurement = ''
- if measure_cpu_costs:
- m = re.search(r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)', stdout())
- real = float(m.group(1))
- user = float(m.group(2))
- sys = float(m.group(3))
- if real > 0.5:
- cores = (user + sys) / real
- self.result.cpu_measured = float('%.01f' % cores)
- self.result.cpu_estimated = float('%.01f' % self._spec.cpu_cost)
- measurement = '; cpu_cost=%.01f; estimated=%.01f' % (self.result.cpu_measured, self.result.cpu_estimated)
+ """Manages one job."""
+
+ def __init__(self,
+ spec,
+ newline_on_success,
+ travis,
+ add_env,
+ quiet_success=False):
+ self._spec = spec
+ self._newline_on_success = newline_on_success
+ self._travis = travis
+ self._add_env = add_env.copy()
+ self._retries = 0
+ self._timeout_retries = 0
+ self._suppress_failure_message = False
+ self._quiet_success = quiet_success
if not self._quiet_success:
- message('PASSED', '%s [time=%.1fsec, retries=%d:%d%s]' % (
- self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
- stdout() if self._spec.verbose_success else None,
- do_newline=self._newline_on_success or self._travis)
- self.result.state = 'PASSED'
- elif (self._state == _RUNNING and
- self._spec.timeout_seconds is not None and
- time.time() - self._start > self._spec.timeout_seconds):
- elapsed = time.time() - self._start
- self.result.elapsed_time = elapsed
- if self._timeout_retries < self._spec.timeout_retries:
- message('TIMEOUT_FLAKE', '%s [pid=%d]' % (self._spec.shortname, self._process.pid), stdout(), do_newline=True)
- self._timeout_retries += 1
- self.result.num_failures += 1
- self.result.retries = self._timeout_retries + self._retries
- if self._spec.kill_handler:
- self._spec.kill_handler(self)
- self._process.terminate()
- # NOTE: job is restarted regardless of jobset's max_time setting
+ message('START', spec.shortname, do_newline=self._travis)
+ self.result = JobResult()
self.start()
- else:
- message('TIMEOUT', '%s [pid=%d, time=%.1fsec]' % (self._spec.shortname, self._process.pid, elapsed), stdout(), do_newline=True)
- self.kill()
- self.result.state = 'TIMEOUT'
- self.result.num_failures += 1
- return self._state
- def kill(self):
- if self._state == _RUNNING:
- self._state = _KILLED
- if self._spec.kill_handler:
- self._spec.kill_handler(self)
- self._process.terminate()
-
- def suppress_failure_message(self):
- self._suppress_failure_message = True
+ def GetSpec(self):
+ return self._spec
+
+ def start(self):
+ self._tempfile = tempfile.TemporaryFile()
+ env = dict(os.environ)
+ env.update(self._spec.environ)
+ env.update(self._add_env)
+ env = sanitized_environment(env)
+ self._start = time.time()
+ cmdline = self._spec.cmdline
+ # The Unix time command is finicky when used with MSBuild, so we don't use it
+ # with jobs that run MSBuild.
+ global measure_cpu_costs
+ if measure_cpu_costs and not 'vsprojects\\build' in cmdline[0]:
+ cmdline = ['time', '-p'] + cmdline
+ else:
+ measure_cpu_costs = False
+ try_start = lambda: subprocess.Popen(args=cmdline,
+ stderr=subprocess.STDOUT,
+ stdout=self._tempfile,
+ cwd=self._spec.cwd,
+ shell=self._spec.shell,
+ env=env)
+ delay = 0.3
+ for i in range(0, 4):
+ try:
+ self._process = try_start()
+ break
+ except OSError:
+ message('WARNING', 'Failed to start %s, retrying in %f seconds'
+ % (self._spec.shortname, delay))
+ time.sleep(delay)
+ delay *= 2
+ else:
+ self._process = try_start()
+ self._state = _RUNNING
+
+ def state(self):
+ """Poll current state of the job. Prints messages at completion."""
+
+ def stdout(self=self):
+ stdout = read_from_start(self._tempfile)
+ self.result.message = stdout[-_MAX_RESULT_SIZE:]
+ return stdout
+
+ if self._state == _RUNNING and self._process.poll() is not None:
+ elapsed = time.time() - self._start
+ self.result.elapsed_time = elapsed
+ if self._process.returncode != 0:
+ if self._retries < self._spec.flake_retries:
+ message(
+ 'FLAKE',
+ '%s [ret=%d, pid=%d]' %
+ (self._spec.shortname, self._process.returncode,
+ self._process.pid),
+ stdout(),
+ do_newline=True)
+ self._retries += 1
+ self.result.num_failures += 1
+ self.result.retries = self._timeout_retries + self._retries
+ # NOTE: job is restarted regardless of jobset's max_time setting
+ self.start()
+ else:
+ self._state = _FAILURE
+ if not self._suppress_failure_message:
+ message(
+ 'FAILED',
+ '%s [ret=%d, pid=%d, time=%.1fsec]' %
+ (self._spec.shortname, self._process.returncode,
+ self._process.pid, elapsed),
+ stdout(),
+ do_newline=True)
+ self.result.state = 'FAILED'
+ self.result.num_failures += 1
+ self.result.returncode = self._process.returncode
+ else:
+ self._state = _SUCCESS
+ measurement = ''
+ if measure_cpu_costs:
+ m = re.search(
+ r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)',
+ stdout())
+ real = float(m.group(1))
+ user = float(m.group(2))
+ sys = float(m.group(3))
+ if real > 0.5:
+ cores = (user + sys) / real
+ self.result.cpu_measured = float('%.01f' % cores)
+ self.result.cpu_estimated = float('%.01f' %
+ self._spec.cpu_cost)
+ measurement = '; cpu_cost=%.01f; estimated=%.01f' % (
+ self.result.cpu_measured, self.result.cpu_estimated)
+ if not self._quiet_success:
+ message(
+ 'PASSED',
+ '%s [time=%.1fsec, retries=%d:%d%s]' %
+ (self._spec.shortname, elapsed, self._retries,
+ self._timeout_retries, measurement),
+ stdout() if self._spec.verbose_success else None,
+ do_newline=self._newline_on_success or self._travis)
+ self.result.state = 'PASSED'
+ elif (self._state == _RUNNING and
+ self._spec.timeout_seconds is not None and
+ time.time() - self._start > self._spec.timeout_seconds):
+ elapsed = time.time() - self._start
+ self.result.elapsed_time = elapsed
+ if self._timeout_retries < self._spec.timeout_retries:
+ message(
+ 'TIMEOUT_FLAKE',
+ '%s [pid=%d]' % (self._spec.shortname, self._process.pid),
+ stdout(),
+ do_newline=True)
+ self._timeout_retries += 1
+ self.result.num_failures += 1
+ self.result.retries = self._timeout_retries + self._retries
+ if self._spec.kill_handler:
+ self._spec.kill_handler(self)
+ self._process.terminate()
+ # NOTE: job is restarted regardless of jobset's max_time setting
+ self.start()
+ else:
+ message(
+ 'TIMEOUT',
+ '%s [pid=%d, time=%.1fsec]' %
+ (self._spec.shortname, self._process.pid, elapsed),
+ stdout(),
+ do_newline=True)
+ self.kill()
+ self.result.state = 'TIMEOUT'
+ self.result.num_failures += 1
+ return self._state
+
+ def kill(self):
+ if self._state == _RUNNING:
+ self._state = _KILLED
+ if self._spec.kill_handler:
+ self._spec.kill_handler(self)
+ self._process.terminate()
+
+ def suppress_failure_message(self):
+ self._suppress_failure_message = True
class Jobset(object):
- """Manages one run of jobs."""
-
- def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic, newline_on_success, travis,
- stop_on_failure, add_env, quiet_success, max_time):
- self._running = set()
- self._check_cancelled = check_cancelled
- self._cancelled = False
- self._failures = 0
- self._completed = 0
- self._maxjobs = maxjobs
- self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
- self._newline_on_success = newline_on_success
- self._travis = travis
- self._stop_on_failure = stop_on_failure
- self._add_env = add_env
- self._quiet_success = quiet_success
- self._max_time = max_time
- self.resultset = {}
- self._remaining = None
- self._start_time = time.time()
-
- def set_remaining(self, remaining):
- self._remaining = remaining
-
- def get_num_failures(self):
- return self._failures
-
- def cpu_cost(self):
- c = 0
- for job in self._running:
- c += job._spec.cpu_cost
- return c
-
- def start(self, spec):
- """Start a job. Return True on success, False on failure."""
- while True:
- if self._max_time > 0 and time.time() - self._start_time > self._max_time:
- skipped_job_result = JobResult()
- skipped_job_result.state = 'SKIPPED'
- message('SKIPPED', spec.shortname, do_newline=True)
- self.resultset[spec.shortname] = [skipped_job_result]
+ """Manages one run of jobs."""
+
+ def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic,
+ newline_on_success, travis, stop_on_failure, add_env,
+ quiet_success, max_time):
+ self._running = set()
+ self._check_cancelled = check_cancelled
+ self._cancelled = False
+ self._failures = 0
+ self._completed = 0
+ self._maxjobs = maxjobs
+ self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
+ self._newline_on_success = newline_on_success
+ self._travis = travis
+ self._stop_on_failure = stop_on_failure
+ self._add_env = add_env
+ self._quiet_success = quiet_success
+ self._max_time = max_time
+ self.resultset = {}
+ self._remaining = None
+ self._start_time = time.time()
+
+ def set_remaining(self, remaining):
+ self._remaining = remaining
+
+ def get_num_failures(self):
+ return self._failures
+
+ def cpu_cost(self):
+ c = 0
+ for job in self._running:
+ c += job._spec.cpu_cost
+ return c
+
+ def start(self, spec):
+ """Start a job. Return True on success, False on failure."""
+ while True:
+ if self._max_time > 0 and time.time(
+ ) - self._start_time > self._max_time:
+ skipped_job_result = JobResult()
+ skipped_job_result.state = 'SKIPPED'
+ message('SKIPPED', spec.shortname, do_newline=True)
+ self.resultset[spec.shortname] = [skipped_job_result]
+ return True
+ if self.cancelled(): return False
+ current_cpu_cost = self.cpu_cost()
+ if current_cpu_cost == 0: break
+ if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
+ if len(self._running) < self._maxjobs_cpu_agnostic:
+ break
+ self.reap(spec.shortname, spec.cpu_cost)
+ if self.cancelled(): return False
+ job = Job(spec, self._newline_on_success, self._travis, self._add_env,
+ self._quiet_success)
+ self._running.add(job)
+ if job.GetSpec().shortname not in self.resultset:
+ self.resultset[job.GetSpec().shortname] = []
return True
- if self.cancelled(): return False
- current_cpu_cost = self.cpu_cost()
- if current_cpu_cost == 0: break
- if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
- if len(self._running) < self._maxjobs_cpu_agnostic:
- break
- self.reap(spec.shortname, spec.cpu_cost)
- if self.cancelled(): return False
- job = Job(spec,
- self._newline_on_success,
- self._travis,
- self._add_env,
- self._quiet_success)
- self._running.add(job)
- if job.GetSpec().shortname not in self.resultset:
- self.resultset[job.GetSpec().shortname] = []
- return True
-
- def reap(self, waiting_for=None, waiting_for_cost=None):
- """Collect the dead jobs."""
- while self._running:
- dead = set()
- for job in self._running:
- st = eintr_be_gone(lambda: job.state())
- if st == _RUNNING: continue
- if st == _FAILURE or st == _KILLED:
- self._failures += 1
- if self._stop_on_failure:
- self._cancelled = True
+
+ def reap(self, waiting_for=None, waiting_for_cost=None):
+ """Collect the dead jobs."""
+ while self._running:
+ dead = set()
for job in self._running:
- job.kill()
- dead.add(job)
- break
- for job in dead:
- self._completed += 1
- if not self._quiet_success or job.result.state != 'PASSED':
- self.resultset[job.GetSpec().shortname].append(job.result)
- self._running.remove(job)
- if dead: return
- if not self._travis and platform_string() != 'windows':
- rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
- if self._remaining is not None and self._completed > 0:
- now = time.time()
- sofar = now - self._start_time
- remaining = sofar / self._completed * (self._remaining + len(self._running))
- rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
- if waiting_for is not None:
- wstr = ' next: %s @ %.2f cpu' % (waiting_for, waiting_for_cost)
- else:
- wstr = ''
- message('WAITING', '%s%d jobs running, %d complete, %d failed (load %.2f)%s' % (
- rstr, len(self._running), self._completed, self._failures, self.cpu_cost(), wstr))
- if platform_string() == 'windows':
- time.sleep(0.1)
- else:
- signal.alarm(10)
- signal.pause()
-
- def cancelled(self):
- """Poll for cancellation."""
- if self._cancelled: return True
- if not self._check_cancelled(): return False
- for job in self._running:
- job.kill()
- self._cancelled = True
- return True
-
- def finish(self):
- while self._running:
- if self.cancelled(): pass # poll cancellation
- self.reap()
- if platform_string() != 'windows':
- signal.alarm(0)
- return not self.cancelled() and self._failures == 0
+ st = eintr_be_gone(lambda: job.state())
+ if st == _RUNNING: continue
+ if st == _FAILURE or st == _KILLED:
+ self._failures += 1
+ if self._stop_on_failure:
+ self._cancelled = True
+ for job in self._running:
+ job.kill()
+ dead.add(job)
+ break
+ for job in dead:
+ self._completed += 1
+ if not self._quiet_success or job.result.state != 'PASSED':
+ self.resultset[job.GetSpec().shortname].append(job.result)
+ self._running.remove(job)
+ if dead: return
+ if not self._travis and platform_string() != 'windows':
+ rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
+ if self._remaining is not None and self._completed > 0:
+ now = time.time()
+ sofar = now - self._start_time
+ remaining = sofar / self._completed * (
+ self._remaining + len(self._running))
+ rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
+ if waiting_for is not None:
+ wstr = ' next: %s @ %.2f cpu' % (waiting_for,
+ waiting_for_cost)
+ else:
+ wstr = ''
+ message(
+ 'WAITING',
+ '%s%d jobs running, %d complete, %d failed (load %.2f)%s' %
+ (rstr, len(self._running), self._completed, self._failures,
+ self.cpu_cost(), wstr))
+ if platform_string() == 'windows':
+ time.sleep(0.1)
+ else:
+ signal.alarm(10)
+ signal.pause()
+
+ def cancelled(self):
+ """Poll for cancellation."""
+ if self._cancelled: return True
+ if not self._check_cancelled(): return False
+ for job in self._running:
+ job.kill()
+ self._cancelled = True
+ return True
+
+ def finish(self):
+ while self._running:
+ if self.cancelled(): pass # poll cancellation
+ self.reap()
+ if platform_string() != 'windows':
+ signal.alarm(0)
+ return not self.cancelled() and self._failures == 0
def _never_cancelled():
- return False
+ return False
def tag_remaining(xs):
- staging = []
- for x in xs:
- staging.append(x)
- if len(staging) > 5000:
- yield (staging.pop(0), None)
- n = len(staging)
- for i, x in enumerate(staging):
- yield (x, n - i - 1)
+ staging = []
+ for x in xs:
+ staging.append(x)
+ if len(staging) > 5000:
+ yield (staging.pop(0), None)
+ n = len(staging)
+ for i, x in enumerate(staging):
+ yield (x, n - i - 1)
def run(cmdlines,
@@ -511,23 +548,23 @@ def run(cmdlines,
skip_jobs=False,
quiet_success=False,
max_time=-1):
- if skip_jobs:
- resultset = {}
- skipped_job_result = JobResult()
- skipped_job_result.state = 'SKIPPED'
- for job in cmdlines:
- message('SKIPPED', job.shortname, do_newline=True)
- resultset[job.shortname] = [skipped_job_result]
- return 0, resultset
- js = Jobset(check_cancelled,
- maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
- maxjobs_cpu_agnostic if maxjobs_cpu_agnostic is not None else _DEFAULT_MAX_JOBS,
- newline_on_success, travis, stop_on_failure, add_env,
- quiet_success, max_time)
- for cmdline, remaining in tag_remaining(cmdlines):
- if not js.start(cmdline):
- break
- if remaining is not None:
- js.set_remaining(remaining)
- js.finish()
- return js.get_num_failures(), js.resultset
+ if skip_jobs:
+ resultset = {}
+ skipped_job_result = JobResult()
+ skipped_job_result.state = 'SKIPPED'
+ for job in cmdlines:
+ message('SKIPPED', job.shortname, do_newline=True)
+ resultset[job.shortname] = [skipped_job_result]
+ return 0, resultset
+ js = Jobset(check_cancelled, maxjobs if maxjobs is not None else
+ _DEFAULT_MAX_JOBS, maxjobs_cpu_agnostic
+ if maxjobs_cpu_agnostic is not None else _DEFAULT_MAX_JOBS,
+ newline_on_success, travis, stop_on_failure, add_env,
+ quiet_success, max_time)
+ for cmdline, remaining in tag_remaining(cmdlines):
+ if not js.start(cmdline):
+ break
+ if remaining is not None:
+ js.set_remaining(remaining)
+ js.finish()
+ return js.get_num_failures(), js.resultset
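
A minimal use of the jobset API above, in the style of the run_tests.py drivers that consume it (illustrative only, not part of the patch; keyword defaults of jobset.run() beyond what this hunk shows are assumptions):

# Illustrative sketch, not part of the patch.
import python_utils.jobset as jobset

specs = [
    jobset.JobSpec(['echo', 'hello'], shortname='hello_job'),
    jobset.JobSpec(['false'], shortname='flaky_job', flake_retries=1),
]
num_failures, resultset = jobset.run(
    specs, maxjobs=2, newline_on_success=True)
print('failures: %d' % num_failures)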
diff --git a/tools/run_tests/python_utils/port_server.py b/tools/run_tests/python_utils/port_server.py
index e8ac71af8d..83e09c09d0 100755
--- a/tools/run_tests/python_utils/port_server.py
+++ b/tools/run_tests/python_utils/port_server.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Manage TCP ports for unit tests; started by run_tests.py"""
import argparse
@@ -27,17 +26,14 @@ from SocketServer import ThreadingMixIn
import threading
import platform
-
# increment this number whenever making a change to ensure that
# the changes are picked up by running CI servers
# note that all changes must be backwards compatible
_MY_VERSION = 20
-
if len(sys.argv) == 2 and sys.argv[1] == 'dump_version':
- print _MY_VERSION
- sys.exit(0)
-
+ print _MY_VERSION
+ sys.exit(0)
argp = argparse.ArgumentParser(description='Server for httpcli_test')
argp.add_argument('-p', '--port', default=12345, type=int)
@@ -45,11 +41,11 @@ argp.add_argument('-l', '--logfile', default=None, type=str)
args = argp.parse_args()
if args.logfile is not None:
- sys.stdin.close()
- sys.stderr.close()
- sys.stdout.close()
- sys.stderr = open(args.logfile, 'w')
- sys.stdout = sys.stderr
+ sys.stdin.close()
+ sys.stderr.close()
+ sys.stdout.close()
+ sys.stderr = open(args.logfile, 'w')
+ sys.stdout = sys.stderr
print 'port server running on port %d' % args.port
@@ -61,74 +57,81 @@ mu = threading.Lock()
# https://cs.chromium.org/chromium/src/net/base/port_util.cc). When one of these
# ports is used in a Cronet test, the test would fail (see issue #12149). These
# ports must be excluded from pool.
-cronet_restricted_ports = [1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37,
- 42, 43, 53, 77, 79, 87, 95, 101, 102, 103, 104, 109,
- 110, 111, 113, 115, 117, 119, 123, 135, 139, 143,
- 179, 389, 465, 512, 513, 514, 515, 526, 530, 531,
- 532, 540, 556, 563, 587, 601, 636, 993, 995, 2049,
- 3659, 4045, 6000, 6665, 6666, 6667, 6668, 6669, 6697]
+cronet_restricted_ports = [
+ 1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 77, 79, 87,
+ 95, 101, 102, 103, 104, 109, 110, 111, 113, 115, 117, 119, 123, 135, 139,
+ 143, 179, 389, 465, 512, 513, 514, 515, 526, 530, 531, 532, 540, 556, 563,
+ 587, 601, 636, 993, 995, 2049, 3659, 4045, 6000, 6665, 6666, 6667, 6668,
+ 6669, 6697
+]
+
def can_connect(port):
- # this test is only really useful on unices where SO_REUSE_PORT is available
- # so on Windows, where this test is expensive, skip it
- if platform.system() == 'Windows': return False
- s = socket.socket()
- try:
- s.connect(('localhost', port))
- return True
- except socket.error, e:
- return False
- finally:
- s.close()
+ # this test is only really useful on unices where SO_REUSE_PORT is available
+ # so on Windows, where this test is expensive, skip it
+ if platform.system() == 'Windows': return False
+ s = socket.socket()
+ try:
+ s.connect(('localhost', port))
+ return True
+ except socket.error, e:
+ return False
+ finally:
+ s.close()
+
def can_bind(port, proto):
- s = socket.socket(proto, socket.SOCK_STREAM)
- s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- try:
- s.bind(('localhost', port))
- return True
- except socket.error, e:
- return False
- finally:
- s.close()
+ s = socket.socket(proto, socket.SOCK_STREAM)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ s.bind(('localhost', port))
+ return True
+ except socket.error, e:
+ return False
+ finally:
+ s.close()
def refill_pool(max_timeout, req):
- """Scan for ports not marked for being in use"""
- chk = [port for port in list(range(1025, 32766)) if port not in cronet_restricted_ports]
- random.shuffle(chk)
- for i in chk:
- if len(pool) > 100: break
- if i in in_use:
- age = time.time() - in_use[i]
- if age < max_timeout:
- continue
- req.log_message("kill old request %d" % i)
- del in_use[i]
- if can_bind(i, socket.AF_INET) and can_bind(i, socket.AF_INET6) and not can_connect(i):
- req.log_message("found available port %d" % i)
- pool.append(i)
+ """Scan for ports not marked for being in use"""
+ chk = [
+ port for port in list(range(1025, 32766))
+ if port not in cronet_restricted_ports
+ ]
+ random.shuffle(chk)
+ for i in chk:
+ if len(pool) > 100: break
+ if i in in_use:
+ age = time.time() - in_use[i]
+ if age < max_timeout:
+ continue
+ req.log_message("kill old request %d" % i)
+ del in_use[i]
+ if can_bind(i, socket.AF_INET) and can_bind(
+ i, socket.AF_INET6) and not can_connect(i):
+ req.log_message("found available port %d" % i)
+ pool.append(i)
def allocate_port(req):
- global pool
- global in_use
- global mu
- mu.acquire()
- max_timeout = 600
- while not pool:
- refill_pool(max_timeout, req)
- if not pool:
- req.log_message("failed to find ports: retrying soon")
- mu.release()
- time.sleep(1)
- mu.acquire()
- max_timeout /= 2
- port = pool[0]
- pool = pool[1:]
- in_use[port] = time.time()
- mu.release()
- return port
+ global pool
+ global in_use
+ global mu
+ mu.acquire()
+ max_timeout = 600
+ while not pool:
+ refill_pool(max_timeout, req)
+ if not pool:
+ req.log_message("failed to find ports: retrying soon")
+ mu.release()
+ time.sleep(1)
+ mu.acquire()
+ max_timeout /= 2
+ port = pool[0]
+ pool = pool[1:]
+ in_use[port] = time.time()
+ mu.release()
+ return port
keep_running = True
@@ -136,61 +139,68 @@ keep_running = True
class Handler(BaseHTTPRequestHandler):
- def setup(self):
- # If the client is unreachable for 5 seconds, close the connection
- self.timeout = 5
- BaseHTTPRequestHandler.setup(self)
+ def setup(self):
+ # If the client is unreachable for 5 seconds, close the connection
+ self.timeout = 5
+ BaseHTTPRequestHandler.setup(self)
+
+ def do_GET(self):
+ global keep_running
+ global mu
+ if self.path == '/get':
+ # allocate a new port, it will stay bound for ten minutes and until
+ # it's unused
+ self.send_response(200)
+ self.send_header('Content-Type', 'text/plain')
+ self.end_headers()
+ p = allocate_port(self)
+ self.log_message('allocated port %d' % p)
+ self.wfile.write('%d' % p)
+ elif self.path[0:6] == '/drop/':
+ self.send_response(200)
+ self.send_header('Content-Type', 'text/plain')
+ self.end_headers()
+ p = int(self.path[6:])
+ mu.acquire()
+ if p in in_use:
+ del in_use[p]
+ pool.append(p)
+ k = 'known'
+ else:
+ k = 'unknown'
+ mu.release()
+ self.log_message('drop %s port %d' % (k, p))
+ elif self.path == '/version_number':
+ # fetch a version string and the current process pid
+ self.send_response(200)
+ self.send_header('Content-Type', 'text/plain')
+ self.end_headers()
+ self.wfile.write(_MY_VERSION)
+ elif self.path == '/dump':
+ # yaml module is not installed on Macs and Windows machines by default
+ # so we import it lazily (/dump action is only used for debugging)
+ import yaml
+ self.send_response(200)
+ self.send_header('Content-Type', 'text/plain')
+ self.end_headers()
+ mu.acquire()
+ now = time.time()
+ out = yaml.dump({
+ 'pool':
+ pool,
+ 'in_use':
+ dict((k, now - v) for k, v in in_use.items())
+ })
+ mu.release()
+ self.wfile.write(out)
+ elif self.path == '/quitquitquit':
+ self.send_response(200)
+ self.end_headers()
+ self.server.shutdown()
- def do_GET(self):
- global keep_running
- global mu
- if self.path == '/get':
- # allocate a new port, it will stay bound for ten minutes and until
- # it's unused
- self.send_response(200)
- self.send_header('Content-Type', 'text/plain')
- self.end_headers()
- p = allocate_port(self)
- self.log_message('allocated port %d' % p)
- self.wfile.write('%d' % p)
- elif self.path[0:6] == '/drop/':
- self.send_response(200)
- self.send_header('Content-Type', 'text/plain')
- self.end_headers()
- p = int(self.path[6:])
- mu.acquire()
- if p in in_use:
- del in_use[p]
- pool.append(p)
- k = 'known'
- else:
- k = 'unknown'
- mu.release()
- self.log_message('drop %s port %d' % (k, p))
- elif self.path == '/version_number':
- # fetch a version string and the current process pid
- self.send_response(200)
- self.send_header('Content-Type', 'text/plain')
- self.end_headers()
- self.wfile.write(_MY_VERSION)
- elif self.path == '/dump':
- # yaml module is not installed on Macs and Windows machines by default
- # so we import it lazily (/dump action is only used for debugging)
- import yaml
- self.send_response(200)
- self.send_header('Content-Type', 'text/plain')
- self.end_headers()
- mu.acquire()
- now = time.time()
- out = yaml.dump({'pool': pool, 'in_use': dict((k, now - v) for k, v in in_use.items())})
- mu.release()
- self.wfile.write(out)
- elif self.path == '/quitquitquit':
- self.send_response(200)
- self.end_headers()
- self.server.shutdown()
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
- """Handle requests in a separate thread"""
+ """Handle requests in a separate thread"""
+
ThreadedHTTPServer(('', args.port), Handler).serve_forever()
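
The handler above exposes a small HTTP API: /get allocates a port, /drop/<port> returns it to the pool, /version_number and /dump are diagnostics, and /quitquitquit shuts the server down. A minimal Python 2 client matching the server's default --port (illustrative only, not part of the patch):

# Illustrative sketch (Python 2, like the server), not part of the patch.
import urllib2

BASE = 'http://localhost:12345'

port = int(urllib2.urlopen(BASE + '/get').read())
print('allocated port %d' % port)
# ... run the test against the allocated port, then hand it back ...
urllib2.urlopen(BASE + '/drop/%d' % port).read()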
diff --git a/tools/run_tests/python_utils/report_utils.py b/tools/run_tests/python_utils/report_utils.py
index a3867808b5..e4fddb8a7d 100644
--- a/tools/run_tests/python_utils/report_utils.py
+++ b/tools/run_tests/python_utils/report_utils.py
@@ -11,17 +11,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Generate XML and HTML test reports."""
from __future__ import print_function
try:
- from mako.runtime import Context
- from mako.template import Template
- from mako import exceptions
+ from mako.runtime import Context
+ from mako.template import Template
+ from mako import exceptions
except (ImportError):
- pass # Mako not installed but it is ok.
+ pass # Mako not installed but it is ok.
import datetime
import os
import string
@@ -30,111 +29,127 @@ import six
def _filter_msg(msg, output_format):
- """Filters out nonprintable and illegal characters from the message."""
- if output_format in ['XML', 'HTML']:
- # keep whitespaces but remove formfeed and vertical tab characters
- # that make XML report unparseable.
- filtered_msg = filter(
- lambda x: x in string.printable and x != '\f' and x != '\v',
- msg.decode('UTF-8', 'ignore'))
- if output_format == 'HTML':
- filtered_msg = filtered_msg.replace('"', '&quot;')
- return filtered_msg
- else:
- return msg
+ """Filters out nonprintable and illegal characters from the message."""
+ if output_format in ['XML', 'HTML']:
+ # keep whitespaces but remove formfeed and vertical tab characters
+ # that make XML report unparseable.
+ filtered_msg = filter(
+ lambda x: x in string.printable and x != '\f' and x != '\v',
+ msg.decode('UTF-8', 'ignore'))
+ if output_format == 'HTML':
+ filtered_msg = filtered_msg.replace('"', '&quot;')
+ return filtered_msg
+ else:
+ return msg
def new_junit_xml_tree():
- return ET.ElementTree(ET.Element('testsuites'))
+ return ET.ElementTree(ET.Element('testsuites'))
+
-def render_junit_xml_report(resultset, report_file, suite_package='grpc',
+def render_junit_xml_report(resultset,
+ report_file,
+ suite_package='grpc',
suite_name='tests'):
- """Generate JUnit-like XML report."""
- tree = new_junit_xml_tree()
- append_junit_xml_results(tree, resultset, suite_package, suite_name, '1')
- create_xml_report_file(tree, report_file)
+ """Generate JUnit-like XML report."""
+ tree = new_junit_xml_tree()
+ append_junit_xml_results(tree, resultset, suite_package, suite_name, '1')
+ create_xml_report_file(tree, report_file)
+
def create_xml_report_file(tree, report_file):
- """Generate JUnit-like report file from xml tree ."""
- # ensure the report directory exists
- report_dir = os.path.dirname(os.path.abspath(report_file))
- if not os.path.exists(report_dir):
- os.makedirs(report_dir)
- tree.write(report_file, encoding='UTF-8')
+ """Generate JUnit-like report file from xml tree ."""
+ # ensure the report directory exists
+ report_dir = os.path.dirname(os.path.abspath(report_file))
+ if not os.path.exists(report_dir):
+ os.makedirs(report_dir)
+ tree.write(report_file, encoding='UTF-8')
+
def append_junit_xml_results(tree, resultset, suite_package, suite_name, id):
- """Append a JUnit-like XML report tree with test results as a new suite."""
- testsuite = ET.SubElement(tree.getroot(), 'testsuite',
- id=id, package=suite_package, name=suite_name,
- timestamp=datetime.datetime.now().isoformat())
- failure_count = 0
- error_count = 0
- for shortname, results in six.iteritems(resultset):
- for result in results:
- xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)
- if result.elapsed_time:
- xml_test.set('time', str(result.elapsed_time))
- filtered_msg = _filter_msg(result.message, 'XML')
- if result.state == 'FAILED':
- ET.SubElement(xml_test, 'failure', message='Failure').text = filtered_msg
- failure_count += 1
- elif result.state == 'TIMEOUT':
- ET.SubElement(xml_test, 'error', message='Timeout').text = filtered_msg
- error_count += 1
- elif result.state == 'SKIPPED':
- ET.SubElement(xml_test, 'skipped', message='Skipped')
- testsuite.set('failures', str(failure_count))
- testsuite.set('errors', str(error_count))
-
-def render_interop_html_report(
- client_langs, server_langs, test_cases, auth_test_cases, http2_cases,
- http2_server_cases, resultset,
- num_failures, cloud_to_prod, prod_servers, http2_interop):
- """Generate HTML report for interop tests."""
- template_file = 'tools/run_tests/interop/interop_html_report.template'
- try:
- mytemplate = Template(filename=template_file, format_exceptions=True)
- except NameError:
- print('Mako template is not installed. Skipping HTML report generation.')
- return
- except IOError as e:
- print('Failed to find the template %s: %s' % (template_file, e))
- return
-
- sorted_test_cases = sorted(test_cases)
- sorted_auth_test_cases = sorted(auth_test_cases)
- sorted_http2_cases = sorted(http2_cases)
- sorted_http2_server_cases = sorted(http2_server_cases)
- sorted_client_langs = sorted(client_langs)
- sorted_server_langs = sorted(server_langs)
- sorted_prod_servers = sorted(prod_servers)
-
- args = {'client_langs': sorted_client_langs,
- 'server_langs': sorted_server_langs,
- 'test_cases': sorted_test_cases,
- 'auth_test_cases': sorted_auth_test_cases,
- 'http2_cases': sorted_http2_cases,
- 'http2_server_cases': sorted_http2_server_cases,
- 'resultset': resultset,
- 'num_failures': num_failures,
- 'cloud_to_prod': cloud_to_prod,
- 'prod_servers': sorted_prod_servers,
- 'http2_interop': http2_interop}
-
- html_report_out_dir = 'reports'
- if not os.path.exists(html_report_out_dir):
- os.mkdir(html_report_out_dir)
- html_file_path = os.path.join(html_report_out_dir, 'index.html')
- try:
- with open(html_file_path, 'w') as output_file:
- mytemplate.render_context(Context(output_file, **args))
- except:
- print(exceptions.text_error_template().render())
- raise
+ """Append a JUnit-like XML report tree with test results as a new suite."""
+ testsuite = ET.SubElement(
+ tree.getroot(),
+ 'testsuite',
+ id=id,
+ package=suite_package,
+ name=suite_name,
+ timestamp=datetime.datetime.now().isoformat())
+ failure_count = 0
+ error_count = 0
+ for shortname, results in six.iteritems(resultset):
+ for result in results:
+ xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)
+ if result.elapsed_time:
+ xml_test.set('time', str(result.elapsed_time))
+ filtered_msg = _filter_msg(result.message, 'XML')
+ if result.state == 'FAILED':
+ ET.SubElement(
+ xml_test, 'failure', message='Failure').text = filtered_msg
+ failure_count += 1
+ elif result.state == 'TIMEOUT':
+ ET.SubElement(
+ xml_test, 'error', message='Timeout').text = filtered_msg
+ error_count += 1
+ elif result.state == 'SKIPPED':
+ ET.SubElement(xml_test, 'skipped', message='Skipped')
+ testsuite.set('failures', str(failure_count))
+ testsuite.set('errors', str(error_count))
+
+
+def render_interop_html_report(client_langs, server_langs, test_cases,
+ auth_test_cases, http2_cases, http2_server_cases,
+ resultset, num_failures, cloud_to_prod,
+ prod_servers, http2_interop):
+ """Generate HTML report for interop tests."""
+ template_file = 'tools/run_tests/interop/interop_html_report.template'
+ try:
+ mytemplate = Template(filename=template_file, format_exceptions=True)
+ except NameError:
+ print(
+ 'Mako template is not installed. Skipping HTML report generation.')
+ return
+ except IOError as e:
+ print('Failed to find the template %s: %s' % (template_file, e))
+ return
+
+ sorted_test_cases = sorted(test_cases)
+ sorted_auth_test_cases = sorted(auth_test_cases)
+ sorted_http2_cases = sorted(http2_cases)
+ sorted_http2_server_cases = sorted(http2_server_cases)
+ sorted_client_langs = sorted(client_langs)
+ sorted_server_langs = sorted(server_langs)
+ sorted_prod_servers = sorted(prod_servers)
+
+ args = {
+ 'client_langs': sorted_client_langs,
+ 'server_langs': sorted_server_langs,
+ 'test_cases': sorted_test_cases,
+ 'auth_test_cases': sorted_auth_test_cases,
+ 'http2_cases': sorted_http2_cases,
+ 'http2_server_cases': sorted_http2_server_cases,
+ 'resultset': resultset,
+ 'num_failures': num_failures,
+ 'cloud_to_prod': cloud_to_prod,
+ 'prod_servers': sorted_prod_servers,
+ 'http2_interop': http2_interop
+ }
+
+ html_report_out_dir = 'reports'
+ if not os.path.exists(html_report_out_dir):
+ os.mkdir(html_report_out_dir)
+ html_file_path = os.path.join(html_report_out_dir, 'index.html')
+ try:
+ with open(html_file_path, 'w') as output_file:
+ mytemplate.render_context(Context(output_file, **args))
+ except:
+ print(exceptions.text_error_template().render())
+ raise
+
def render_perf_profiling_results(output_filepath, profile_names):
- with open(output_filepath, 'w') as output_file:
- output_file.write('<ul>\n')
- for name in profile_names:
- output_file.write('<li><a href=%s>%s</a></li>\n' % (name, name))
- output_file.write('</ul>\n')
+ with open(output_filepath, 'w') as output_file:
+ output_file.write('<ul>\n')
+ for name in profile_names:
+ output_file.write('<li><a href=%s>%s</a></li>\n' % (name, name))
+ output_file.write('</ul>\n')
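
The report_utils.py hunks above only re-indent the JUnit XML helpers, but the report layout they produce is easier to see in one piece. Below is a minimal, self-contained sketch of the same testsuites/testsuite/testcase structure; the result states and attribute names mirror the code above, while the function name and the example resultset are illustrative.

    # Minimal sketch of the JUnit-like XML layout produced by the helpers
    # above; the function name and the example resultset are illustrative.
    import datetime
    import xml.etree.ElementTree as ET

    def tiny_junit_report(resultset, report_file):
        """Write a testsuites/testsuite/testcase tree for a {name: [state]} dict."""
        root = ET.Element('testsuites')
        suite = ET.SubElement(root, 'testsuite', id='1', package='grpc',
                              name='tests',
                              timestamp=datetime.datetime.now().isoformat())
        failures = errors = 0
        for shortname, states in resultset.items():
            for state in states:
                case = ET.SubElement(suite, 'testcase', name=shortname)
                if state == 'FAILED':
                    ET.SubElement(case, 'failure', message='Failure')
                    failures += 1
                elif state == 'TIMEOUT':
                    ET.SubElement(case, 'error', message='Timeout')
                    errors += 1
                elif state == 'SKIPPED':
                    ET.SubElement(case, 'skipped', message='Skipped')
        suite.set('failures', str(failures))
        suite.set('errors', str(errors))
        ET.ElementTree(root).write(report_file, encoding='UTF-8')

    # Example: tiny_junit_report({'sanity': ['PASSED'], 'slow': ['TIMEOUT']},
    #                            'report.xml')
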
diff --git a/tools/run_tests/python_utils/start_port_server.py b/tools/run_tests/python_utils/start_port_server.py
index 786103ccdf..5572cdcfe7 100644
--- a/tools/run_tests/python_utils/start_port_server.py
+++ b/tools/run_tests/python_utils/start_port_server.py
@@ -22,10 +22,10 @@ import sys
import tempfile
import time
-
# must be synchronized with test/core/utils/port_server_client.h
_PORT_SERVER_PORT = 32766
+
def start_port_server():
# check if a compatible port server is running
# if incompatible (version mismatch) ==> start a new one
@@ -33,9 +33,8 @@ def start_port_server():
# otherwise, leave it up
try:
version = int(
- urllib.urlopen(
- 'http://localhost:%d/version_number' %
- _PORT_SERVER_PORT).read())
+ urllib.urlopen('http://localhost:%d/version_number' %
+ _PORT_SERVER_PORT).read())
logging.info('detected port server running version %d', version)
running = True
except Exception as e:
@@ -92,8 +91,8 @@ def start_port_server():
# try one final time: maybe another build managed to start one
time.sleep(1)
try:
- urllib.urlopen(
- 'http://localhost:%d/get' % _PORT_SERVER_PORT).read()
+ urllib.urlopen('http://localhost:%d/get' %
+ _PORT_SERVER_PORT).read()
logging.info(
'last ditch attempt to contact port server succeeded')
break
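
The start_port_server.py hunks only rewrap two urllib.urlopen calls, but the probe they implement (ask a local HTTP endpoint for its version number, treat any failure as "not running") is worth spelling out. A rough sketch follows; it uses Python 3's urllib.request so it is self-contained, whereas the script above relies on Python 2's urllib, and the timeout value is illustrative.

    # Rough Python 3 sketch of the "is a compatible port server running?" probe;
    # the script above uses Python 2's urllib, and the timeout is illustrative.
    import logging
    import urllib.request

    _PORT_SERVER_PORT = 32766  # must match test/core/utils/port_server_client.h

    def detect_port_server_version():
        """Return the running port server's version, or None if unreachable."""
        url = 'http://localhost:%d/version_number' % _PORT_SERVER_PORT
        try:
            payload = urllib.request.urlopen(url, timeout=2).read()
            version = int(payload.decode('ascii').strip())
            logging.info('detected port server running version %d', version)
            return version
        except Exception as e:  # connection refused, timeout, bad payload, ...
            logging.info('failed to detect port server: %s', e)
            return None
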
diff --git a/tools/run_tests/python_utils/upload_test_results.py b/tools/run_tests/python_utils/upload_test_results.py
index ea97bc0aec..9eb8e2a862 100644
--- a/tools/run_tests/python_utils/upload_test_results.py
+++ b/tools/run_tests/python_utils/upload_test_results.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Helper to upload Jenkins test results to BQ"""
from __future__ import print_function
@@ -23,8 +22,8 @@ import sys
import time
import uuid
-gcp_utils_dir = os.path.abspath(os.path.join(
- os.path.dirname(__file__), '../../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils
@@ -35,55 +34,57 @@ _EXPIRATION_MS = 90 * 24 * 60 * 60 * 1000
_PARTITION_TYPE = 'DAY'
_PROJECT_ID = 'grpc-testing'
_RESULTS_SCHEMA = [
- ('job_name', 'STRING', 'Name of Jenkins job'),
- ('build_id', 'INTEGER', 'Build ID of Jenkins job'),
- ('build_url', 'STRING', 'URL of Jenkins job'),
- ('test_name', 'STRING', 'Individual test name'),
- ('language', 'STRING', 'Language of test'),
- ('platform', 'STRING', 'Platform used for test'),
- ('config', 'STRING', 'Config used for test'),
- ('compiler', 'STRING', 'Compiler used for test'),
- ('iomgr_platform', 'STRING', 'Iomgr used for test'),
- ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
- ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
- ('elapsed_time', 'FLOAT', 'How long test took to run'),
- ('cpu_estimated', 'FLOAT', 'Estimated CPU usage of test'),
- ('cpu_measured', 'FLOAT', 'Actual CPU usage of test'),
- ('return_code', 'INTEGER', 'Exit code of test'),
+ ('job_name', 'STRING', 'Name of Jenkins job'),
+ ('build_id', 'INTEGER', 'Build ID of Jenkins job'),
+ ('build_url', 'STRING', 'URL of Jenkins job'),
+ ('test_name', 'STRING', 'Individual test name'),
+ ('language', 'STRING', 'Language of test'),
+ ('platform', 'STRING', 'Platform used for test'),
+ ('config', 'STRING', 'Config used for test'),
+ ('compiler', 'STRING', 'Compiler used for test'),
+ ('iomgr_platform', 'STRING', 'Iomgr used for test'),
+ ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
+ ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
+ ('elapsed_time', 'FLOAT', 'How long test took to run'),
+ ('cpu_estimated', 'FLOAT', 'Estimated CPU usage of test'),
+ ('cpu_measured', 'FLOAT', 'Actual CPU usage of test'),
+ ('return_code', 'INTEGER', 'Exit code of test'),
]
_INTEROP_RESULTS_SCHEMA = [
- ('job_name', 'STRING', 'Name of Jenkins/Kokoro job'),
- ('build_id', 'INTEGER', 'Build ID of Jenkins/Kokoro job'),
- ('build_url', 'STRING', 'URL of Jenkins/Kokoro job'),
- ('test_name', 'STRING', 'Unique test name combining client, server, and test_name'),
- ('suite', 'STRING', 'Test suite: cloud_to_cloud, cloud_to_prod, or cloud_to_prod_auth'),
- ('client', 'STRING', 'Client language'),
- ('server', 'STRING', 'Server host name'),
- ('test_case', 'STRING', 'Name of test case'),
- ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
- ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
- ('elapsed_time', 'FLOAT', 'How long test took to run'),
+ ('job_name', 'STRING', 'Name of Jenkins/Kokoro job'),
+ ('build_id', 'INTEGER', 'Build ID of Jenkins/Kokoro job'),
+ ('build_url', 'STRING', 'URL of Jenkins/Kokoro job'),
+ ('test_name', 'STRING',
+ 'Unique test name combining client, server, and test_name'),
+ ('suite', 'STRING',
+ 'Test suite: cloud_to_cloud, cloud_to_prod, or cloud_to_prod_auth'),
+ ('client', 'STRING', 'Client language'),
+ ('server', 'STRING', 'Server host name'),
+ ('test_case', 'STRING', 'Name of test case'),
+ ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
+ ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
+ ('elapsed_time', 'FLOAT', 'How long test took to run'),
]
def _get_build_metadata(test_results):
- """Add Jenkins/Kokoro build metadata to test_results based on environment
+ """Add Jenkins/Kokoro build metadata to test_results based on environment
variables set by Jenkins/Kokoro.
"""
- build_id = os.getenv('BUILD_ID') or os.getenv('KOKORO_BUILD_NUMBER')
- build_url = os.getenv('BUILD_URL') or os.getenv('KOKORO_BUILD_URL')
- job_name = os.getenv('JOB_BASE_NAME') or os.getenv('KOKORO_JOB_NAME')
+ build_id = os.getenv('BUILD_ID') or os.getenv('KOKORO_BUILD_NUMBER')
+ build_url = os.getenv('BUILD_URL') or os.getenv('KOKORO_BUILD_URL')
+ job_name = os.getenv('JOB_BASE_NAME') or os.getenv('KOKORO_JOB_NAME')
- if build_id:
- test_results['build_id'] = build_id
- if build_url:
- test_results['build_url'] = build_url
- if job_name:
- test_results['job_name'] = job_name
+ if build_id:
+ test_results['build_id'] = build_id
+ if build_url:
+ test_results['build_url'] = build_url
+ if job_name:
+ test_results['job_name'] = job_name
def upload_results_to_bq(resultset, bq_table, args, platform):
- """Upload test results to a BQ table.
+ """Upload test results to a BQ table.
Args:
resultset: dictionary generated by jobset.run
@@ -91,77 +92,97 @@ def upload_results_to_bq(resultset, bq_table, args, platform):
args: args in run_tests.py, generated by argparse
platform: string name of platform tests were run on
"""
- bq = big_query_utils.create_big_query()
- big_query_utils.create_partitioned_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _RESULTS_SCHEMA, _DESCRIPTION,
- partition_type=_PARTITION_TYPE, expiration_ms= _EXPIRATION_MS)
-
- for shortname, results in six.iteritems(resultset):
- for result in results:
- test_results = {}
- _get_build_metadata(test_results)
- test_results['compiler'] = args.compiler
- test_results['config'] = args.config
- test_results['cpu_estimated'] = result.cpu_estimated
- test_results['cpu_measured'] = result.cpu_measured
- test_results['elapsed_time'] = '%.2f' % result.elapsed_time
- test_results['iomgr_platform'] = args.iomgr_platform
- # args.language is a list, but will always have one element in the contexts
- # this function is used.
- test_results['language'] = args.language[0]
- test_results['platform'] = platform
- test_results['result'] = result.state
- test_results['return_code'] = result.returncode
- test_results['test_name'] = shortname
- test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
-
- row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
-
- # TODO(jtattermusch): rows are inserted one by one, very inefficient
- max_retries = 3
- for attempt in range(max_retries):
- if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, bq_table, [row]):
- break
- else:
- if attempt < max_retries - 1:
- print('Error uploading result to bigquery, will retry.')
- else:
- print('Error uploading result to bigquery, all attempts failed.')
- sys.exit(1)
+ bq = big_query_utils.create_big_query()
+ big_query_utils.create_partitioned_table(
+ bq,
+ _PROJECT_ID,
+ _DATASET_ID,
+ bq_table,
+ _RESULTS_SCHEMA,
+ _DESCRIPTION,
+ partition_type=_PARTITION_TYPE,
+ expiration_ms=_EXPIRATION_MS)
+
+ for shortname, results in six.iteritems(resultset):
+ for result in results:
+ test_results = {}
+ _get_build_metadata(test_results)
+ test_results['compiler'] = args.compiler
+ test_results['config'] = args.config
+ test_results['cpu_estimated'] = result.cpu_estimated
+ test_results['cpu_measured'] = result.cpu_measured
+ test_results['elapsed_time'] = '%.2f' % result.elapsed_time
+ test_results['iomgr_platform'] = args.iomgr_platform
+            # args.language is a list, but will always have one element in the
+            # contexts where this function is used.
+ test_results['language'] = args.language[0]
+ test_results['platform'] = platform
+ test_results['result'] = result.state
+ test_results['return_code'] = result.returncode
+ test_results['test_name'] = shortname
+ test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
+
+ row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
+
+ # TODO(jtattermusch): rows are inserted one by one, very inefficient
+ max_retries = 3
+ for attempt in range(max_retries):
+ if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+ bq_table, [row]):
+ break
+ else:
+ if attempt < max_retries - 1:
+ print('Error uploading result to bigquery, will retry.')
+ else:
+ print(
+ 'Error uploading result to bigquery, all attempts failed.'
+ )
+ sys.exit(1)
def upload_interop_results_to_bq(resultset, bq_table, args):
- """Upload interop test results to a BQ table.
+ """Upload interop test results to a BQ table.
Args:
resultset: dictionary generated by jobset.run
bq_table: string name of table to create/upload results to in BQ
args: args in run_interop_tests.py, generated by argparse
"""
- bq = big_query_utils.create_big_query()
- big_query_utils.create_partitioned_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _INTEROP_RESULTS_SCHEMA, _DESCRIPTION,
- partition_type=_PARTITION_TYPE, expiration_ms= _EXPIRATION_MS)
-
- for shortname, results in six.iteritems(resultset):
- for result in results:
- test_results = {}
- _get_build_metadata(test_results)
- test_results['elapsed_time'] = '%.2f' % result.elapsed_time
- test_results['result'] = result.state
- test_results['test_name'] = shortname
- test_results['suite'] = shortname.split(':')[0]
- test_results['client'] = shortname.split(':')[1]
- test_results['server'] = shortname.split(':')[2]
- test_results['test_case'] = shortname.split(':')[3]
- test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
- row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
- # TODO(jtattermusch): rows are inserted one by one, very inefficient
- max_retries = 3
- for attempt in range(max_retries):
- if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, bq_table, [row]):
- break
- else:
- if attempt < max_retries - 1:
- print('Error uploading result to bigquery, will retry.')
- else:
- print('Error uploading result to bigquery, all attempts failed.')
- sys.exit(1)
+ bq = big_query_utils.create_big_query()
+ big_query_utils.create_partitioned_table(
+ bq,
+ _PROJECT_ID,
+ _DATASET_ID,
+ bq_table,
+ _INTEROP_RESULTS_SCHEMA,
+ _DESCRIPTION,
+ partition_type=_PARTITION_TYPE,
+ expiration_ms=_EXPIRATION_MS)
+
+ for shortname, results in six.iteritems(resultset):
+ for result in results:
+ test_results = {}
+ _get_build_metadata(test_results)
+ test_results['elapsed_time'] = '%.2f' % result.elapsed_time
+ test_results['result'] = result.state
+ test_results['test_name'] = shortname
+ test_results['suite'] = shortname.split(':')[0]
+ test_results['client'] = shortname.split(':')[1]
+ test_results['server'] = shortname.split(':')[2]
+ test_results['test_case'] = shortname.split(':')[3]
+ test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
+ row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
+ # TODO(jtattermusch): rows are inserted one by one, very inefficient
+ max_retries = 3
+ for attempt in range(max_retries):
+ if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+ bq_table, [row]):
+ break
+ else:
+ if attempt < max_retries - 1:
+ print('Error uploading result to bigquery, will retry.')
+ else:
+ print(
+ 'Error uploading result to bigquery, all attempts failed.'
+ )
+ sys.exit(1)
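
The retry logic that upload_results_to_bq and upload_interop_results_to_bq wrap around big_query_utils.insert_rows is a plain bounded-retry loop. A stripped-down sketch is below; insert_row_with_retries and the injected insert_fn are illustrative stand-ins, not names from the gRPC tooling.

    # Stripped-down sketch of the bounded-retry loop used when uploading each
    # row above; insert_fn stands in for big_query_utils.insert_rows(...).
    import sys

    def insert_row_with_retries(insert_fn, row, max_retries=3):
        """Call insert_fn([row]) until it succeeds or retries are exhausted."""
        for attempt in range(max_retries):
            if insert_fn([row]):
                break
            if attempt < max_retries - 1:
                print('Error uploading result to bigquery, will retry.')
            else:
                print('Error uploading result to bigquery, all attempts failed.')
                sys.exit(1)

    # Example with a fake backend that succeeds on the second attempt:
    if __name__ == '__main__':
        calls = []
        insert_row_with_retries(
            lambda rows: calls.append(rows) or len(calls) > 1,
            {'test_name': 'sanity', 'result': 'PASSED'})
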
diff --git a/tools/run_tests/python_utils/watch_dirs.py b/tools/run_tests/python_utils/watch_dirs.py
index 7bd085efaf..d2ad303a07 100755
--- a/tools/run_tests/python_utils/watch_dirs.py
+++ b/tools/run_tests/python_utils/watch_dirs.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Helper to watch a (set) of directories for modifications."""
import os
@@ -19,42 +18,42 @@ import time
class DirWatcher(object):
- """Helper to watch a (set) of directories for modifications."""
-
- def __init__(self, paths):
- if isinstance(paths, basestring):
- paths = [paths]
- self._done = False
- self.paths = list(paths)
- self.lastrun = time.time()
- self._cache = self._calculate()
-
- def _calculate(self):
- """Walk over all subscribed paths, check most recent mtime."""
- most_recent_change = None
- for path in self.paths:
- if not os.path.exists(path):
- continue
- if not os.path.isdir(path):
- continue
- for root, _, files in os.walk(path):
- for f in files:
- if f and f[0] == '.': continue
- try:
- st = os.stat(os.path.join(root, f))
- except OSError as e:
- if e.errno == os.errno.ENOENT:
- continue
- raise
- if most_recent_change is None:
- most_recent_change = st.st_mtime
- else:
- most_recent_change = max(most_recent_change, st.st_mtime)
- return most_recent_change
-
- def most_recent_change(self):
- if time.time() - self.lastrun > 1:
- self._cache = self._calculate()
- self.lastrun = time.time()
- return self._cache
-
+ """Helper to watch a (set) of directories for modifications."""
+
+ def __init__(self, paths):
+ if isinstance(paths, basestring):
+ paths = [paths]
+ self._done = False
+ self.paths = list(paths)
+ self.lastrun = time.time()
+ self._cache = self._calculate()
+
+ def _calculate(self):
+ """Walk over all subscribed paths, check most recent mtime."""
+ most_recent_change = None
+ for path in self.paths:
+ if not os.path.exists(path):
+ continue
+ if not os.path.isdir(path):
+ continue
+ for root, _, files in os.walk(path):
+ for f in files:
+ if f and f[0] == '.': continue
+ try:
+ st = os.stat(os.path.join(root, f))
+ except OSError as e:
+ if e.errno == os.errno.ENOENT:
+ continue
+ raise
+ if most_recent_change is None:
+ most_recent_change = st.st_mtime
+ else:
+ most_recent_change = max(most_recent_change,
+ st.st_mtime)
+ return most_recent_change
+
+ def most_recent_change(self):
+ if time.time() - self.lastrun > 1:
+ self._cache = self._calculate()
+ self.lastrun = time.time()
+ return self._cache
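
DirWatcher._calculate, re-indented above, boils down to finding the newest mtime of any non-hidden file under the watched directories. A standalone sketch of that scan follows; the function name is illustrative, and it uses the errno module directly instead of the os.errno alias seen in the diff.

    # Standalone sketch of the mtime scan in DirWatcher._calculate() above;
    # the function name is illustrative and errno is used instead of os.errno.
    import errno
    import os

    def newest_mtime(paths):
        """Return the most recent mtime of any non-hidden file under paths."""
        most_recent = None
        for path in paths:
            if not os.path.isdir(path):
                continue
            for root, _, files in os.walk(path):
                for name in files:
                    if name.startswith('.'):
                        continue
                    try:
                        st = os.stat(os.path.join(root, name))
                    except OSError as e:
                        if e.errno == errno.ENOENT:  # file vanished mid-walk
                            continue
                        raise
                    if most_recent is None or st.st_mtime > most_recent:
                        most_recent = st.st_mtime
        return most_recent

    # Example: newest_mtime(['tools/run_tests'])
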
diff --git a/tools/run_tests/run_build_statistics.py b/tools/run_tests/run_build_statistics.py
index 1e957b6677..4af00a47a6 100755
--- a/tools/run_tests/run_build_statistics.py
+++ b/tools/run_tests/run_build_statistics.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Tool to get build statistics from Jenkins and upload to BigQuery."""
from __future__ import print_function
@@ -27,39 +26,38 @@ import re
import sys
import urllib
-
-gcp_utils_dir = os.path.abspath(os.path.join(
- os.path.dirname(__file__), '../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils
-
_PROJECT_ID = 'grpc-testing'
_HAS_MATRIX = True
-_BUILDS = {'gRPC_interop_master': not _HAS_MATRIX,
- 'gRPC_master_linux': not _HAS_MATRIX,
- 'gRPC_master_macos': not _HAS_MATRIX,
- 'gRPC_master_windows': not _HAS_MATRIX,
- 'gRPC_performance_master': not _HAS_MATRIX,
- 'gRPC_portability_master_linux': not _HAS_MATRIX,
- 'gRPC_portability_master_windows': not _HAS_MATRIX,
- 'gRPC_master_asanitizer_c': not _HAS_MATRIX,
- 'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
- 'gRPC_master_msan_c': not _HAS_MATRIX,
- 'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
- 'gRPC_master_tsan_cpp': not _HAS_MATRIX,
- 'gRPC_interop_pull_requests': not _HAS_MATRIX,
- 'gRPC_performance_pull_requests': not _HAS_MATRIX,
- 'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
- 'gRPC_portability_pr_win': not _HAS_MATRIX,
- 'gRPC_pull_requests_linux': not _HAS_MATRIX,
- 'gRPC_pull_requests_macos': not _HAS_MATRIX,
- 'gRPC_pr_win': not _HAS_MATRIX,
- 'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
- 'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
- 'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
- 'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
- 'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
+_BUILDS = {
+ 'gRPC_interop_master': not _HAS_MATRIX,
+ 'gRPC_master_linux': not _HAS_MATRIX,
+ 'gRPC_master_macos': not _HAS_MATRIX,
+ 'gRPC_master_windows': not _HAS_MATRIX,
+ 'gRPC_performance_master': not _HAS_MATRIX,
+ 'gRPC_portability_master_linux': not _HAS_MATRIX,
+ 'gRPC_portability_master_windows': not _HAS_MATRIX,
+ 'gRPC_master_asanitizer_c': not _HAS_MATRIX,
+ 'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
+ 'gRPC_master_msan_c': not _HAS_MATRIX,
+ 'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
+ 'gRPC_master_tsan_cpp': not _HAS_MATRIX,
+ 'gRPC_interop_pull_requests': not _HAS_MATRIX,
+ 'gRPC_performance_pull_requests': not _HAS_MATRIX,
+ 'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
+ 'gRPC_portability_pr_win': not _HAS_MATRIX,
+ 'gRPC_pull_requests_linux': not _HAS_MATRIX,
+ 'gRPC_pull_requests_macos': not _HAS_MATRIX,
+ 'gRPC_pr_win': not _HAS_MATRIX,
+ 'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
+ 'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
+ 'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
+ 'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
+ 'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
}
_URL_BASE = 'https://grpc-testing.appspot.com/job'
@@ -99,147 +97,155 @@ _DATASET_ID = 'build_statistics'
def _scrape_for_known_errors(html):
- error_list = []
- for known_error in _KNOWN_ERRORS:
- errors = re.findall(known_error, html)
- this_error_count = len(errors)
- if this_error_count > 0:
- error_list.append({'description': known_error,
- 'count': this_error_count})
- print('====> %d failures due to %s' % (this_error_count, known_error))
- return error_list
+ error_list = []
+ for known_error in _KNOWN_ERRORS:
+ errors = re.findall(known_error, html)
+ this_error_count = len(errors)
+ if this_error_count > 0:
+ error_list.append({
+ 'description': known_error,
+ 'count': this_error_count
+ })
+ print('====> %d failures due to %s' %
+ (this_error_count, known_error))
+ return error_list
def _no_report_files_found(html):
- return _NO_REPORT_FILES_FOUND_ERROR in html
+ return _NO_REPORT_FILES_FOUND_ERROR in html
def _get_last_processed_buildnumber(build_name):
- query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
- _PROJECT_ID, _DATASET_ID, build_name)
- query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
- page = bq.jobs().getQueryResults(
- pageToken=None,
- **query_job['jobReference']).execute(num_retries=3)
- if page['rows'][0]['f'][0]['v']:
- return int(page['rows'][0]['f'][0]['v'])
- return 0
+ query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
+ _PROJECT_ID, _DATASET_ID, build_name)
+ query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
+ page = bq.jobs().getQueryResults(
+ pageToken=None, **query_job['jobReference']).execute(num_retries=3)
+ if page['rows'][0]['f'][0]['v']:
+ return int(page['rows'][0]['f'][0]['v'])
+ return 0
def _process_matrix(build, url_base):
- matrix_list = []
- for matrix in build.get_matrix_runs():
- matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*',
- matrix.name).groups()[0]
- matrix_tuple = matrix_str.split(',')
- json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
- url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
- console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
- url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
- matrix_dict = {'name': matrix_str,
- 'duration': matrix.get_duration().total_seconds()}
- matrix_dict.update(_process_build(json_url, console_url))
- matrix_list.append(matrix_dict)
-
- return matrix_list
+ matrix_list = []
+ for matrix in build.get_matrix_runs():
+ matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*',
+ matrix.name).groups()[0]
+ matrix_tuple = matrix_str.split(',')
+ json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
+ url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
+ console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
+ url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
+ matrix_dict = {
+ 'name': matrix_str,
+ 'duration': matrix.get_duration().total_seconds()
+ }
+ matrix_dict.update(_process_build(json_url, console_url))
+ matrix_list.append(matrix_dict)
+
+ return matrix_list
def _process_build(json_url, console_url):
- build_result = {}
- error_list = []
- try:
- html = urllib.urlopen(json_url).read()
- test_result = json.loads(html)
- print('====> Parsing result from %s' % json_url)
- failure_count = test_result['failCount']
- build_result['pass_count'] = test_result['passCount']
- build_result['failure_count'] = failure_count
- # This means Jenkins failure occurred.
- build_result['no_report_files_found'] = _no_report_files_found(html)
- # Only check errors if Jenkins failure occurred.
- if build_result['no_report_files_found']:
- error_list = _scrape_for_known_errors(html)
- except Exception as e:
- print('====> Got exception for %s: %s.' % (json_url, str(e)))
- print('====> Parsing errors from %s.' % console_url)
- html = urllib.urlopen(console_url).read()
- build_result['pass_count'] = 0
- build_result['failure_count'] = 1
- # In this case, the string doesn't exist in the result html but the fact
- # that we fail to parse the result html indicates Jenkins failure and hence
- # no report files were generated.
- build_result['no_report_files_found'] = True
- error_list = _scrape_for_known_errors(html)
-
- if error_list:
- build_result['error'] = error_list
- elif build_result['no_report_files_found']:
- build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
- else:
- build_result['error'] = [{'description': '', 'count': 0}]
-
- return build_result
+ build_result = {}
+ error_list = []
+ try:
+ html = urllib.urlopen(json_url).read()
+ test_result = json.loads(html)
+ print('====> Parsing result from %s' % json_url)
+ failure_count = test_result['failCount']
+ build_result['pass_count'] = test_result['passCount']
+ build_result['failure_count'] = failure_count
+ # This means Jenkins failure occurred.
+ build_result['no_report_files_found'] = _no_report_files_found(html)
+ # Only check errors if Jenkins failure occurred.
+ if build_result['no_report_files_found']:
+ error_list = _scrape_for_known_errors(html)
+ except Exception as e:
+ print('====> Got exception for %s: %s.' % (json_url, str(e)))
+ print('====> Parsing errors from %s.' % console_url)
+ html = urllib.urlopen(console_url).read()
+ build_result['pass_count'] = 0
+ build_result['failure_count'] = 1
+        # In this case, the string doesn't exist in the result html, but the fact
+        # that we failed to parse the result html indicates a Jenkins failure and
+        # hence no report files were generated.
+ build_result['no_report_files_found'] = True
+ error_list = _scrape_for_known_errors(html)
+
+ if error_list:
+ build_result['error'] = error_list
+ elif build_result['no_report_files_found']:
+ build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
+ else:
+ build_result['error'] = [{'description': '', 'count': 0}]
+
+ return build_result
# parse command line
argp = argparse.ArgumentParser(description='Get build statistics.')
argp.add_argument('-u', '--username', default='jenkins')
-argp.add_argument('-b', '--builds',
- choices=['all'] + sorted(_BUILDS.keys()),
- nargs='+',
- default=['all'])
+argp.add_argument(
+ '-b',
+ '--builds',
+ choices=['all'] + sorted(_BUILDS.keys()),
+ nargs='+',
+ default=['all'])
args = argp.parse_args()
J = Jenkins('https://grpc-testing.appspot.com', args.username, 'apiToken')
bq = big_query_utils.create_big_query()
for build_name in _BUILDS.keys() if 'all' in args.builds else args.builds:
- print('====> Build: %s' % build_name)
- # Since get_last_completed_build() always fails due to malformatted string
- # error, we use get_build_metadata() instead.
- job = None
- try:
- job = J[build_name]
- except Exception as e:
- print('====> Failed to get build %s: %s.' % (build_name, str(e)))
- continue
- last_processed_build_number = _get_last_processed_buildnumber(build_name)
- last_complete_build_number = job.get_last_completed_buildnumber()
- # To avoid processing all builds for a project never looked at. In this case,
- # only examine 10 latest builds.
- starting_build_number = max(last_processed_build_number+1,
- last_complete_build_number-9)
- for build_number in xrange(starting_build_number,
- last_complete_build_number+1):
- print('====> Processing %s build %d.' % (build_name, build_number))
- build = None
+ print('====> Build: %s' % build_name)
+    # Since get_last_completed_build() always fails due to a malformed-string
+    # error, we use get_build_metadata() instead.
+ job = None
try:
- build = job.get_build_metadata(build_number)
- print('====> Build status: %s.' % build.get_status())
- if build.get_status() == 'ABORTED':
+ job = J[build_name]
+ except Exception as e:
+ print('====> Failed to get build %s: %s.' % (build_name, str(e)))
continue
- # If any build is still running, stop processing this job. Next time, we
- # start from where it was left so that all builds are processed
- # sequentially.
- if build.is_running():
- print('====> Build %d is still running.' % build_number)
- break
- except KeyError:
- print('====> Build %s is missing. Skip.' % build_number)
- continue
- build_result = {'build_number': build_number,
- 'timestamp': str(build.get_timestamp())}
- url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
- if _BUILDS[build_name]: # The build has matrix, such as gRPC_master.
- build_result['matrix'] = _process_matrix(build, url_base)
- else:
- json_url = '%s/testReport/api/json' % url_base
- console_url = '%s/consoleFull' % url_base
- build_result['duration'] = build.get_duration().total_seconds()
- build_stat = _process_build(json_url, console_url)
- build_result.update(build_stat)
- rows = [big_query_utils.make_row(build_number, build_result)]
- if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, build_name,
- rows):
- print('====> Error uploading result to bigquery.')
- sys.exit(1)
+ last_processed_build_number = _get_last_processed_buildnumber(build_name)
+ last_complete_build_number = job.get_last_completed_buildnumber()
+    # Avoid processing every build of a project we have never looked at; in
+    # that case, only examine the 10 latest builds.
+ starting_build_number = max(last_processed_build_number + 1,
+ last_complete_build_number - 9)
+ for build_number in xrange(starting_build_number,
+ last_complete_build_number + 1):
+ print('====> Processing %s build %d.' % (build_name, build_number))
+ build = None
+ try:
+ build = job.get_build_metadata(build_number)
+ print('====> Build status: %s.' % build.get_status())
+ if build.get_status() == 'ABORTED':
+ continue
+            # If any build is still running, stop processing this job. Next time, we
+            # start from where we left off so that all builds are processed
+            # sequentially.
+ if build.is_running():
+ print('====> Build %d is still running.' % build_number)
+ break
+ except KeyError:
+ print('====> Build %s is missing. Skip.' % build_number)
+ continue
+ build_result = {
+ 'build_number': build_number,
+ 'timestamp': str(build.get_timestamp())
+ }
+ url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
+ if _BUILDS[build_name]: # The build has matrix, such as gRPC_master.
+ build_result['matrix'] = _process_matrix(build, url_base)
+ else:
+ json_url = '%s/testReport/api/json' % url_base
+ console_url = '%s/consoleFull' % url_base
+ build_result['duration'] = build.get_duration().total_seconds()
+ build_stat = _process_build(json_url, console_url)
+ build_result.update(build_stat)
+ rows = [big_query_utils.make_row(build_number, build_result)]
+ if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+ build_name, rows):
+ print('====> Error uploading result to bigquery.')
+ sys.exit(1)
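
_scrape_for_known_errors, reformatted above, simply counts regex hits per known pattern and keeps the non-zero counts. A minimal sketch of that idea is below; the sample log text and patterns are made up and stand in for _KNOWN_ERRORS.

    # Minimal sketch of the known-error counting in _scrape_for_known_errors;
    # the sample patterns and log text below are made up, not _KNOWN_ERRORS.
    import re

    def scrape_for_known_errors(html, known_errors):
        """Return [{'description': pattern, 'count': n}] for patterns with hits."""
        error_list = []
        for known_error in known_errors:
            count = len(re.findall(known_error, html))
            if count > 0:
                error_list.append({'description': known_error, 'count': count})
                print('====> %d failures due to %s' % (count, known_error))
        return error_list

    # Example:
    if __name__ == '__main__':
        log = 'FATAL: command error\nFATAL: command error\ntimeout exceeded\n'
        print(scrape_for_known_errors(log, [r'FATAL: command error', r'oom-killer']))
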
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 8f46ea99fd..99f4298813 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Run interop (cross-language) tests in parallel."""
from __future__ import print_function
@@ -37,9 +36,9 @@ import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
# It's ok to not import because this is only necessary to upload results to BQ.
try:
- from python_utils.upload_test_results import upload_interop_results_to_bq
+ from python_utils.upload_test_results import upload_interop_results_to_bq
except ImportError as e:
- print(e)
+ print(e)
# Docker doesn't clean up after itself, so we do it on exit.
atexit.register(lambda: subprocess.call(['stty', 'echo']))
@@ -47,22 +46,24 @@ atexit.register(lambda: subprocess.call(['stty', 'echo']))
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)
-_DEFAULT_SERVER_PORT=8080
+_DEFAULT_SERVER_PORT = 8080
-_SKIP_CLIENT_COMPRESSION = ['client_compressed_unary',
- 'client_compressed_streaming']
+_SKIP_CLIENT_COMPRESSION = [
+ 'client_compressed_unary', 'client_compressed_streaming'
+]
-_SKIP_SERVER_COMPRESSION = ['server_compressed_unary',
- 'server_compressed_streaming']
+_SKIP_SERVER_COMPRESSION = [
+ 'server_compressed_unary', 'server_compressed_streaming'
+]
_SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION
-_SKIP_ADVANCED = ['status_code_and_message',
- 'custom_metadata',
- 'unimplemented_method',
- 'unimplemented_service']
+_SKIP_ADVANCED = [
+ 'status_code_and_message', 'custom_metadata', 'unimplemented_method',
+ 'unimplemented_service'
+]
-_TEST_TIMEOUT = 3*60
+_TEST_TIMEOUT = 3 * 60
# disable this test on core-based languages,
# see https://github.com/grpc/grpc/issues/9779
@@ -77,977 +78,1054 @@ _XML_REPORT = 'report.xml'
class CXXLanguage:
- def __init__(self):
- self.client_cwd = None
- self.server_cwd = None
- self.http2_cwd = None
- self.safename = 'cxx'
+ def __init__(self):
+ self.client_cwd = None
+ self.server_cwd = None
+ self.http2_cwd = None
+ self.safename = 'cxx'
- def client_cmd(self, args):
- return ['bins/opt/interop_client'] + args
+ def client_cmd(self, args):
+ return ['bins/opt/interop_client'] + args
- def client_cmd_http2interop(self, args):
- return ['bins/opt/http2_client'] + args
+ def client_cmd_http2interop(self, args):
+ return ['bins/opt/http2_client'] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def server_cmd(self, args):
- return ['bins/opt/interop_server'] + args
+ def server_cmd(self, args):
+ return ['bins/opt/interop_server'] + args
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return []
+ def unimplemented_test_cases_server(self):
+ return []
- def __str__(self):
- return 'c++'
+ def __str__(self):
+ return 'c++'
class CSharpLanguage:
- def __init__(self):
- self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
- self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
+ self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
+ def client_cmd(self, args):
+ return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def server_cmd(self, args):
- return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
+ def server_cmd(self, args):
+ return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
- def __str__(self):
- return 'csharp'
+ def __str__(self):
+ return 'csharp'
class CSharpCoreCLRLanguage:
- def __init__(self):
- self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0'
- self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0'
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0'
+ self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0'
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
+ def client_cmd(self, args):
+ return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def server_cmd(self, args):
- return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
+ def server_cmd(self, args):
+ return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
- def __str__(self):
- return 'csharpcoreclr'
+ def __str__(self):
+ return 'csharpcoreclr'
class JavaLanguage:
- def __init__(self):
- self.client_cwd = '../grpc-java'
- self.server_cwd = '../grpc-java'
- self.http2_cwd = '../grpc-java'
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = '../grpc-java'
+ self.server_cwd = '../grpc-java'
+ self.http2_cwd = '../grpc-java'
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['./run-test-client.sh'] + args
+ def client_cmd(self, args):
+ return ['./run-test-client.sh'] + args
- def client_cmd_http2interop(self, args):
- return ['./interop-testing/build/install/grpc-interop-testing/bin/http2-client'] + args
+ def client_cmd_http2interop(self, args):
+ return [
+ './interop-testing/build/install/grpc-interop-testing/bin/http2-client'
+ ] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def server_cmd(self, args):
- return ['./run-test-server.sh'] + args
+ def server_cmd(self, args):
+ return ['./run-test-server.sh'] + args
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return []
+ def unimplemented_test_cases(self):
+ return []
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
- def __str__(self):
- return 'java'
+ def __str__(self):
+ return 'java'
class JavaOkHttpClient:
- def __init__(self):
- self.client_cwd = '../grpc-java'
- self.safename = 'java'
+ def __init__(self):
+ self.client_cwd = '../grpc-java'
+ self.safename = 'java'
- def client_cmd(self, args):
- return ['./run-test-client.sh', '--use_okhttp=true'] + args
+ def client_cmd(self, args):
+ return ['./run-test-client.sh', '--use_okhttp=true'] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_DATA_FRAME_PADDING
- def __str__(self):
- return 'javaokhttp'
+ def __str__(self):
+ return 'javaokhttp'
class GoLanguage:
- def __init__(self):
- # TODO: this relies on running inside docker
- self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
- self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
- self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
- self.safename = str(self)
+ def __init__(self):
+ # TODO: this relies on running inside docker
+ self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
+ self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
+ self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return ['go', 'run', 'client.go'] + args
- def client_cmd(self, args):
- return ['go', 'run', 'client.go'] + args
+ def client_cmd_http2interop(self, args):
+ return ['go', 'run', 'negative_http2_client.go'] + args
- def client_cmd_http2interop(self, args):
- return ['go', 'run', 'negative_http2_client.go'] + args
+ def cloud_to_prod_env(self):
+ return {}
- def cloud_to_prod_env(self):
- return {}
+ def server_cmd(self, args):
+ return ['go', 'run', 'server.go'] + args
- def server_cmd(self, args):
- return ['go', 'run', 'server.go'] + args
+ def global_env(self):
+ return {}
- def global_env(self):
- return {}
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION
- def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def __str__(self):
+ return 'go'
- def __str__(self):
- return 'go'
class Http2Server:
- """Represents the HTTP/2 Interop Test server
+ """Represents the HTTP/2 Interop Test server
This pretends to be a language in order to be built and run, but really it
isn't.
"""
- def __init__(self):
- self.server_cwd = None
- self.safename = str(self)
- def server_cmd(self, args):
- return ['python test/http2_test/http2_test_server.py']
+ def __init__(self):
+ self.server_cwd = None
+ self.safename = str(self)
+
+ def server_cmd(self, args):
+ return ['python test/http2_test/http2_test_server.py']
+
+ def cloud_to_prod_env(self):
+ return {}
- def cloud_to_prod_env(self):
- return {}
+ def global_env(self):
+ return {}
- def global_env(self):
- return {}
+ def unimplemented_test_cases(self):
+ return _TEST_CASES + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases(self):
- return _TEST_CASES + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases_server(self):
+ return _TEST_CASES
- def unimplemented_test_cases_server(self):
- return _TEST_CASES
+ def __str__(self):
+ return 'http2'
- def __str__(self):
- return 'http2'
class Http2Client:
- """Represents the HTTP/2 Interop Test
+ """Represents the HTTP/2 Interop Test
This pretends to be a language in order to be built and run, but really it
isn't.
"""
- def __init__(self):
- self.client_cwd = None
- self.safename = str(self)
- def client_cmd(self, args):
- return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
+ def __init__(self):
+ self.client_cwd = None
+ self.safename = str(self)
- def cloud_to_prod_env(self):
- return {}
+ def client_cmd(self, args):
+ return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
- def global_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _TEST_CASES
+ def global_env(self):
+ return {}
- def unimplemented_test_cases_server(self):
- return _TEST_CASES
+ def unimplemented_test_cases(self):
+ return _TEST_CASES
+
+ def unimplemented_test_cases_server(self):
+ return _TEST_CASES
+
+ def __str__(self):
+ return 'http2'
- def __str__(self):
- return 'http2'
class NodeLanguage:
- def __init__(self):
- self.client_cwd = '../grpc-node'
- self.server_cwd = '../grpc-node'
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = '../grpc-node'
+ self.server_cwd = '../grpc-node'
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+ def client_cmd(self, args):
+ return [
+ 'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
'node', '--require', './test/fixtures/native_native',
- 'test/interop/interop_client.js'] + args
+ 'test/interop/interop_client.js'
+ ] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def server_cmd(self, args):
- return ['packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+ def server_cmd(self, args):
+ return [
+ 'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
'node', '--require', './test/fixtures/native_native',
- 'test/interop/interop_server.js'] + args
+ 'test/interop/interop_server.js'
+ ] + args
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
- def __str__(self):
- return 'node'
+ def __str__(self):
+ return 'node'
class PHPLanguage:
- def __init__(self):
- self.client_cwd = None
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = None
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['src/php/bin/interop_client.sh'] + args
+ def client_cmd(self, args):
+ return ['src/php/bin/interop_client.sh'] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return []
+ def unimplemented_test_cases_server(self):
+ return []
- def __str__(self):
- return 'php'
+ def __str__(self):
+ return 'php'
class PHP7Language:
- def __init__(self):
- self.client_cwd = None
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = None
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['src/php/bin/interop_client.sh'] + args
+ def client_cmd(self, args):
+ return ['src/php/bin/interop_client.sh'] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return []
+ def unimplemented_test_cases_server(self):
+ return []
+
+ def __str__(self):
+ return 'php7'
- def __str__(self):
- return 'php7'
class ObjcLanguage:
- def __init__(self):
- self.client_cwd = 'src/objective-c/tests'
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = 'src/objective-c/tests'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ # from args, extract the server port and craft xcodebuild command out of it
+ for arg in args:
+ port = re.search('--server_port=(\d+)', arg)
+ if port:
+ portnum = port.group(1)
+ cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL -destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test' % portnum
+ return [cmdline]
- def client_cmd(self, args):
- # from args, extract the server port and craft xcodebuild command out of it
- for arg in args:
- port = re.search('--server_port=(\d+)', arg)
- if port:
- portnum = port.group(1)
- cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL -destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test'%portnum
- return [cmdline]
+ def cloud_to_prod_env(self):
+ return {}
- def cloud_to_prod_env(self):
- return {}
+ def global_env(self):
+ return {}
- def global_env(self):
- return {}
+ def unimplemented_test_cases(self):
+ # ObjC test runs all cases with the same command. It ignores the testcase
+ # cmdline argument. Here we return all but one test cases as unimplemented,
+ # and depend upon ObjC test's behavior that it runs all cases even when
+ # we tell it to run just one.
+ return _TEST_CASES[1:] + _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases(self):
- # ObjC test runs all cases with the same command. It ignores the testcase
- # cmdline argument. Here we return all but one test cases as unimplemented,
- # and depend upon ObjC test's behavior that it runs all cases even when
- # we tell it to run just one.
- return _TEST_CASES[1:] + _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def __str__(self):
+ return 'objc'
- def __str__(self):
- return 'objc'
class RubyLanguage:
- def __init__(self):
- self.client_cwd = None
- self.server_cwd = None
- self.safename = str(self)
+ def __init__(self):
+ self.client_cwd = None
+ self.server_cwd = None
+ self.safename = str(self)
- def client_cmd(self, args):
- return ['tools/run_tests/interop/with_rvm.sh',
- 'ruby', 'src/ruby/pb/test/client.rb'] + args
+ def client_cmd(self, args):
+ return [
+ 'tools/run_tests/interop/with_rvm.sh', 'ruby',
+ 'src/ruby/pb/test/client.rb'
+ ] + args
- def cloud_to_prod_env(self):
- return {}
+ def cloud_to_prod_env(self):
+ return {}
- def server_cmd(self, args):
- return ['tools/run_tests/interop/with_rvm.sh',
- 'ruby', 'src/ruby/pb/test/server.rb'] + args
+ def server_cmd(self, args):
+ return [
+ 'tools/run_tests/interop/with_rvm.sh', 'ruby',
+ 'src/ruby/pb/test/server.rb'
+ ] + args
- def global_env(self):
- return {}
+ def global_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def unimplemented_test_cases(self):
+ return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
+
+ def __str__(self):
+ return 'ruby'
- def __str__(self):
- return 'ruby'
class PythonLanguage:
- def __init__(self):
- self.client_cwd = None
- self.server_cwd = None
- self.http2_cwd = None
- self.safename = str(self)
-
- def client_cmd(self, args):
- return [
- 'py27/bin/python',
- 'src/python/grpcio_tests/setup.py',
- 'run_interop',
- '--client',
- '--args="{}"'.format(' '.join(args))
- ]
+ def __init__(self):
+ self.client_cwd = None
+ self.server_cwd = None
+ self.http2_cwd = None
+ self.safename = str(self)
- def client_cmd_http2interop(self, args):
- return [ 'py27/bin/python',
- 'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
- ] + args
-
- def cloud_to_prod_env(self):
- return {}
-
- def server_cmd(self, args):
- return [
- 'py27/bin/python',
- 'src/python/grpcio_tests/setup.py',
- 'run_interop',
- '--server',
- '--args="{}"'.format(' '.join(args))
- ]
+ def client_cmd(self, args):
+ return [
+ 'py27/bin/python', 'src/python/grpcio_tests/setup.py',
+ 'run_interop', '--client', '--args="{}"'.format(' '.join(args))
+ ]
+
+ def client_cmd_http2interop(self, args):
+ return [
+ 'py27/bin/python',
+ 'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
+ ] + args
- def global_env(self):
- return {'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
- 'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)}
+ def cloud_to_prod_env(self):
+ return {}
- def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+ def server_cmd(self, args):
+ return [
+ 'py27/bin/python', 'src/python/grpcio_tests/setup.py',
+ 'run_interop', '--server', '--args="{}"'.format(' '.join(args))
+ ]
- def unimplemented_test_cases_server(self):
- return _SKIP_COMPRESSION
+ def global_env(self):
+ return {
+ 'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
+ 'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)
+ }
- def __str__(self):
- return 'python'
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
+
+ def __str__(self):
+ return 'python'
_LANGUAGES = {
- 'c++' : CXXLanguage(),
- 'csharp' : CSharpLanguage(),
- 'csharpcoreclr' : CSharpCoreCLRLanguage(),
- 'go' : GoLanguage(),
- 'java' : JavaLanguage(),
- 'javaokhttp' : JavaOkHttpClient(),
- 'node' : NodeLanguage(),
- 'php' : PHPLanguage(),
- 'php7' : PHP7Language(),
- 'objc' : ObjcLanguage(),
- 'ruby' : RubyLanguage(),
- 'python' : PythonLanguage(),
+ 'c++': CXXLanguage(),
+ 'csharp': CSharpLanguage(),
+ 'csharpcoreclr': CSharpCoreCLRLanguage(),
+ 'go': GoLanguage(),
+ 'java': JavaLanguage(),
+ 'javaokhttp': JavaOkHttpClient(),
+ 'node': NodeLanguage(),
+ 'php': PHPLanguage(),
+ 'php7': PHP7Language(),
+ 'objc': ObjcLanguage(),
+ 'ruby': RubyLanguage(),
+ 'python': PythonLanguage(),
}
# languages supported as cloud_to_cloud servers
-_SERVERS = ['c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python']
-
-_TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
- 'empty_stream', 'client_streaming', 'server_streaming',
- 'cancel_after_begin', 'cancel_after_first_response',
- 'timeout_on_sleeping_server', 'custom_metadata',
- 'status_code_and_message', 'unimplemented_method',
- 'client_compressed_unary', 'server_compressed_unary',
- 'client_compressed_streaming', 'server_compressed_streaming',
- 'unimplemented_service']
-
-_AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds',
- 'oauth2_auth_token', 'per_rpc_creds']
+_SERVERS = [
+ 'c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python'
+]
+
+_TEST_CASES = [
+ 'large_unary', 'empty_unary', 'ping_pong', 'empty_stream',
+ 'client_streaming', 'server_streaming', 'cancel_after_begin',
+ 'cancel_after_first_response', 'timeout_on_sleeping_server',
+ 'custom_metadata', 'status_code_and_message', 'unimplemented_method',
+ 'client_compressed_unary', 'server_compressed_unary',
+ 'client_compressed_streaming', 'server_compressed_streaming',
+ 'unimplemented_service'
+]
+
+_AUTH_TEST_CASES = [
+ 'compute_engine_creds', 'jwt_token_creds', 'oauth2_auth_token',
+ 'per_rpc_creds'
+]
_HTTP2_TEST_CASES = ['tls', 'framing']
-_HTTP2_SERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
- 'goaway', 'ping', 'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test']
+_HTTP2_SERVER_TEST_CASES = [
+ 'rst_after_header', 'rst_after_data', 'rst_during_data', 'goaway', 'ping',
+ 'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test'
+]
-_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = { 'data_frame_padding': 'large_unary', 'no_df_padding_sanity_test': 'large_unary' }
+_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = {
+ 'data_frame_padding': 'large_unary',
+ 'no_df_padding_sanity_test': 'large_unary'
+}
-_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys()
+_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys(
+)
-_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = ['java', 'go', 'python', 'c++']
+_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = [
+ 'java', 'go', 'python', 'c++'
+]
DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
+
def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
- """Wraps given cmdline array to create 'docker run' cmdline from it."""
- docker_cmdline = ['docker', 'run', '-i', '--rm=true']
+ """Wraps given cmdline array to create 'docker run' cmdline from it."""
+ docker_cmdline = ['docker', 'run', '-i', '--rm=true']
- # turn environ into -e docker args
- if environ:
- for k,v in environ.items():
- docker_cmdline += ['-e', '%s=%s' % (k,v)]
+ # turn environ into -e docker args
+ if environ:
+ for k, v in environ.items():
+ docker_cmdline += ['-e', '%s=%s' % (k, v)]
- # set working directory
- workdir = DOCKER_WORKDIR_ROOT
- if cwd:
- workdir = os.path.join(workdir, cwd)
- docker_cmdline += ['-w', workdir]
+ # set working directory
+ workdir = DOCKER_WORKDIR_ROOT
+ if cwd:
+ workdir = os.path.join(workdir, cwd)
+ docker_cmdline += ['-w', workdir]
- docker_cmdline += docker_args + [image] + cmdline
- return docker_cmdline
+ docker_cmdline += docker_args + [image] + cmdline
+ return docker_cmdline
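For reference, a minimal illustrative sketch of what the wrapper above produces; the image tag, script path and environment below are hypothetical:

    cmdline = ['bash', '-c', './run_client.sh --use_tls=true']
    docker_cmd = docker_run_cmdline(
        cmdline,
        image='grpc_interop_python:example-tag',          # hypothetical tag
        cwd='src/python',                                  # joined onto DOCKER_WORKDIR_ROOT
        environ={'LD_LIBRARY_PATH': '/var/local/git/grpc/libs/opt'},
        docker_args=['--net=host'])
    # docker_cmd ==
    #   ['docker', 'run', '-i', '--rm=true',
    #    '-e', 'LD_LIBRARY_PATH=/var/local/git/grpc/libs/opt',
    #    '-w', '/var/local/git/grpc/src/python', '--net=host',
    #    'grpc_interop_python:example-tag', 'bash', '-c', './run_client.sh --use_tls=true']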
def manual_cmdline(docker_cmdline, docker_image):
- """Returns docker cmdline adjusted for manual invocation."""
- print_cmdline = []
- for item in docker_cmdline:
- if item.startswith('--name='):
- continue
- if item == docker_image:
- item = "$docker_image"
- item = item.replace('"', '\\"')
- # add quotes when necessary
- if any(character.isspace() for character in item):
- item = "\"%s\"" % item
- print_cmdline.append(item)
- return ' '.join(print_cmdline)
+ """Returns docker cmdline adjusted for manual invocation."""
+ print_cmdline = []
+ for item in docker_cmdline:
+ if item.startswith('--name='):
+ continue
+ if item == docker_image:
+ item = "$docker_image"
+ item = item.replace('"', '\\"')
+ # add quotes when necessary
+ if any(character.isspace() for character in item):
+ item = "\"%s\"" % item
+ print_cmdline.append(item)
+ return ' '.join(print_cmdline)
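A small, hypothetical illustration of the substitution performed above (the container name and image tag are made up):

    docker_cmd = ['docker', 'run', '-i', '--rm=true',
                  '--name=interop_client_python_abc',      # dropped below
                  'grpc_interop_python:tag', 'bash', '-c', 'echo hello world']
    manual_cmdline(docker_cmd, 'grpc_interop_python:tag')
    # -> 'docker run -i --rm=true $docker_image bash -c "echo hello world"'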
def write_cmdlog_maybe(cmdlog, filename):
- """Returns docker cmdline adjusted for manual invocation."""
- if cmdlog:
- with open(filename, 'w') as logfile:
- logfile.write('#!/bin/bash\n')
- logfile.writelines("%s\n" % line for line in cmdlog)
- print('Command log written to file %s' % filename)
+ """Returns docker cmdline adjusted for manual invocation."""
+ if cmdlog:
+ with open(filename, 'w') as logfile:
+ logfile.write('#!/bin/bash\n')
+ logfile.writelines("%s\n" % line for line in cmdlog)
+ print('Command log written to file %s' % filename)
def bash_cmdline(cmdline):
- """Creates bash -c cmdline from args list."""
- # Use login shell:
- # * makes error messages clearer if executables are missing
- return ['bash', '-c', ' '.join(cmdline)]
+ """Creates bash -c cmdline from args list."""
+ # Use login shell:
+ # * makes error messages clearer if executables are missing
+ return ['bash', '-c', ' '.join(cmdline)]
def auth_options(language, test_case):
- """Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
+ """Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
- language = str(language)
- cmdargs = []
- env = {}
+ language = str(language)
+ cmdargs = []
+ env = {}
- # TODO(jtattermusch): this file path only works inside docker
- key_filepath = '/root/service_account/GrpcTesting-726eb1347f15.json'
- oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
- key_file_arg = '--service_account_key_file=%s' % key_filepath
- default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'
+ # TODO(jtattermusch): this file path only works inside docker
+ key_filepath = '/root/service_account/GrpcTesting-726eb1347f15.json'
+ oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
+ key_file_arg = '--service_account_key_file=%s' % key_filepath
+ default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'
- if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
- if language in ['csharp', 'csharpcoreclr', 'node', 'php', 'php7', 'python', 'ruby']:
- env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
- else:
- cmdargs += [key_file_arg]
+ if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
+ if language in [
+ 'csharp', 'csharpcoreclr', 'node', 'php', 'php7', 'python',
+ 'ruby'
+ ]:
+ env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
+ else:
+ cmdargs += [key_file_arg]
- if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
- cmdargs += [oauth_scope_arg]
+ if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
+ cmdargs += [oauth_scope_arg]
- if test_case == 'oauth2_auth_token' and language == 'c++':
- # C++ oauth2 test uses GCE creds and thus needs to know the default account
- cmdargs += [default_account_arg]
+ if test_case == 'oauth2_auth_token' and language == 'c++':
+ # C++ oauth2 test uses GCE creds and thus needs to know the default account
+ cmdargs += [default_account_arg]
- if test_case == 'compute_engine_creds':
- cmdargs += [oauth_scope_arg, default_account_arg]
+ if test_case == 'compute_engine_creds':
+ cmdargs += [oauth_scope_arg, default_account_arg]
- return (cmdargs, env)
+ return (cmdargs, env)
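As a hedged example of the branches above, a C++ client running the oauth2_auth_token case gets the key file, the OAuth scope and the default account as flags, and no extra environment variables:

    cmdargs, env = auth_options('c++', 'oauth2_auth_token')
    # cmdargs == [
    #     '--service_account_key_file=/root/service_account/GrpcTesting-726eb1347f15.json',
    #     '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo',
    #     '--default_service_account=830293263384-compute@developer.gserviceaccount.com',
    # ]
    # env == {}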
def _job_kill_handler(job):
- if job._spec.container_name:
- dockerjob.docker_kill(job._spec.container_name)
- # When the job times out and we decide to kill it,
- # we need to wait a before restarting the job
- # to prevent "container name already in use" error.
- # TODO(jtattermusch): figure out a cleaner way to to this.
- time.sleep(2)
-
-
-def cloud_to_prod_jobspec(language, test_case, server_host_name,
- server_host_detail, docker_image=None, auth=False,
+ if job._spec.container_name:
+ dockerjob.docker_kill(job._spec.container_name)
+ # When the job times out and we decide to kill it,
+        # we need to wait a while before restarting the job
+        # to prevent "container name already in use" error.
+        # TODO(jtattermusch): figure out a cleaner way to do this.
+ time.sleep(2)
+
+
+def cloud_to_prod_jobspec(language,
+ test_case,
+ server_host_name,
+ server_host_detail,
+ docker_image=None,
+ auth=False,
manual_cmd_log=None):
- """Creates jobspec for cloud-to-prod interop test"""
- container_name = None
- cmdargs = [
- '--server_host=%s' % server_host_detail[0],
- '--server_host_override=%s' % server_host_detail[1],
- '--server_port=443',
- '--use_tls=true',
- '--test_case=%s' % test_case]
- environ = dict(language.cloud_to_prod_env(), **language.global_env())
- if auth:
- auth_cmdargs, auth_env = auth_options(language, test_case)
- cmdargs += auth_cmdargs
- environ.update(auth_env)
- cmdline = bash_cmdline(language.client_cmd(cmdargs))
- cwd = language.client_cwd
-
- if docker_image:
- container_name = dockerjob.random_name('interop_client_%s' %
- language.safename)
- cmdline = docker_run_cmdline(cmdline,
- image=docker_image,
- cwd=cwd,
- environ=environ,
- docker_args=['--net=host',
- '--name=%s' % container_name])
- if manual_cmd_log is not None:
- if manual_cmd_log == []:
- manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
- manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
- cwd = None
- environ = None
-
- suite_name='cloud_to_prod_auth' if auth else 'cloud_to_prod'
- test_job = jobset.JobSpec(
- cmdline=cmdline,
- cwd=cwd,
- environ=environ,
- shortname='%s:%s:%s:%s' % (suite_name, language, server_host_name,
- test_case),
- timeout_seconds=_TEST_TIMEOUT,
- flake_retries=4 if args.allow_flakes else 0,
- timeout_retries=2 if args.allow_flakes else 0,
- kill_handler=_job_kill_handler)
- if docker_image:
- test_job.container_name = container_name
- return test_job
-
-
-def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
- server_port, docker_image=None, insecure=False,
- manual_cmd_log=None):
- """Creates jobspec for cloud-to-cloud interop test"""
- interop_only_options = [
- '--server_host_override=foo.test.google.fr',
- '--use_tls=%s' % ('false' if insecure else 'true'),
- '--use_test_ca=true',
- ]
-
- client_test_case = test_case
- if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
- client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[test_case]
- if client_test_case in language.unimplemented_test_cases():
- print('asking client %s to run unimplemented test case %s' % (repr(language), client_test_case))
- sys.exit(1)
+ """Creates jobspec for cloud-to-prod interop test"""
+ container_name = None
+ cmdargs = [
+ '--server_host=%s' % server_host_detail[0],
+ '--server_host_override=%s' % server_host_detail[1],
+ '--server_port=443', '--use_tls=true', '--test_case=%s' % test_case
+ ]
+ environ = dict(language.cloud_to_prod_env(), **language.global_env())
+ if auth:
+ auth_cmdargs, auth_env = auth_options(language, test_case)
+ cmdargs += auth_cmdargs
+ environ.update(auth_env)
+ cmdline = bash_cmdline(language.client_cmd(cmdargs))
+ cwd = language.client_cwd
- common_options = [
- '--test_case=%s' % client_test_case,
- '--server_host=%s' % server_host,
- '--server_port=%s' % server_port,
- ]
+ if docker_image:
+ container_name = dockerjob.random_name('interop_client_%s' %
+ language.safename)
+ cmdline = docker_run_cmdline(
+ cmdline,
+ image=docker_image,
+ cwd=cwd,
+ environ=environ,
+ docker_args=['--net=host', '--name=%s' % container_name])
+ if manual_cmd_log is not None:
+ if manual_cmd_log == []:
+ manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+ docker_image)
+ manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
+ cwd = None
+ environ = None
+
+ suite_name = 'cloud_to_prod_auth' if auth else 'cloud_to_prod'
+ test_job = jobset.JobSpec(
+ cmdline=cmdline,
+ cwd=cwd,
+ environ=environ,
+ shortname='%s:%s:%s:%s' % (suite_name, language, server_host_name,
+ test_case),
+ timeout_seconds=_TEST_TIMEOUT,
+ flake_retries=4 if args.allow_flakes else 0,
+ timeout_retries=2 if args.allow_flakes else 0,
+ kill_handler=_job_kill_handler)
+ if docker_image:
+ test_job.container_name = container_name
+ return test_job
+
+
+def cloud_to_cloud_jobspec(language,
+ test_case,
+ server_name,
+ server_host,
+ server_port,
+ docker_image=None,
+ insecure=False,
+ manual_cmd_log=None):
+ """Creates jobspec for cloud-to-cloud interop test"""
+ interop_only_options = [
+ '--server_host_override=foo.test.google.fr',
+ '--use_tls=%s' % ('false' if insecure else 'true'),
+ '--use_test_ca=true',
+ ]
- if test_case in _HTTP2_SERVER_TEST_CASES:
+ client_test_case = test_case
if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
- client_options = interop_only_options + common_options
- cmdline = bash_cmdline(language.client_cmd(client_options))
- cwd = language.client_cwd
- else:
- cmdline = bash_cmdline(language.client_cmd_http2interop(common_options))
- cwd = language.http2_cwd
- else:
- cmdline = bash_cmdline(language.client_cmd(common_options+interop_only_options))
- cwd = language.client_cwd
+ client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[
+ test_case]
+ if client_test_case in language.unimplemented_test_cases():
+ print('asking client %s to run unimplemented test case %s' %
+ (repr(language), client_test_case))
+ sys.exit(1)
+
+ common_options = [
+ '--test_case=%s' % client_test_case,
+ '--server_host=%s' % server_host,
+ '--server_port=%s' % server_port,
+ ]
- environ = language.global_env()
- if docker_image and language.safename != 'objc':
- # we can't run client in docker for objc.
- container_name = dockerjob.random_name('interop_client_%s' % language.safename)
- cmdline = docker_run_cmdline(cmdline,
- image=docker_image,
- environ=environ,
- cwd=cwd,
- docker_args=['--net=host',
- '--name=%s' % container_name])
- if manual_cmd_log is not None:
- if manual_cmd_log == []:
- manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
- manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
- cwd = None
-
- test_job = jobset.JobSpec(
- cmdline=cmdline,
- cwd=cwd,
- environ=environ,
- shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
- test_case),
- timeout_seconds=_TEST_TIMEOUT,
- flake_retries=4 if args.allow_flakes else 0,
- timeout_retries=2 if args.allow_flakes else 0,
- kill_handler=_job_kill_handler)
- if docker_image:
- test_job.container_name = container_name
- return test_job
+ if test_case in _HTTP2_SERVER_TEST_CASES:
+ if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+ client_options = interop_only_options + common_options
+ cmdline = bash_cmdline(language.client_cmd(client_options))
+ cwd = language.client_cwd
+ else:
+ cmdline = bash_cmdline(
+ language.client_cmd_http2interop(common_options))
+ cwd = language.http2_cwd
+ else:
+ cmdline = bash_cmdline(
+ language.client_cmd(common_options + interop_only_options))
+ cwd = language.client_cwd
+
+ environ = language.global_env()
+ if docker_image and language.safename != 'objc':
+ # we can't run client in docker for objc.
+ container_name = dockerjob.random_name('interop_client_%s' %
+ language.safename)
+ cmdline = docker_run_cmdline(
+ cmdline,
+ image=docker_image,
+ environ=environ,
+ cwd=cwd,
+ docker_args=['--net=host', '--name=%s' % container_name])
+ if manual_cmd_log is not None:
+ if manual_cmd_log == []:
+ manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+ docker_image)
+ manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
+ cwd = None
+
+ test_job = jobset.JobSpec(
+ cmdline=cmdline,
+ cwd=cwd,
+ environ=environ,
+ shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
+ test_case),
+ timeout_seconds=_TEST_TIMEOUT,
+ flake_retries=4 if args.allow_flakes else 0,
+ timeout_retries=2 if args.allow_flakes else 0,
+ kill_handler=_job_kill_handler)
+ if docker_image:
+ test_job.container_name = container_name
+ return test_job
def server_jobspec(language, docker_image, insecure=False, manual_cmd_log=None):
- """Create jobspec for running a server"""
- container_name = dockerjob.random_name('interop_server_%s' % language.safename)
- cmdline = bash_cmdline(
- language.server_cmd(['--port=%s' % _DEFAULT_SERVER_PORT,
- '--use_tls=%s' % ('false' if insecure else 'true')]))
- environ = language.global_env()
- docker_args = ['--name=%s' % container_name]
- if language.safename == 'http2':
- # we are running the http2 interop server. Open next N ports beginning
- # with the server port. These ports are used for http2 interop test
- # (one test case per port).
- docker_args += list(
- itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
- for i in range(
- len(_HTTP2_SERVER_TEST_CASES))))
- # Enable docker's healthcheck mechanism.
- # This runs a Python script inside the container every second. The script
- # pings the http2 server to verify it is ready. The 'health-retries' flag
- # specifies the number of consecutive failures before docker will report
- # the container's status as 'unhealthy'. Prior to the first 'health_retries'
- # failures or the first success, the status will be 'starting'. 'docker ps'
- # or 'docker inspect' can be used to see the health of the container on the
- # command line.
- docker_args += [
- '--health-cmd=python test/http2_test/http2_server_health_check.py '
- '--server_host=%s --server_port=%d'
- % ('localhost', _DEFAULT_SERVER_PORT),
- '--health-interval=1s',
- '--health-retries=5',
- '--health-timeout=10s',
- ]
+ """Create jobspec for running a server"""
+ container_name = dockerjob.random_name('interop_server_%s' %
+ language.safename)
+ cmdline = bash_cmdline(
+ language.server_cmd([
+ '--port=%s' % _DEFAULT_SERVER_PORT, '--use_tls=%s' % (
+ 'false' if insecure else 'true')
+ ]))
+ environ = language.global_env()
+ docker_args = ['--name=%s' % container_name]
+ if language.safename == 'http2':
+ # we are running the http2 interop server. Open next N ports beginning
+ # with the server port. These ports are used for http2 interop test
+ # (one test case per port).
+ docker_args += list(
+ itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
+ for i in range(
+ len(_HTTP2_SERVER_TEST_CASES))))
+ # Enable docker's healthcheck mechanism.
+ # This runs a Python script inside the container every second. The script
+ # pings the http2 server to verify it is ready. The 'health-retries' flag
+ # specifies the number of consecutive failures before docker will report
+ # the container's status as 'unhealthy'. Prior to the first 'health_retries'
+ # failures or the first success, the status will be 'starting'. 'docker ps'
+ # or 'docker inspect' can be used to see the health of the container on the
+ # command line.
+ docker_args += [
+ '--health-cmd=python test/http2_test/http2_server_health_check.py '
+ '--server_host=%s --server_port=%d' %
+ ('localhost', _DEFAULT_SERVER_PORT),
+ '--health-interval=1s',
+ '--health-retries=5',
+ '--health-timeout=10s',
+ ]
- else:
- docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
-
- docker_cmdline = docker_run_cmdline(cmdline,
- image=docker_image,
- cwd=language.server_cwd,
- environ=environ,
- docker_args=docker_args)
- if manual_cmd_log is not None:
- if manual_cmd_log == []:
- manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
- manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
- server_job = jobset.JobSpec(
- cmdline=docker_cmdline,
- environ=environ,
- shortname='interop_server_%s' % language,
- timeout_seconds=30*60)
- server_job.container_name = container_name
- return server_job
+ else:
+ docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
+
+ docker_cmdline = docker_run_cmdline(
+ cmdline,
+ image=docker_image,
+ cwd=language.server_cwd,
+ environ=environ,
+ docker_args=docker_args)
+ if manual_cmd_log is not None:
+ if manual_cmd_log == []:
+ manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+ docker_image)
+ manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
+ server_job = jobset.JobSpec(
+ cmdline=docker_cmdline,
+ environ=environ,
+ shortname='interop_server_%s' % language,
+ timeout_seconds=30 * 60)
+ server_job.container_name = container_name
+ return server_job
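The --health-* flags above rely on docker's built-in healthcheck reporting; a minimal sketch (hypothetical helper, not part of this patch) of polling that status from Python:

    import json
    import subprocess

    def container_health(container_name):
        """Return docker's health status: 'starting', 'healthy' or 'unhealthy'."""
        out = subprocess.check_output(['docker', 'inspect', container_name])
        state = json.loads(out)[0]['State']
        # 'Health' is only present when a --health-cmd was configured
        return state.get('Health', {}).get('Status', 'unknown')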
def build_interop_image_jobspec(language, tag=None):
- """Creates jobspec for building interop docker image for a language"""
- if not tag:
- tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
- env = {'INTEROP_IMAGE': tag,
- 'BASE_NAME': 'grpc_interop_%s' % language.safename}
- if not args.travis:
- env['TTY_FLAG'] = '-t'
- # This env variable is used to get around the github rate limit
- # error when running the PHP `composer install` command
- host_file = '%s/.composer/auth.json' % os.environ['HOME']
- if language.safename == 'php' and os.path.exists(host_file):
- env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
- '-v %s:/root/.composer/auth.json:ro' % host_file
- build_job = jobset.JobSpec(
- cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
- environ=env,
- shortname='build_docker_%s' % (language),
- timeout_seconds=30*60)
- build_job.tag = tag
- return build_job
+ """Creates jobspec for building interop docker image for a language"""
+ if not tag:
+ tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
+ env = {
+ 'INTEROP_IMAGE': tag,
+ 'BASE_NAME': 'grpc_interop_%s' % language.safename
+ }
+ if not args.travis:
+ env['TTY_FLAG'] = '-t'
+ # This env variable is used to get around the github rate limit
+ # error when running the PHP `composer install` command
+ host_file = '%s/.composer/auth.json' % os.environ['HOME']
+ if language.safename == 'php' and os.path.exists(host_file):
+ env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
+ '-v %s:/root/.composer/auth.json:ro' % host_file
+ build_job = jobset.JobSpec(
+ cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
+ environ=env,
+ shortname='build_docker_%s' % (language),
+ timeout_seconds=30 * 60)
+ build_job.tag = tag
+ return build_job
def aggregate_http2_results(stdout):
- match = re.search(r'\{"cases[^\]]*\]\}', stdout)
- if not match:
- return None
-
- results = json.loads(match.group(0))
- skipped = 0
- passed = 0
- failed = 0
- failed_cases = []
- for case in results['cases']:
- if case.get('skipped', False):
- skipped += 1
- else:
- if case.get('passed', False):
- passed += 1
- else:
- failed += 1
- failed_cases.append(case.get('name', "NONAME"))
- return {
- 'passed': passed,
- 'failed': failed,
- 'skipped': skipped,
- 'failed_cases': ', '.join(failed_cases),
- 'percent': 1.0 * passed / (passed + failed)
- }
+ match = re.search(r'\{"cases[^\]]*\]\}', stdout)
+ if not match:
+ return None
+
+ results = json.loads(match.group(0))
+ skipped = 0
+ passed = 0
+ failed = 0
+ failed_cases = []
+ for case in results['cases']:
+ if case.get('skipped', False):
+ skipped += 1
+ else:
+ if case.get('passed', False):
+ passed += 1
+ else:
+ failed += 1
+ failed_cases.append(case.get('name', "NONAME"))
+ return {
+ 'passed': passed,
+ 'failed': failed,
+ 'skipped': skipped,
+ 'failed_cases': ', '.join(failed_cases),
+ 'percent': 1.0 * passed / (passed + failed)
+ }
+
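A hedged trace of the parser above on a made-up client log (the JSON fragment is illustrative, not real interop output):

    stdout = ('some log line\n'
              '{"cases": [{"name": "goaway", "passed": true},'
              ' {"name": "ping", "skipped": true}]}')
    aggregate_http2_results(stdout)
    # -> {'passed': 1, 'failed': 0, 'skipped': 1, 'failed_cases': '', 'percent': 1.0}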
# A dictionary of prod servers to test.
# Format: server_name: (server_host, server_host_override, errors_allowed)
# TODO(adelez): implement logic for errors_allowed where if the indicated tests
# fail, they don't impact the overall test result.
prod_servers = {
- 'default': ('216.239.32.254',
- 'grpc-test.sandbox.googleapis.com', False),
- 'gateway_v2': ('216.239.32.254',
- 'grpc-test2.sandbox.googleapis.com', True),
+ 'default': ('216.239.32.254', 'grpc-test.sandbox.googleapis.com', False),
+ 'gateway_v2': ('216.239.32.254', 'grpc-test2.sandbox.googleapis.com', True),
'cloud_gateway': ('216.239.32.255', 'grpc-test.sandbox.googleapis.com',
False),
'cloud_gateway_v2': ('216.239.32.255', 'grpc-test2.sandbox.googleapis.com',
True),
- 'gateway_v4': ('216.239.32.254',
- 'grpc-test4.sandbox.googleapis.com', True),
+ 'gateway_v4': ('216.239.32.254', 'grpc-test4.sandbox.googleapis.com', True),
'cloud_gateway_v4': ('216.239.32.255', 'grpc-test4.sandbox.googleapis.com',
True),
}
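The tuple layout is easiest to see by unpacking one entry (an illustrative sketch, not part of the patch):

    server_host, server_host_override, errors_allowed = prod_servers['default']
    # cloud_to_prod_jobspec uses the first two fields as
    #   --server_host=216.239.32.254
    #   --server_host_override=grpc-test.sandbox.googleapis.com
    # errors_allowed is not consumed yet (see the TODO above).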
argp = argparse.ArgumentParser(description='Run interop tests.')
-argp.add_argument('-l', '--language',
- choices=['all'] + sorted(_LANGUAGES),
- nargs='+',
- default=['all'],
- help='Clients to run. Objc client can be only run on OSX.')
+argp.add_argument(
+ '-l',
+ '--language',
+ choices=['all'] + sorted(_LANGUAGES),
+ nargs='+',
+ default=['all'],
+    help='Clients to run. Objc client can only be run on OSX.')
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
-argp.add_argument('--cloud_to_prod',
- default=False,
- action='store_const',
- const=True,
- help='Run cloud_to_prod tests.')
-argp.add_argument('--cloud_to_prod_auth',
- default=False,
- action='store_const',
- const=True,
- help='Run cloud_to_prod_auth tests.')
-argp.add_argument('--prod_servers',
- choices=prod_servers.keys(),
- default=['default'],
- nargs='+',
- help=('The servers to run cloud_to_prod and '
- 'cloud_to_prod_auth tests against.'))
-argp.add_argument('-s', '--server',
- choices=['all'] + sorted(_SERVERS),
- nargs='+',
- help='Run cloud_to_cloud servers in a separate docker ' +
- 'image. Servers can only be started automatically if ' +
- '--use_docker option is enabled.',
- default=[])
-argp.add_argument('--override_server',
- action='append',
- type=lambda kv: kv.split('='),
- help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
- default=[])
-argp.add_argument('-t', '--travis',
- default=False,
- action='store_const',
- const=True)
-argp.add_argument('-v', '--verbose',
- default=False,
- action='store_const',
- const=True)
-argp.add_argument('--use_docker',
- default=False,
- action='store_const',
- const=True,
- help='Run all the interop tests under docker. That provides ' +
- 'additional isolation and prevents the need to install ' +
- 'language specific prerequisites. Only available on Linux.')
-argp.add_argument('--allow_flakes',
- default=False,
- action='store_const',
- const=True,
- help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
-argp.add_argument('--manual_run',
- default=False,
- action='store_const',
- const=True,
- help='Prepare things for running interop tests manually. ' +
- 'Preserve docker images after building them and skip '
- 'actually running the tests. Only print commands to run by ' +
- 'hand.')
-argp.add_argument('--http2_interop',
- default=False,
- action='store_const',
- const=True,
- help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
-argp.add_argument('--http2_server_interop',
- default=False,
- action='store_const',
- const=True,
- help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests')
-argp.add_argument('--insecure',
- default=False,
- action='store_const',
- const=True,
- help='Whether to use secure channel.')
-argp.add_argument('--internal_ci',
- default=False,
- action='store_const',
- const=True,
- help=('Put reports into subdirectories to improve '
- 'presentation of results by Internal CI.'))
-argp.add_argument('--bq_result_table',
- default='',
- type=str,
- nargs='?',
- help='Upload test results to a specified BQ table.')
+argp.add_argument(
+ '--cloud_to_prod',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run cloud_to_prod tests.')
+argp.add_argument(
+ '--cloud_to_prod_auth',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run cloud_to_prod_auth tests.')
+argp.add_argument(
+ '--prod_servers',
+ choices=prod_servers.keys(),
+ default=['default'],
+ nargs='+',
+ help=('The servers to run cloud_to_prod and '
+ 'cloud_to_prod_auth tests against.'))
+argp.add_argument(
+ '-s',
+ '--server',
+ choices=['all'] + sorted(_SERVERS),
+ nargs='+',
+ help='Run cloud_to_cloud servers in a separate docker ' +
+ 'image. Servers can only be started automatically if ' +
+ '--use_docker option is enabled.',
+ default=[])
+argp.add_argument(
+ '--override_server',
+ action='append',
+ type=lambda kv: kv.split('='),
+ help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
+ default=[])
+argp.add_argument(
+ '-t', '--travis', default=False, action='store_const', const=True)
+argp.add_argument(
+ '-v', '--verbose', default=False, action='store_const', const=True)
+argp.add_argument(
+ '--use_docker',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run all the interop tests under docker. That provides ' +
+ 'additional isolation and prevents the need to install ' +
+ 'language specific prerequisites. Only available on Linux.')
+argp.add_argument(
+ '--allow_flakes',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+)
+argp.add_argument(
+ '--manual_run',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Prepare things for running interop tests manually. ' +
+ 'Preserve docker images after building them and skip '
+ 'actually running the tests. Only print commands to run by ' + 'hand.')
+argp.add_argument(
+ '--http2_interop',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
+argp.add_argument(
+ '--http2_server_interop',
+ default=False,
+ action='store_const',
+ const=True,
+    help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests)'
+)
+argp.add_argument(
+ '--insecure',
+ default=False,
+ action='store_const',
+ const=True,
+    help='Use an insecure channel (disables TLS).')
+argp.add_argument(
+ '--internal_ci',
+ default=False,
+ action='store_const',
+ const=True,
+ help=('Put reports into subdirectories to improve '
+ 'presentation of results by Internal CI.'))
+argp.add_argument(
+ '--bq_result_table',
+ default='',
+ type=str,
+ nargs='?',
+ help='Upload test results to a specified BQ table.')
args = argp.parse_args()
-servers = set(s for s in itertools.chain.from_iterable(_SERVERS
- if x == 'all' else [x]
- for x in args.server))
+servers = set(
+ s
+ for s in itertools.chain.from_iterable(_SERVERS if x == 'all' else [x]
+ for x in args.server))
if args.use_docker:
- if not args.travis:
- print('Seen --use_docker flag, will run interop tests under docker.')
- print('')
- print('IMPORTANT: The changes you are testing need to be locally committed')
- print('because only the committed changes in the current branch will be')
- print('copied to the docker environment.')
- time.sleep(5)
+ if not args.travis:
+ print('Seen --use_docker flag, will run interop tests under docker.')
+ print('')
+ print(
+ 'IMPORTANT: The changes you are testing need to be locally committed'
+ )
+ print(
+ 'because only the committed changes in the current branch will be')
+ print('copied to the docker environment.')
+ time.sleep(5)
if args.manual_run and not args.use_docker:
- print('--manual_run is only supported with --use_docker option enabled.')
- sys.exit(1)
+ print('--manual_run is only supported with --use_docker option enabled.')
+ sys.exit(1)
if not args.use_docker and servers:
- print('Running interop servers is only supported with --use_docker option enabled.')
- sys.exit(1)
-
+ print(
+ 'Running interop servers is only supported with --use_docker option enabled.'
+ )
+ sys.exit(1)
# we want to include everything but objc in 'all'
# because objc won't run on non-mac platforms
all_but_objc = set(six.iterkeys(_LANGUAGES)) - set(['objc'])
-languages = set(_LANGUAGES[l]
- for l in itertools.chain.from_iterable(
- all_but_objc if x == 'all' else [x]
- for x in args.language))
+languages = set(
+ _LANGUAGES[l]
+ for l in itertools.chain.from_iterable(all_but_objc if x == 'all' else [x]
+ for x in args.language))
languages_http2_clients_for_http2_server_interop = set()
if args.http2_server_interop:
- languages_http2_clients_for_http2_server_interop = set(
- _LANGUAGES[l] for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
- if 'all' in args.language or l in args.language)
+ languages_http2_clients_for_http2_server_interop = set(
+ _LANGUAGES[l]
+ for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
+ if 'all' in args.language or l in args.language)
http2Interop = Http2Client() if args.http2_interop else None
http2InteropServer = Http2Server() if args.http2_server_interop else None
-docker_images={}
+docker_images = {}
if args.use_docker:
- # languages for which to build docker images
- languages_to_build = set(
- _LANGUAGES[k] for k in set([str(l) for l in languages] + [s for s in servers]))
- languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop
-
- if args.http2_interop:
- languages_to_build.add(http2Interop)
-
- if args.http2_server_interop:
- languages_to_build.add(http2InteropServer)
-
- build_jobs = []
- for l in languages_to_build:
- if str(l) == 'objc':
- # we don't need to build a docker image for objc
- continue
- job = build_interop_image_jobspec(l)
- docker_images[str(l)] = job.tag
- build_jobs.append(job)
-
- if build_jobs:
- jobset.message('START', 'Building interop docker images.', do_newline=True)
- if args.verbose:
- print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
+ # languages for which to build docker images
+ languages_to_build = set(
+ _LANGUAGES[k]
+ for k in set([str(l) for l in languages] + [s for s in servers]))
+ languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop
- num_failures, _ = jobset.run(
- build_jobs, newline_on_success=True, maxjobs=args.jobs)
- if num_failures == 0:
- jobset.message('SUCCESS', 'All docker images built successfully.',
- do_newline=True)
- else:
- jobset.message('FAILED', 'Failed to build interop docker images.',
- do_newline=True)
- for image in six.itervalues(docker_images):
- dockerjob.remove_image(image, skip_nonexistent=True)
- sys.exit(1)
+ if args.http2_interop:
+ languages_to_build.add(http2Interop)
+
+ if args.http2_server_interop:
+ languages_to_build.add(http2InteropServer)
+
+ build_jobs = []
+ for l in languages_to_build:
+ if str(l) == 'objc':
+ # we don't need to build a docker image for objc
+ continue
+ job = build_interop_image_jobspec(l)
+ docker_images[str(l)] = job.tag
+ build_jobs.append(job)
+
+ if build_jobs:
+ jobset.message(
+ 'START', 'Building interop docker images.', do_newline=True)
+ if args.verbose:
+ print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
+
+ num_failures, _ = jobset.run(
+ build_jobs, newline_on_success=True, maxjobs=args.jobs)
+ if num_failures == 0:
+ jobset.message(
+ 'SUCCESS',
+ 'All docker images built successfully.',
+ do_newline=True)
+ else:
+ jobset.message(
+ 'FAILED',
+ 'Failed to build interop docker images.',
+ do_newline=True)
+ for image in six.itervalues(docker_images):
+ dockerjob.remove_image(image, skip_nonexistent=True)
+ sys.exit(1)
server_manual_cmd_log = [] if args.manual_run else None
client_manual_cmd_log = [] if args.manual_run else None
@@ -1056,214 +1134,236 @@ client_manual_cmd_log = [] if args.manual_run else None
server_jobs = {}
server_addresses = {}
try:
- for s in servers:
- lang = str(s)
- spec = server_jobspec(_LANGUAGES[lang], docker_images.get(lang),
- args.insecure, manual_cmd_log=server_manual_cmd_log)
- if not args.manual_run:
- job = dockerjob.DockerJob(spec)
- server_jobs[lang] = job
- server_addresses[lang] = ('localhost', job.mapped_port(_DEFAULT_SERVER_PORT))
- else:
- # don't run the server, set server port to a placeholder value
- server_addresses[lang] = ('localhost', '${SERVER_PORT}')
-
- http2_server_job = None
- if args.http2_server_interop:
- # launch a HTTP2 server emulator that creates edge cases
- lang = str(http2InteropServer)
- spec = server_jobspec(http2InteropServer, docker_images.get(lang),
- manual_cmd_log=server_manual_cmd_log)
- if not args.manual_run:
- http2_server_job = dockerjob.DockerJob(spec)
- server_jobs[lang] = http2_server_job
- else:
- # don't run the server, set server port to a placeholder value
- server_addresses[lang] = ('localhost', '${SERVER_PORT}')
-
- jobs = []
- if args.cloud_to_prod:
- if args.insecure:
- print('TLS is always enabled for cloud_to_prod scenarios.')
- for server_host_name in args.prod_servers:
- for language in languages:
- for test_case in _TEST_CASES:
- if not test_case in language.unimplemented_test_cases():
- if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION:
- test_job = cloud_to_prod_jobspec(
- language, test_case, server_host_name,
- prod_servers[server_host_name],
- docker_image=docker_images.get(str(language)),
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
-
- if args.http2_interop:
- for test_case in _HTTP2_TEST_CASES:
- test_job = cloud_to_prod_jobspec(
- http2Interop, test_case, server_host_name,
- prod_servers[server_host_name],
- docker_image=docker_images.get(str(http2Interop)),
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
-
- if args.cloud_to_prod_auth:
- if args.insecure:
- print('TLS is always enabled for cloud_to_prod scenarios.')
- for server_host_name in args.prod_servers:
- for language in languages:
- for test_case in _AUTH_TEST_CASES:
- if not test_case in language.unimplemented_test_cases():
- test_job = cloud_to_prod_jobspec(
- language, test_case, server_host_name,
- prod_servers[server_host_name],
- docker_image=docker_images.get(str(language)), auth=True,
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
-
- for server in args.override_server:
- server_name = server[0]
- (server_host, server_port) = server[1].split(':')
- server_addresses[server_name] = (server_host, server_port)
-
- for server_name, server_address in server_addresses.items():
- (server_host, server_port) = server_address
- server_language = _LANGUAGES.get(server_name, None)
- skip_server = [] # test cases unimplemented by server
- if server_language:
- skip_server = server_language.unimplemented_test_cases_server()
- for language in languages:
- for test_case in _TEST_CASES:
- if not test_case in language.unimplemented_test_cases():
- if not test_case in skip_server:
- test_job = cloud_to_cloud_jobspec(language,
- test_case,
- server_name,
- server_host,
- server_port,
- docker_image=docker_images.get(str(language)),
- insecure=args.insecure,
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
-
- if args.http2_interop:
- for test_case in _HTTP2_TEST_CASES:
- if server_name == "go":
- # TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
- continue
- test_job = cloud_to_cloud_jobspec(http2Interop,
- test_case,
- server_name,
- server_host,
- server_port,
- docker_image=docker_images.get(str(http2Interop)),
- insecure=args.insecure,
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
-
- if args.http2_server_interop:
- if not args.manual_run:
- http2_server_job.wait_for_healthy(timeout_seconds=600)
- for language in languages_http2_clients_for_http2_server_interop:
- for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
- offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
- server_port = _DEFAULT_SERVER_PORT+offset
+ for s in servers:
+ lang = str(s)
+ spec = server_jobspec(
+ _LANGUAGES[lang],
+ docker_images.get(lang),
+ args.insecure,
+ manual_cmd_log=server_manual_cmd_log)
if not args.manual_run:
- server_port = http2_server_job.mapped_port(server_port)
- test_job = cloud_to_cloud_jobspec(language,
- test_case,
- str(http2InteropServer),
- 'localhost',
- server_port,
- docker_image=docker_images.get(str(language)),
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
- for language in languages:
- # HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
- # HTTP_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather
- # than specialized http2 clients, reusing existing test implementations.
- # For example, in the "data_frame_padding" test, use language's gRPC
- # interop clients and make them think that theyre running "large_unary"
- # test case. This avoids implementing a new test case in each language.
- for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
- if test_case not in language.unimplemented_test_cases():
- offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
- server_port = _DEFAULT_SERVER_PORT+offset
- if not args.manual_run:
- server_port = http2_server_job.mapped_port(server_port)
- if not args.insecure:
- print(('Creating grpc cient to http2 server test case with insecure connection, even though'
- ' args.insecure is False. Http2 test server only supports insecure connections.'))
- test_job = cloud_to_cloud_jobspec(language,
- test_case,
- str(http2InteropServer),
- 'localhost',
- server_port,
- docker_image=docker_images.get(str(language)),
- insecure=True,
- manual_cmd_log=client_manual_cmd_log)
- jobs.append(test_job)
-
- if not jobs:
- print('No jobs to run.')
- for image in six.itervalues(docker_images):
- dockerjob.remove_image(image, skip_nonexistent=True)
- sys.exit(1)
-
- if args.manual_run:
- print('All tests will skipped --manual_run option is active.')
-
- if args.verbose:
- print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
+ job = dockerjob.DockerJob(spec)
+ server_jobs[lang] = job
+ server_addresses[lang] = ('localhost',
+ job.mapped_port(_DEFAULT_SERVER_PORT))
+ else:
+ # don't run the server, set server port to a placeholder value
+ server_addresses[lang] = ('localhost', '${SERVER_PORT}')
+
+ http2_server_job = None
+ if args.http2_server_interop:
+        # launch an HTTP2 server emulator that creates edge cases
+ lang = str(http2InteropServer)
+ spec = server_jobspec(
+ http2InteropServer,
+ docker_images.get(lang),
+ manual_cmd_log=server_manual_cmd_log)
+ if not args.manual_run:
+ http2_server_job = dockerjob.DockerJob(spec)
+ server_jobs[lang] = http2_server_job
+ else:
+ # don't run the server, set server port to a placeholder value
+ server_addresses[lang] = ('localhost', '${SERVER_PORT}')
+
+ jobs = []
+ if args.cloud_to_prod:
+ if args.insecure:
+ print('TLS is always enabled for cloud_to_prod scenarios.')
+ for server_host_name in args.prod_servers:
+ for language in languages:
+ for test_case in _TEST_CASES:
+ if not test_case in language.unimplemented_test_cases():
+ if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION:
+ test_job = cloud_to_prod_jobspec(
+ language,
+ test_case,
+ server_host_name,
+ prod_servers[server_host_name],
+ docker_image=docker_images.get(str(language)),
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if args.http2_interop:
+ for test_case in _HTTP2_TEST_CASES:
+ test_job = cloud_to_prod_jobspec(
+ http2Interop,
+ test_case,
+ server_host_name,
+ prod_servers[server_host_name],
+ docker_image=docker_images.get(str(http2Interop)),
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if args.cloud_to_prod_auth:
+ if args.insecure:
+ print('TLS is always enabled for cloud_to_prod scenarios.')
+ for server_host_name in args.prod_servers:
+ for language in languages:
+ for test_case in _AUTH_TEST_CASES:
+ if not test_case in language.unimplemented_test_cases():
+ test_job = cloud_to_prod_jobspec(
+ language,
+ test_case,
+ server_host_name,
+ prod_servers[server_host_name],
+ docker_image=docker_images.get(str(language)),
+ auth=True,
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ for server in args.override_server:
+ server_name = server[0]
+ (server_host, server_port) = server[1].split(':')
+ server_addresses[server_name] = (server_host, server_port)
+
+ for server_name, server_address in server_addresses.items():
+ (server_host, server_port) = server_address
+ server_language = _LANGUAGES.get(server_name, None)
+ skip_server = [] # test cases unimplemented by server
+ if server_language:
+ skip_server = server_language.unimplemented_test_cases_server()
+ for language in languages:
+ for test_case in _TEST_CASES:
+ if not test_case in language.unimplemented_test_cases():
+ if not test_case in skip_server:
+ test_job = cloud_to_cloud_jobspec(
+ language,
+ test_case,
+ server_name,
+ server_host,
+ server_port,
+ docker_image=docker_images.get(str(language)),
+ insecure=args.insecure,
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if args.http2_interop:
+ for test_case in _HTTP2_TEST_CASES:
+ if server_name == "go":
+ # TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
+ continue
+ test_job = cloud_to_cloud_jobspec(
+ http2Interop,
+ test_case,
+ server_name,
+ server_host,
+ server_port,
+ docker_image=docker_images.get(str(http2Interop)),
+ insecure=args.insecure,
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if args.http2_server_interop:
+ if not args.manual_run:
+ http2_server_job.wait_for_healthy(timeout_seconds=600)
+ for language in languages_http2_clients_for_http2_server_interop:
+ for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(
+ _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
+ offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+ server_port = _DEFAULT_SERVER_PORT + offset
+ if not args.manual_run:
+ server_port = http2_server_job.mapped_port(server_port)
+ test_job = cloud_to_cloud_jobspec(
+ language,
+ test_case,
+ str(http2InteropServer),
+ 'localhost',
+ server_port,
+ docker_image=docker_images.get(str(language)),
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+ for language in languages:
+        # _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
+        # _HTTP2_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather
+        # than specialized http2 clients, reusing existing test implementations.
+        # For example, in the "data_frame_padding" test, use the language's gRPC
+        # interop clients and make them think that they're running the "large_unary"
+        # test case. This avoids implementing a new test case in each language.
+ for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+ if test_case not in language.unimplemented_test_cases():
+ offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+ server_port = _DEFAULT_SERVER_PORT + offset
+ if not args.manual_run:
+ server_port = http2_server_job.mapped_port(server_port)
+ if not args.insecure:
+ print((
+                        'Creating grpc client to http2 server test case with insecure connection, even though'
+ ' args.insecure is False. Http2 test server only supports insecure connections.'
+ ))
+ test_job = cloud_to_cloud_jobspec(
+ language,
+ test_case,
+ str(http2InteropServer),
+ 'localhost',
+ server_port,
+ docker_image=docker_images.get(str(language)),
+ insecure=True,
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if not jobs:
+ print('No jobs to run.')
+ for image in six.itervalues(docker_images):
+ dockerjob.remove_image(image, skip_nonexistent=True)
+ sys.exit(1)
+
+ if args.manual_run:
+        print('All tests will be skipped because the --manual_run option is active.')
- num_failures, resultset = jobset.run(jobs, newline_on_success=True,
- maxjobs=args.jobs,
- skip_jobs=args.manual_run)
- if args.bq_result_table and resultset:
- upload_interop_results_to_bq(resultset, args.bq_result_table, args)
- if num_failures:
- jobset.message('FAILED', 'Some tests failed', do_newline=True)
- else:
- jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+ if args.verbose:
+ print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
+
+ num_failures, resultset = jobset.run(
+ jobs,
+ newline_on_success=True,
+ maxjobs=args.jobs,
+ skip_jobs=args.manual_run)
+ if args.bq_result_table and resultset:
+ upload_interop_results_to_bq(resultset, args.bq_result_table, args)
+ if num_failures:
+ jobset.message('FAILED', 'Some tests failed', do_newline=True)
+ else:
+ jobset.message('SUCCESS', 'All tests passed', do_newline=True)
- write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
- write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')
+ write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
+ write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')
- xml_report_name = _XML_REPORT
- if args.internal_ci:
- xml_report_name = _INTERNAL_CL_XML_REPORT
- report_utils.render_junit_xml_report(resultset, xml_report_name)
+ xml_report_name = _XML_REPORT
+ if args.internal_ci:
+ xml_report_name = _INTERNAL_CL_XML_REPORT
+ report_utils.render_junit_xml_report(resultset, xml_report_name)
- for name, job in resultset.items():
- if "http2" in name:
- job[0].http2results = aggregate_http2_results(job[0].message)
+ for name, job in resultset.items():
+ if "http2" in name:
+ job[0].http2results = aggregate_http2_results(job[0].message)
- http2_server_test_cases = (
- _HTTP2_SERVER_TEST_CASES if args.http2_server_interop else [])
+ http2_server_test_cases = (_HTTP2_SERVER_TEST_CASES
+ if args.http2_server_interop else [])
- report_utils.render_interop_html_report(
- set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES,
- _HTTP2_TEST_CASES, http2_server_test_cases, resultset, num_failures,
- args.cloud_to_prod_auth or args.cloud_to_prod, args.prod_servers,
- args.http2_interop)
+ report_utils.render_interop_html_report(
+ set([str(l) for l in languages]), servers, _TEST_CASES,
+ _AUTH_TEST_CASES, _HTTP2_TEST_CASES, http2_server_test_cases, resultset,
+ num_failures, args.cloud_to_prod_auth or args.cloud_to_prod,
+ args.prod_servers, args.http2_interop)
- if num_failures:
- sys.exit(1)
- else:
- sys.exit(0)
+ if num_failures:
+ sys.exit(1)
+ else:
+ sys.exit(0)
except Exception as e:
- print('exception occurred:')
- traceback.print_exc(file=sys.stdout)
+ print('exception occurred:')
+ traceback.print_exc(file=sys.stdout)
finally:
- # Check if servers are still running.
- for server, job in server_jobs.items():
- if not job.is_running():
- print('Server "%s" has exited prematurely.' % server)
+ # Check if servers are still running.
+ for server, job in server_jobs.items():
+ if not job.is_running():
+ print('Server "%s" has exited prematurely.' % server)
- dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
+ dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
- for image in six.itervalues(docker_images):
- if not args.manual_run:
- print('Removing docker image %s' % image)
- dockerjob.remove_image(image)
- else:
- print('Preserving docker image: %s' % image)
+ for image in six.itervalues(docker_images):
+ if not args.manual_run:
+ print('Removing docker image %s' % image)
+ dockerjob.remove_image(image)
+ else:
+ print('Preserving docker image: %s' % image)
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index c136af58cb..561217ceb1 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -23,26 +23,31 @@ import argparse
import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server
-sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'profiling', 'microbenchmarks', 'bm_diff'))
+sys.path.append(
+ os.path.join(
+ os.path.dirname(sys.argv[0]), '..', 'profiling', 'microbenchmarks',
+ 'bm_diff'))
import bm_constants
flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
- os.makedirs('reports')
+ os.makedirs('reports')
start_port_server.start_port_server()
+
def fnize(s):
- out = ''
- for c in s:
- if c in '<>, /':
- if len(out) and out[-1] == '_': continue
- out += '_'
- else:
- out += c
- return out
+ out = ''
+ for c in s:
+ if c in '<>, /':
+ if len(out) and out[-1] == '_': continue
+ out += '_'
+ else:
+ out += c
+ return out
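A quick trace of fnize on a representative benchmark name (the name itself is hypothetical):

    fnize('BM_StreamingPingPong<MinInProcess, NoOpMutator>/0/0')
    # -> 'BM_StreamingPingPong_MinInProcess_NoOpMutator_0_0'
    # '<', '>', ',', ' ' and '/' all map to '_', and runs of them collapse to one '_'.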
+
# index html
index_html = """
@@ -53,169 +58,202 @@ index_html = """
<body>
"""
+
def heading(name):
- global index_html
- index_html += "<h1>%s</h1>\n" % name
+ global index_html
+ index_html += "<h1>%s</h1>\n" % name
+
def link(txt, tgt):
- global index_html
- index_html += "<p><a href=\"%s\">%s</a></p>\n" % (
- cgi.escape(tgt, quote=True), cgi.escape(txt))
+ global index_html
+ index_html += "<p><a href=\"%s\">%s</a></p>\n" % (
+ cgi.escape(tgt, quote=True), cgi.escape(txt))
+
def text(txt):
- global index_html
- index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
+ global index_html
+ index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
+
def collect_latency(bm_name, args):
- """generate latency profiles"""
- benchmarks = []
- profile_analysis = []
- cleanup = []
-
- heading('Latency Profiles: %s' % bm_name)
- subprocess.check_call(
- ['make', bm_name,
- 'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()])
- for line in subprocess.check_output(['bins/basicprof/%s' % bm_name,
- '--benchmark_list_tests']).splitlines():
- link(line, '%s.txt' % fnize(line))
- benchmarks.append(
- jobset.JobSpec(['bins/basicprof/%s' % bm_name,
- '--benchmark_filter=^%s$' % line,
- '--benchmark_min_time=0.05'],
- environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
- shortname='profile-%s' % fnize(line)))
- profile_analysis.append(
- jobset.JobSpec([sys.executable,
- 'tools/profiling/latency_profile/profile_analyzer.py',
- '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
- '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=20*60,
- shortname='analyze-%s' % fnize(line)))
- cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
- # periodically flush out the list of jobs: profile_analysis jobs at least
- # consume upwards of five gigabytes of ram in some cases, and so analysing
- # hundreds of them at once is impractical -- but we want at least some
- # concurrency or the work takes too long
- if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
- # run up to half the cpu count: each benchmark can use up to two cores
- # (one for the microbenchmark, one for the data flush)
- jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
- jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
- jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
- benchmarks = []
- profile_analysis = []
- cleanup = []
- # run the remaining benchmarks that weren't flushed
- if len(benchmarks):
- jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
- jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
- jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+ """generate latency profiles"""
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+
+ heading('Latency Profiles: %s' % bm_name)
+ subprocess.check_call([
+ 'make', bm_name, 'CONFIG=basicprof', '-j',
+ '%d' % multiprocessing.cpu_count()
+ ])
+ for line in subprocess.check_output(
+ ['bins/basicprof/%s' % bm_name, '--benchmark_list_tests']).splitlines():
+ link(line, '%s.txt' % fnize(line))
+ benchmarks.append(
+ jobset.JobSpec(
+ [
+ 'bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' %
+ line, '--benchmark_min_time=0.05'
+ ],
+ environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
+ shortname='profile-%s' % fnize(line)))
+ profile_analysis.append(
+ jobset.JobSpec(
+ [
+ sys.executable,
+ 'tools/profiling/latency_profile/profile_analyzer.py',
+ '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
+ '--out', 'reports/%s.txt' % fnize(line)
+ ],
+ timeout_seconds=20 * 60,
+ shortname='analyze-%s' % fnize(line)))
+ cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
+ # periodically flush out the list of jobs: profile_analysis jobs at least
+ # consume upwards of five gigabytes of ram in some cases, and so analysing
+ # hundreds of them at once is impractical -- but we want at least some
+ # concurrency or the work takes too long
+ if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
+ # run up to half the cpu count: each benchmark can use up to two cores
+ # (one for the microbenchmark, one for the data flush)
+ jobset.run(
+ benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+ # run the remaining benchmarks that weren't flushed
+ if len(benchmarks):
+ jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+
def collect_perf(bm_name, args):
- """generate flamegraphs"""
- heading('Flamegraphs: %s' % bm_name)
- subprocess.check_call(
- ['make', bm_name,
- 'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
- benchmarks = []
- profile_analysis = []
- cleanup = []
- for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
- '--benchmark_list_tests']).splitlines():
- link(line, '%s.svg' % fnize(line))
- benchmarks.append(
- jobset.JobSpec(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
- '-g', '-F', '997',
- 'bins/mutrace/%s' % bm_name,
- '--benchmark_filter=^%s$' % line,
- '--benchmark_min_time=10'],
- shortname='perf-%s' % fnize(line)))
- profile_analysis.append(
- jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
- environ = {
- 'PERF_BASE_NAME': fnize(line),
- 'OUTPUT_DIR': 'reports',
- 'OUTPUT_FILENAME': fnize(line),
- },
- shortname='flame-%s' % fnize(line)))
- cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
- cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
- # periodically flush out the list of jobs: temporary space required for this
- # processing is large
- if len(benchmarks) >= 20:
- # run up to half the cpu count: each benchmark can use up to two cores
- # (one for the microbenchmark, one for the data flush)
- jobset.run(benchmarks, maxjobs=1)
- jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
- jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
- benchmarks = []
- profile_analysis = []
- cleanup = []
- # run the remaining benchmarks that weren't flushed
- if len(benchmarks):
- jobset.run(benchmarks, maxjobs=1)
- jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
- jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+ """generate flamegraphs"""
+ heading('Flamegraphs: %s' % bm_name)
+ subprocess.check_call([
+ 'make', bm_name, 'CONFIG=mutrace', '-j',
+ '%d' % multiprocessing.cpu_count()
+ ])
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+ for line in subprocess.check_output(
+ ['bins/mutrace/%s' % bm_name, '--benchmark_list_tests']).splitlines():
+ link(line, '%s.svg' % fnize(line))
+ benchmarks.append(
+ jobset.JobSpec(
+ [
+ 'perf', 'record', '-o', '%s-perf.data' % fnize(
+ line), '-g', '-F', '997', 'bins/mutrace/%s' % bm_name,
+ '--benchmark_filter=^%s$' % line, '--benchmark_min_time=10'
+ ],
+ shortname='perf-%s' % fnize(line)))
+ profile_analysis.append(
+ jobset.JobSpec(
+ [
+ 'tools/run_tests/performance/process_local_perf_flamegraphs.sh'
+ ],
+ environ={
+ 'PERF_BASE_NAME': fnize(line),
+ 'OUTPUT_DIR': 'reports',
+ 'OUTPUT_FILENAME': fnize(line),
+ },
+ shortname='flame-%s' % fnize(line)))
+ cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
+ cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
+ # periodically flush out the list of jobs: temporary space required for this
+ # processing is large
+ if len(benchmarks) >= 20:
+ # run up to half the cpu count: each benchmark can use up to two cores
+ # (one for the microbenchmark, one for the data flush)
+ jobset.run(benchmarks, maxjobs=1)
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+ # run the remaining benchmarks that weren't flushed
+ if len(benchmarks):
+ jobset.run(benchmarks, maxjobs=1)
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+
def run_summary(bm_name, cfg, base_json_name):
- subprocess.check_call(
- ['make', bm_name,
- 'CONFIG=%s' % cfg, '-j', '%d' % multiprocessing.cpu_count()])
- cmd = ['bins/%s/%s' % (cfg, bm_name),
- '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
- '--benchmark_out_format=json']
- if args.summary_time is not None:
- cmd += ['--benchmark_min_time=%d' % args.summary_time]
- return subprocess.check_output(cmd)
+ subprocess.check_call([
+ 'make', bm_name, 'CONFIG=%s' % cfg, '-j',
+ '%d' % multiprocessing.cpu_count()
+ ])
+ cmd = [
+ 'bins/%s/%s' % (cfg, bm_name), '--benchmark_out=%s.%s.json' %
+ (base_json_name, cfg), '--benchmark_out_format=json'
+ ]
+ if args.summary_time is not None:
+ cmd += ['--benchmark_min_time=%d' % args.summary_time]
+ return subprocess.check_output(cmd)
+
def collect_summary(bm_name, args):
- heading('Summary: %s [no counters]' % bm_name)
- text(run_summary(bm_name, 'opt', bm_name))
- heading('Summary: %s [with counters]' % bm_name)
- text(run_summary(bm_name, 'counters', bm_name))
- if args.bigquery_upload:
- with open('%s.csv' % bm_name, 'w') as f:
- f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py',
- '%s.counters.json' % bm_name,
- '%s.opt.json' % bm_name]))
- subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name])
+ heading('Summary: %s [no counters]' % bm_name)
+ text(run_summary(bm_name, 'opt', bm_name))
+ heading('Summary: %s [with counters]' % bm_name)
+ text(run_summary(bm_name, 'counters', bm_name))
+ if args.bigquery_upload:
+ with open('%s.csv' % bm_name, 'w') as f:
+ f.write(
+ subprocess.check_output([
+ 'tools/profiling/microbenchmarks/bm2bq.py',
+ '%s.counters.json' % bm_name, '%s.opt.json' % bm_name
+ ]))
+ subprocess.check_call([
+ 'bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name
+ ])
+
collectors = {
- 'latency': collect_latency,
- 'perf': collect_perf,
- 'summary': collect_summary,
+ 'latency': collect_latency,
+ 'perf': collect_perf,
+ 'summary': collect_summary,
}
argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
-argp.add_argument('-c', '--collect',
- choices=sorted(collectors.keys()),
- nargs='*',
- default=sorted(collectors.keys()),
- help='Which collectors should be run against each benchmark')
-argp.add_argument('-b', '--benchmarks',
- choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- nargs='+',
- type=str,
- help='Which microbenchmarks should be run')
-argp.add_argument('--bigquery_upload',
- default=False,
- action='store_const',
- const=True,
- help='Upload results from summary collection to bigquery')
-argp.add_argument('--summary_time',
- default=None,
- type=int,
- help='Minimum time to run benchmarks for the summary collection')
+argp.add_argument(
+ '-c',
+ '--collect',
+ choices=sorted(collectors.keys()),
+ nargs='*',
+ default=sorted(collectors.keys()),
+ help='Which collectors should be run against each benchmark')
+argp.add_argument(
+ '-b',
+ '--benchmarks',
+ choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ nargs='+',
+ type=str,
+ help='Which microbenchmarks should be run')
+argp.add_argument(
+ '--bigquery_upload',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Upload results from summary collection to bigquery')
+argp.add_argument(
+ '--summary_time',
+ default=None,
+ type=int,
+ help='Minimum time to run benchmarks for the summary collection')
args = argp.parse_args()
try:
- for collect in args.collect:
- for bm_name in args.benchmarks:
- collectors[collect](bm_name, args)
+ for collect in args.collect:
+ for bm_name in args.benchmarks:
+ collectors[collect](bm_name, args)
finally:
- if not os.path.exists('reports'):
- os.makedirs('reports')
- index_html += "</body>\n</html>\n"
- with open('reports/index.html', 'w') as f:
- f.write(index_html)
+ if not os.path.exists('reports'):
+ os.makedirs('reports')
+ index_html += "</body>\n</html>\n"
+ with open('reports/index.html', 'w') as f:
+ f.write(index_html)
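
The collectors above in run_microbenchmark.py share a batch-and-flush pattern: JobSpecs are accumulated in lists and periodically pushed through jobset.run so that memory-hungry analysis jobs never pile up, while still running with some parallelism. A minimal, self-contained sketch of the same idea, using only the standard library instead of the jobset module (the flush threshold of 16 and the echo commands are illustrative assumptions, not part of the script), could look like this:

import multiprocessing
import subprocess
from concurrent.futures import ThreadPoolExecutor


def run_batch(cmds, maxjobs):
    # Run each command in its own subprocess, at most `maxjobs` at a time.
    with ThreadPoolExecutor(max_workers=maxjobs) as pool:
        return list(pool.map(subprocess.call, cmds))


def run_in_flushes(all_cmds, flush_at=16):
    maxjobs = max(1, multiprocessing.cpu_count() // 2)
    pending = []
    for cmd in all_cmds:
        pending.append(cmd)
        # Flush periodically so memory-hungry jobs never accumulate unbounded.
        if len(pending) >= min(flush_at, multiprocessing.cpu_count()):
            run_batch(pending, maxjobs)
            pending = []
    # Run whatever did not reach the flush threshold.
    if pending:
        run_batch(pending, maxjobs)


if __name__ == '__main__':
    # Illustrative commands only; the real script builds perf/benchmark invocations.
    run_in_flushes([['echo', 'job-%d' % i] for i in range(40)])
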
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index aa305be466..03b684b318 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Run performance tests locally or remotely."""
from __future__ import print_function
@@ -37,566 +36,666 @@ import performance.scenario_config as scenario_config
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
-
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
-
_REMOTE_HOST_USERNAME = 'jenkins'
class QpsWorkerJob:
- """Encapsulates a qps worker server job."""
-
- def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
- self._spec = spec
- self.language = language
- self.host_and_port = host_and_port
- self._job = None
- self.perf_file_base_name = perf_file_base_name
-
- def start(self):
- self._job = jobset.Job(self._spec, newline_on_success=True, travis=True, add_env={})
-
- def is_running(self):
- """Polls a job and returns True if given job is still running."""
- return self._job and self._job.state() == jobset._RUNNING
-
- def kill(self):
- if self._job:
- self._job.kill()
- self._job = None
-
-
-def create_qpsworker_job(language, shortname=None, port=10000, remote_host=None, perf_cmd=None):
- cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])
-
- if remote_host:
- host_and_port='%s:%s' % (remote_host, port)
- else:
- host_and_port='localhost:%s' % port
-
- perf_file_base_name = None
- if perf_cmd:
- perf_file_base_name = '%s-%s' % (host_and_port, shortname)
- # specify -o output file so perf.data gets collected when worker stopped
- cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name] + cmdline
-
- worker_timeout = 3 * 60
- if remote_host:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
- ssh_cmd = ['ssh']
- cmdline = ['timeout', '%s' % (worker_timeout + 30)] + cmdline
- ssh_cmd.extend([str(user_at_host), 'cd ~/performance_workspace/grpc/ && python tools/run_tests/start_port_server.py && %s' % ' '.join(cmdline)])
- cmdline = ssh_cmd
-
- jobspec = jobset.JobSpec(
- cmdline=cmdline,
- shortname=shortname,
- timeout_seconds=worker_timeout, # workers get restarted after each scenario
- verbose_success=True)
- return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
-
-
-def create_scenario_jobspec(scenario_json, workers, remote_host=None,
- bq_result_table=None, server_cpu_load=0):
- """Runs one scenario using QPS driver."""
- # setting QPS_WORKERS env variable here makes sure it works with SSH too.
- cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
- if bq_result_table:
- cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
- cmd += 'tools/run_tests/performance/run_qps_driver.sh '
- cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
- cmd += '--scenario_result_file=scenario_result.json '
- if server_cpu_load != 0:
- cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
- if remote_host:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
- cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
-
- return jobset.JobSpec(
- cmdline=[cmd],
- shortname='qps_json_driver.%s' % scenario_json['name'],
- timeout_seconds=12*60,
- shell=True,
- verbose_success=True)
+ """Encapsulates a qps worker server job."""
+
+ def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
+ self._spec = spec
+ self.language = language
+ self.host_and_port = host_and_port
+ self._job = None
+ self.perf_file_base_name = perf_file_base_name
+
+ def start(self):
+ self._job = jobset.Job(
+ self._spec, newline_on_success=True, travis=True, add_env={})
+
+ def is_running(self):
+ """Polls a job and returns True if given job is still running."""
+ return self._job and self._job.state() == jobset._RUNNING
+
+ def kill(self):
+ if self._job:
+ self._job.kill()
+ self._job = None
+
+
+def create_qpsworker_job(language,
+ shortname=None,
+ port=10000,
+ remote_host=None,
+ perf_cmd=None):
+ cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])
+
+ if remote_host:
+ host_and_port = '%s:%s' % (remote_host, port)
+ else:
+ host_and_port = 'localhost:%s' % port
+
+ perf_file_base_name = None
+ if perf_cmd:
+ perf_file_base_name = '%s-%s' % (host_and_port, shortname)
+        # specify -o output file so perf.data gets collected when the worker is stopped
+ cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name
+ ] + cmdline
+
+ worker_timeout = 3 * 60
+ if remote_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+ ssh_cmd = ['ssh']
+ cmdline = ['timeout', '%s' % (worker_timeout + 30)] + cmdline
+ ssh_cmd.extend([
+ str(user_at_host),
+ 'cd ~/performance_workspace/grpc/ && python tools/run_tests/start_port_server.py && %s'
+ % ' '.join(cmdline)
+ ])
+ cmdline = ssh_cmd
+
+ jobspec = jobset.JobSpec(
+ cmdline=cmdline,
+ shortname=shortname,
+ timeout_seconds=worker_timeout, # workers get restarted after each scenario
+ verbose_success=True)
+ return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
+
+
+def create_scenario_jobspec(scenario_json,
+ workers,
+ remote_host=None,
+ bq_result_table=None,
+ server_cpu_load=0):
+ """Runs one scenario using QPS driver."""
+ # setting QPS_WORKERS env variable here makes sure it works with SSH too.
+ cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
+ if bq_result_table:
+ cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
+ cmd += 'tools/run_tests/performance/run_qps_driver.sh '
+ cmd += '--scenarios_json=%s ' % pipes.quote(
+ json.dumps({
+ 'scenarios': [scenario_json]
+ }))
+ cmd += '--scenario_result_file=scenario_result.json '
+ if server_cpu_load != 0:
+ cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
+ if remote_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+ cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+ user_at_host, pipes.quote(cmd))
+
+ return jobset.JobSpec(
+ cmdline=[cmd],
+ shortname='qps_json_driver.%s' % scenario_json['name'],
+ timeout_seconds=12 * 60,
+ shell=True,
+ verbose_success=True)
def create_quit_jobspec(workers, remote_host=None):
- """Runs quit using QPS driver."""
- # setting QPS_WORKERS env variable here makes sure it works with SSH too.
- cmd = 'QPS_WORKERS="%s" bins/opt/qps_json_driver --quit' % ','.join(w.host_and_port for w in workers)
- if remote_host:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
- cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
-
- return jobset.JobSpec(
- cmdline=[cmd],
- shortname='qps_json_driver.quit',
- timeout_seconds=3*60,
- shell=True,
- verbose_success=True)
-
-
-def create_netperf_jobspec(server_host='localhost', client_host=None,
+ """Runs quit using QPS driver."""
+ # setting QPS_WORKERS env variable here makes sure it works with SSH too.
+ cmd = 'QPS_WORKERS="%s" bins/opt/qps_json_driver --quit' % ','.join(
+ w.host_and_port for w in workers)
+ if remote_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+ cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+ user_at_host, pipes.quote(cmd))
+
+ return jobset.JobSpec(
+ cmdline=[cmd],
+ shortname='qps_json_driver.quit',
+ timeout_seconds=3 * 60,
+ shell=True,
+ verbose_success=True)
+
+
+def create_netperf_jobspec(server_host='localhost',
+ client_host=None,
bq_result_table=None):
- """Runs netperf benchmark."""
- cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
- if bq_result_table:
- cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
- if client_host:
- # If netperf is running remotely, the env variables populated by Jenkins
- # won't be available on the client, but we need them for uploading results
- # to BigQuery.
- jenkins_job_name = os.getenv('JOB_NAME')
- if jenkins_job_name:
- cmd += 'JOB_NAME="%s" ' % jenkins_job_name
- jenkins_build_number = os.getenv('BUILD_NUMBER')
- if jenkins_build_number:
- cmd += 'BUILD_NUMBER="%s" ' % jenkins_build_number
-
- cmd += 'tools/run_tests/performance/run_netperf.sh'
- if client_host:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
- cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
-
- return jobset.JobSpec(
- cmdline=[cmd],
- shortname='netperf',
- timeout_seconds=60,
- shell=True,
- verbose_success=True)
+ """Runs netperf benchmark."""
+ cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
+ if bq_result_table:
+ cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
+ if client_host:
+ # If netperf is running remotely, the env variables populated by Jenkins
+ # won't be available on the client, but we need them for uploading results
+ # to BigQuery.
+ jenkins_job_name = os.getenv('JOB_NAME')
+ if jenkins_job_name:
+ cmd += 'JOB_NAME="%s" ' % jenkins_job_name
+ jenkins_build_number = os.getenv('BUILD_NUMBER')
+ if jenkins_build_number:
+ cmd += 'BUILD_NUMBER="%s" ' % jenkins_build_number
+
+ cmd += 'tools/run_tests/performance/run_netperf.sh'
+ if client_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
+ cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+ user_at_host, pipes.quote(cmd))
+
+ return jobset.JobSpec(
+ cmdline=[cmd],
+ shortname='netperf',
+ timeout_seconds=60,
+ shell=True,
+ verbose_success=True)
def archive_repo(languages):
- """Archives local version of repo including submodules."""
- cmdline=['tar', '-cf', '../grpc.tar', '../grpc/']
- if 'java' in languages:
- cmdline.append('../grpc-java')
- if 'go' in languages:
- cmdline.append('../grpc-go')
-
- archive_job = jobset.JobSpec(
- cmdline=cmdline,
- shortname='archive_repo',
- timeout_seconds=3*60)
-
- jobset.message('START', 'Archiving local repository.', do_newline=True)
- num_failures, _ = jobset.run(
- [archive_job], newline_on_success=True, maxjobs=1)
- if num_failures == 0:
- jobset.message('SUCCESS',
- 'Archive with local repository created successfully.',
- do_newline=True)
- else:
- jobset.message('FAILED', 'Failed to archive local repository.',
- do_newline=True)
- sys.exit(1)
+ """Archives local version of repo including submodules."""
+ cmdline = ['tar', '-cf', '../grpc.tar', '../grpc/']
+ if 'java' in languages:
+ cmdline.append('../grpc-java')
+ if 'go' in languages:
+ cmdline.append('../grpc-go')
+
+ archive_job = jobset.JobSpec(
+ cmdline=cmdline, shortname='archive_repo', timeout_seconds=3 * 60)
+
+ jobset.message('START', 'Archiving local repository.', do_newline=True)
+ num_failures, _ = jobset.run(
+ [archive_job], newline_on_success=True, maxjobs=1)
+ if num_failures == 0:
+ jobset.message(
+ 'SUCCESS',
+ 'Archive with local repository created successfully.',
+ do_newline=True)
+ else:
+ jobset.message(
+ 'FAILED', 'Failed to archive local repository.', do_newline=True)
+ sys.exit(1)
def prepare_remote_hosts(hosts, prepare_local=False):
- """Prepares remote hosts (and maybe prepare localhost as well)."""
- prepare_timeout = 10*60
- prepare_jobs = []
- for host in hosts:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
- prepare_jobs.append(
- jobset.JobSpec(
- cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
- shortname='remote_host_prepare.%s' % host,
- environ = {'USER_AT_HOST': user_at_host},
- timeout_seconds=prepare_timeout))
- if prepare_local:
- # Prepare localhost as well
- prepare_jobs.append(
- jobset.JobSpec(
- cmdline=['tools/run_tests/performance/kill_workers.sh'],
- shortname='local_prepare',
- timeout_seconds=prepare_timeout))
- jobset.message('START', 'Preparing hosts.', do_newline=True)
- num_failures, _ = jobset.run(
- prepare_jobs, newline_on_success=True, maxjobs=10)
- if num_failures == 0:
- jobset.message('SUCCESS',
- 'Prepare step completed successfully.',
- do_newline=True)
- else:
- jobset.message('FAILED', 'Failed to prepare remote hosts.',
- do_newline=True)
- sys.exit(1)
-
-
-def build_on_remote_hosts(hosts, languages=scenario_config.LANGUAGES.keys(), build_local=False):
- """Builds performance worker on remote hosts (and maybe also locally)."""
- build_timeout = 15*60
- # Kokoro VMs (which are local only) do not have caching, so they need more time to build
- local_build_timeout = 30*60
- build_jobs = []
- for host in hosts:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
- build_jobs.append(
- jobset.JobSpec(
- cmdline=['tools/run_tests/performance/remote_host_build.sh'] + languages,
- shortname='remote_host_build.%s' % host,
- environ = {'USER_AT_HOST': user_at_host, 'CONFIG': 'opt'},
- timeout_seconds=build_timeout))
- if build_local:
- # Build locally as well
- build_jobs.append(
- jobset.JobSpec(
- cmdline=['tools/run_tests/performance/build_performance.sh'] + languages,
- shortname='local_build',
- environ = {'CONFIG': 'opt'},
- timeout_seconds=local_build_timeout))
- jobset.message('START', 'Building.', do_newline=True)
- num_failures, _ = jobset.run(
- build_jobs, newline_on_success=True, maxjobs=10)
- if num_failures == 0:
- jobset.message('SUCCESS',
- 'Built successfully.',
- do_newline=True)
- else:
- jobset.message('FAILED', 'Build failed.',
- do_newline=True)
- sys.exit(1)
+ """Prepares remote hosts (and maybe prepare localhost as well)."""
+ prepare_timeout = 10 * 60
+ prepare_jobs = []
+ for host in hosts:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
+ prepare_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
+ shortname='remote_host_prepare.%s' % host,
+ environ={'USER_AT_HOST': user_at_host},
+ timeout_seconds=prepare_timeout))
+ if prepare_local:
+ # Prepare localhost as well
+ prepare_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/kill_workers.sh'],
+ shortname='local_prepare',
+ timeout_seconds=prepare_timeout))
+ jobset.message('START', 'Preparing hosts.', do_newline=True)
+ num_failures, _ = jobset.run(
+ prepare_jobs, newline_on_success=True, maxjobs=10)
+ if num_failures == 0:
+ jobset.message(
+ 'SUCCESS', 'Prepare step completed successfully.', do_newline=True)
+ else:
+ jobset.message(
+ 'FAILED', 'Failed to prepare remote hosts.', do_newline=True)
+ sys.exit(1)
+
+
+def build_on_remote_hosts(hosts,
+ languages=scenario_config.LANGUAGES.keys(),
+ build_local=False):
+ """Builds performance worker on remote hosts (and maybe also locally)."""
+ build_timeout = 15 * 60
+ # Kokoro VMs (which are local only) do not have caching, so they need more time to build
+ local_build_timeout = 30 * 60
+ build_jobs = []
+ for host in hosts:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
+ build_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/remote_host_build.sh'] +
+ languages,
+ shortname='remote_host_build.%s' % host,
+ environ={'USER_AT_HOST': user_at_host,
+ 'CONFIG': 'opt'},
+ timeout_seconds=build_timeout))
+ if build_local:
+ # Build locally as well
+ build_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/build_performance.sh'] +
+ languages,
+ shortname='local_build',
+ environ={'CONFIG': 'opt'},
+ timeout_seconds=local_build_timeout))
+ jobset.message('START', 'Building.', do_newline=True)
+ num_failures, _ = jobset.run(
+ build_jobs, newline_on_success=True, maxjobs=10)
+ if num_failures == 0:
+ jobset.message('SUCCESS', 'Built successfully.', do_newline=True)
+ else:
+ jobset.message('FAILED', 'Build failed.', do_newline=True)
+ sys.exit(1)
def create_qpsworkers(languages, worker_hosts, perf_cmd=None):
- """Creates QPS workers (but does not start them)."""
- if not worker_hosts:
- # run two workers locally (for each language)
- workers=[(None, 10000), (None, 10010)]
- elif len(worker_hosts) == 1:
- # run two workers on the remote host (for each language)
- workers=[(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
- else:
- # run one worker per each remote host (for each language)
- workers=[(worker_host, 10000) for worker_host in worker_hosts]
-
- return [create_qpsworker_job(language,
- shortname= 'qps_worker_%s_%s' % (language,
- worker_idx),
- port=worker[1] + language.worker_port_offset(),
- remote_host=worker[0],
- perf_cmd=perf_cmd)
- for language in languages
- for worker_idx, worker in enumerate(workers)]
-
-
-def perf_report_processor_job(worker_host, perf_base_name, output_filename, flame_graph_reports):
- print('Creating perf report collection job for %s' % worker_host)
- cmd = ''
- if worker_host != 'localhost':
- user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
- cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\
- tools/run_tests/performance/process_remote_perf_flamegraphs.sh" \
- % (user_at_host, output_filename, flame_graph_reports, perf_base_name)
- else:
- cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\
- tools/run_tests/performance/process_local_perf_flamegraphs.sh" \
- % (output_filename, flame_graph_reports, perf_base_name)
-
- return jobset.JobSpec(cmdline=cmd,
- timeout_seconds=3*60,
- shell=True,
- verbose_success=True,
- shortname='process perf report')
+ """Creates QPS workers (but does not start them)."""
+ if not worker_hosts:
+ # run two workers locally (for each language)
+ workers = [(None, 10000), (None, 10010)]
+ elif len(worker_hosts) == 1:
+ # run two workers on the remote host (for each language)
+ workers = [(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
+ else:
+        # run one worker per remote host (for each language)
+ workers = [(worker_host, 10000) for worker_host in worker_hosts]
+
+ return [
+ create_qpsworker_job(
+ language,
+ shortname='qps_worker_%s_%s' % (language, worker_idx),
+ port=worker[1] + language.worker_port_offset(),
+ remote_host=worker[0],
+ perf_cmd=perf_cmd)
+ for language in languages for worker_idx, worker in enumerate(workers)
+ ]
+
+
+def perf_report_processor_job(worker_host, perf_base_name, output_filename,
+ flame_graph_reports):
+ print('Creating perf report collection job for %s' % worker_host)
+ cmd = ''
+ if worker_host != 'localhost':
+ user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
+ cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%stools/run_tests/performance/process_remote_perf_flamegraphs.sh" % (
+ user_at_host, output_filename, flame_graph_reports, perf_base_name)
+ else:
+ cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%stools/run_tests/performance/process_local_perf_flamegraphs.sh" % (
+ output_filename, flame_graph_reports, perf_base_name)
+
+ return jobset.JobSpec(
+ cmdline=cmd,
+ timeout_seconds=3 * 60,
+ shell=True,
+ verbose_success=True,
+ shortname='process perf report')
Scenario = collections.namedtuple('Scenario', 'jobspec workers name')
-def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
- category='all', bq_result_table=None,
- netperf=False, netperf_hosts=[], server_cpu_load=0):
- """Create jobspecs for scenarios to run."""
- all_workers = [worker
- for workers in workers_by_lang.values()
- for worker in workers]
- scenarios = []
- _NO_WORKERS = []
-
- if netperf:
- if not netperf_hosts:
- netperf_server='localhost'
- netperf_client=None
- elif len(netperf_hosts) == 1:
- netperf_server=netperf_hosts[0]
- netperf_client=netperf_hosts[0]
- else:
- netperf_server=netperf_hosts[0]
- netperf_client=netperf_hosts[1]
- scenarios.append(Scenario(
- create_netperf_jobspec(server_host=netperf_server,
- client_host=netperf_client,
- bq_result_table=bq_result_table),
- _NO_WORKERS, 'netperf'))
-
- for language in languages:
- for scenario_json in language.scenarios():
- if re.search(regex, scenario_json['name']):
- categories = scenario_json.get('CATEGORIES', ['scalable', 'smoketest'])
- if category in categories or category == 'all':
- workers = workers_by_lang[str(language)][:]
- # 'SERVER_LANGUAGE' is an indicator for this script to pick
- # a server in different language.
- custom_server_lang = scenario_json.get('SERVER_LANGUAGE', None)
- custom_client_lang = scenario_json.get('CLIENT_LANGUAGE', None)
- scenario_json = scenario_config.remove_nonproto_fields(scenario_json)
- if custom_server_lang and custom_client_lang:
- raise Exception('Cannot set both custom CLIENT_LANGUAGE and SERVER_LANGUAGE'
+def create_scenarios(languages,
+ workers_by_lang,
+ remote_host=None,
+ regex='.*',
+ category='all',
+ bq_result_table=None,
+ netperf=False,
+ netperf_hosts=[],
+ server_cpu_load=0):
+ """Create jobspecs for scenarios to run."""
+ all_workers = [
+ worker for workers in workers_by_lang.values() for worker in workers
+ ]
+ scenarios = []
+ _NO_WORKERS = []
+
+ if netperf:
+ if not netperf_hosts:
+ netperf_server = 'localhost'
+ netperf_client = None
+ elif len(netperf_hosts) == 1:
+ netperf_server = netperf_hosts[0]
+ netperf_client = netperf_hosts[0]
+ else:
+ netperf_server = netperf_hosts[0]
+ netperf_client = netperf_hosts[1]
+ scenarios.append(
+ Scenario(
+ create_netperf_jobspec(
+ server_host=netperf_server,
+ client_host=netperf_client,
+ bq_result_table=bq_result_table), _NO_WORKERS, 'netperf'))
+
+ for language in languages:
+ for scenario_json in language.scenarios():
+ if re.search(regex, scenario_json['name']):
+ categories = scenario_json.get('CATEGORIES',
+ ['scalable', 'smoketest'])
+ if category in categories or category == 'all':
+ workers = workers_by_lang[str(language)][:]
+ # 'SERVER_LANGUAGE' is an indicator for this script to pick
+                    # a server in a different language.
+ custom_server_lang = scenario_json.get('SERVER_LANGUAGE',
+ None)
+ custom_client_lang = scenario_json.get('CLIENT_LANGUAGE',
+ None)
+ scenario_json = scenario_config.remove_nonproto_fields(
+ scenario_json)
+ if custom_server_lang and custom_client_lang:
+ raise Exception(
+ 'Cannot set both custom CLIENT_LANGUAGE and SERVER_LANGUAGE'
'in the same scenario')
- if custom_server_lang:
- if not workers_by_lang.get(custom_server_lang, []):
- print('Warning: Skipping scenario %s as' % scenario_json['name'])
- print('SERVER_LANGUAGE is set to %s yet the language has '
- 'not been selected with -l' % custom_server_lang)
- continue
- for idx in range(0, scenario_json['num_servers']):
- # replace first X workers by workers of a different language
- workers[idx] = workers_by_lang[custom_server_lang][idx]
- if custom_client_lang:
- if not workers_by_lang.get(custom_client_lang, []):
- print('Warning: Skipping scenario %s as' % scenario_json['name'])
- print('CLIENT_LANGUAGE is set to %s yet the language has '
- 'not been selected with -l' % custom_client_lang)
- continue
- for idx in range(scenario_json['num_servers'], len(workers)):
- # replace all client workers by workers of a different language,
- # leave num_server workers as they are server workers.
- workers[idx] = workers_by_lang[custom_client_lang][idx]
- scenario = Scenario(
- create_scenario_jobspec(scenario_json,
- [w.host_and_port for w in workers],
- remote_host=remote_host,
- bq_result_table=bq_result_table,
- server_cpu_load=server_cpu_load),
- workers,
- scenario_json['name'])
- scenarios.append(scenario)
-
- return scenarios
+ if custom_server_lang:
+ if not workers_by_lang.get(custom_server_lang, []):
+ print('Warning: Skipping scenario %s as' %
+ scenario_json['name'])
+ print(
+ 'SERVER_LANGUAGE is set to %s yet the language has '
+ 'not been selected with -l' %
+ custom_server_lang)
+ continue
+ for idx in range(0, scenario_json['num_servers']):
+ # replace first X workers by workers of a different language
+ workers[idx] = workers_by_lang[custom_server_lang][
+ idx]
+ if custom_client_lang:
+ if not workers_by_lang.get(custom_client_lang, []):
+ print('Warning: Skipping scenario %s as' %
+ scenario_json['name'])
+ print(
+ 'CLIENT_LANGUAGE is set to %s yet the language has '
+ 'not been selected with -l' %
+ custom_client_lang)
+ continue
+ for idx in range(scenario_json['num_servers'],
+ len(workers)):
+                        # replace all client workers by workers of a different language;
+                        # leave the first num_servers workers alone, as they are server workers.
+ workers[idx] = workers_by_lang[custom_client_lang][
+ idx]
+ scenario = Scenario(
+ create_scenario_jobspec(
+ scenario_json, [w.host_and_port for w in workers],
+ remote_host=remote_host,
+ bq_result_table=bq_result_table,
+ server_cpu_load=server_cpu_load), workers,
+ scenario_json['name'])
+ scenarios.append(scenario)
+
+ return scenarios
def finish_qps_workers(jobs, qpsworker_jobs):
- """Waits for given jobs to finish and eventually kills them."""
- retries = 0
- num_killed = 0
- while any(job.is_running() for job in jobs):
- for job in qpsworker_jobs:
- if job.is_running():
- print('QPS worker "%s" is still running.' % job.host_and_port)
- if retries > 10:
- print('Killing all QPS workers.')
- for job in jobs:
- job.kill()
- num_killed += 1
- retries += 1
- time.sleep(3)
- print('All QPS workers finished.')
- return num_killed
+ """Waits for given jobs to finish and eventually kills them."""
+ retries = 0
+ num_killed = 0
+ while any(job.is_running() for job in jobs):
+ for job in qpsworker_jobs:
+ if job.is_running():
+ print('QPS worker "%s" is still running.' % job.host_and_port)
+ if retries > 10:
+ print('Killing all QPS workers.')
+ for job in jobs:
+ job.kill()
+ num_killed += 1
+ retries += 1
+ time.sleep(3)
+ print('All QPS workers finished.')
+ return num_killed
+
profile_output_files = []
+
# Collect perf text reports and flamegraphs if perf_cmd was used
# Note the base names of perf text reports are used when creating and processing
# perf data. The scenario name makes the output name unique in the final
# perf reports directory.
# Also, the perf profiles need to be fetched and processed after each scenario
# in order to avoid clobbering the output files.
-def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name, flame_graph_reports):
- perf_report_jobs = []
- global profile_output_files
- for host_and_port in hosts_and_base_names:
- perf_base_name = hosts_and_base_names[host_and_port]
- output_filename = '%s-%s' % (scenario_name, perf_base_name)
- # from the base filename, create .svg output filename
- host = host_and_port.split(':')[0]
- profile_output_files.append('%s.svg' % output_filename)
- perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename, flame_graph_reports))
-
- jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True)
- failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1)
- jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True)
- return failures
+def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name,
+ flame_graph_reports):
+ perf_report_jobs = []
+ global profile_output_files
+ for host_and_port in hosts_and_base_names:
+ perf_base_name = hosts_and_base_names[host_and_port]
+ output_filename = '%s-%s' % (scenario_name, perf_base_name)
+ # from the base filename, create .svg output filename
+ host = host_and_port.split(':')[0]
+ profile_output_files.append('%s.svg' % output_filename)
+ perf_report_jobs.append(
+ perf_report_processor_job(host, perf_base_name, output_filename,
+ flame_graph_reports))
+
+ jobset.message(
+ 'START', 'Collecting perf reports from qps workers', do_newline=True)
+ failures, _ = jobset.run(
+ perf_report_jobs, newline_on_success=True, maxjobs=1)
+ jobset.message(
+ 'END', 'Collecting perf reports from qps workers', do_newline=True)
+ return failures
+
def main():
- argp = argparse.ArgumentParser(description='Run performance tests.')
- argp.add_argument('-l', '--language',
- choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
- nargs='+',
- required=True,
- help='Languages to benchmark.')
- argp.add_argument('--remote_driver_host',
- default=None,
- help='Run QPS driver on given host. By default, QPS driver is run locally.')
- argp.add_argument('--remote_worker_host',
- nargs='+',
- default=[],
- help='Worker hosts where to start QPS workers.')
- argp.add_argument('--dry_run',
- default=False,
- action='store_const',
- const=True,
- help='Just list scenarios to be run, but don\'t run them.')
- argp.add_argument('-r', '--regex', default='.*', type=str,
- help='Regex to select scenarios to run.')
- argp.add_argument('--bq_result_table', default=None, type=str,
- help='Bigquery "dataset.table" to upload results to.')
- argp.add_argument('--category',
- choices=['smoketest','all','scalable','sweep'],
- default='all',
- help='Select a category of tests to run.')
- argp.add_argument('--netperf',
- default=False,
- action='store_const',
- const=True,
- help='Run netperf benchmark as one of the scenarios.')
- argp.add_argument('--server_cpu_load',
- default=0, type=int,
- help='Select a targeted server cpu load to run. 0 means ignore this flag')
- argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
- help='Name of XML report file to generate.')
- argp.add_argument('--perf_args',
- help=('Example usage: "--perf_args=record -F 99 -g". '
- 'Wrap QPS workers in a perf command '
- 'with the arguments to perf specified here. '
- '".svg" flame graph profiles will be '
- 'created for each Qps Worker on each scenario. '
- 'Files will output to "<repo_root>/<args.flame_graph_reports>" '
- 'directory. Output files from running the worker '
- 'under perf are saved in the repo root where its ran. '
- 'Note that the perf "-g" flag is necessary for '
- 'flame graphs generation to work (assuming the binary '
- 'being profiled uses frame pointers, check out '
- '"--call-graph dwarf" option using libunwind otherwise.) '
- 'Also note that the entire "--perf_args=<arg(s)>" must '
- 'be wrapped in quotes as in the example usage. '
- 'If the "--perg_args" is unspecified, "perf" will '
- 'not be used at all. '
- 'See http://www.brendangregg.com/perf.html '
- 'for more general perf examples.'))
- argp.add_argument('--skip_generate_flamegraphs',
- default=False,
- action='store_const',
- const=True,
- help=('Turn flame graph generation off. '
- 'May be useful if "perf_args" arguments do not make sense for '
- 'generating flamegraphs (e.g., "--perf_args=stat ...")'))
- argp.add_argument('-f', '--flame_graph_reports', default='perf_reports', type=str,
- help='Name of directory to output flame graph profiles to, if any are created.')
- argp.add_argument('-u', '--remote_host_username', default='', type=str,
- help='Use a username that isn\'t "Jenkins" to SSH into remote workers.')
-
- args = argp.parse_args()
-
- global _REMOTE_HOST_USERNAME
- if args.remote_host_username:
- _REMOTE_HOST_USERNAME = args.remote_host_username
-
- languages = set(scenario_config.LANGUAGES[l]
- for l in itertools.chain.from_iterable(
- six.iterkeys(scenario_config.LANGUAGES) if x == 'all'
- else [x] for x in args.language))
-
-
- # Put together set of remote hosts where to run and build
- remote_hosts = set()
- if args.remote_worker_host:
- for host in args.remote_worker_host:
- remote_hosts.add(host)
- if args.remote_driver_host:
- remote_hosts.add(args.remote_driver_host)
-
- if not args.dry_run:
- if remote_hosts:
- archive_repo(languages=[str(l) for l in languages])
- prepare_remote_hosts(remote_hosts, prepare_local=True)
- else:
- prepare_remote_hosts([], prepare_local=True)
-
- build_local = False
- if not args.remote_driver_host:
- build_local = True
- if not args.dry_run:
- build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local)
-
- perf_cmd = None
- if args.perf_args:
- print('Running workers under perf profiler')
- # Expect /usr/bin/perf to be installed here, as is usual
- perf_cmd = ['/usr/bin/perf']
- perf_cmd.extend(re.split('\s+', args.perf_args))
-
- qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd)
-
- # get list of worker addresses for each language.
- workers_by_lang = dict([(str(language), []) for language in languages])
- for job in qpsworker_jobs:
- workers_by_lang[str(job.language)].append(job)
-
- scenarios = create_scenarios(languages,
- workers_by_lang=workers_by_lang,
- remote_host=args.remote_driver_host,
- regex=args.regex,
- category=args.category,
- bq_result_table=args.bq_result_table,
- netperf=args.netperf,
- netperf_hosts=args.remote_worker_host,
- server_cpu_load=args.server_cpu_load)
-
- if not scenarios:
- raise Exception('No scenarios to run')
-
- total_scenario_failures = 0
- qps_workers_killed = 0
- merged_resultset = {}
- perf_report_failures = 0
-
- for scenario in scenarios:
- if args.dry_run:
- print(scenario.name)
- else:
- scenario_failures = 0
- try:
- for worker in scenario.workers:
- worker.start()
- jobs = [scenario.jobspec]
- if scenario.workers:
- jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host))
- scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1)
- total_scenario_failures += scenario_failures
- merged_resultset = dict(itertools.chain(six.iteritems(merged_resultset),
- six.iteritems(resultset)))
- finally:
- # Consider qps workers that need to be killed as failures
- qps_workers_killed += finish_qps_workers(scenario.workers, qpsworker_jobs)
-
- if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
- workers_and_base_names = {}
- for worker in scenario.workers:
- if not worker.perf_file_base_name:
- raise Exception('using perf buf perf report filename is unspecified')
- workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name
- perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name, args.flame_graph_reports)
-
-
- # Still write the index.html even if some scenarios failed.
- # 'profile_output_files' will only have names for scenarios that passed
- if perf_cmd and not args.skip_generate_flamegraphs:
- # write the index fil to the output dir, with all profiles from all scenarios/workers
- report_utils.render_perf_profiling_results('%s/index.html' % args.flame_graph_reports, profile_output_files)
-
- report_utils.render_junit_xml_report(merged_resultset, args.xml_report,
- suite_name='benchmarks')
-
- if total_scenario_failures > 0 or qps_workers_killed > 0:
- print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed))
- sys.exit(1)
-
- if perf_report_failures > 0:
- print('%s perf profile collection jobs failed' % perf_report_failures)
- sys.exit(1)
+ argp = argparse.ArgumentParser(description='Run performance tests.')
+ argp.add_argument(
+ '-l',
+ '--language',
+ choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
+ nargs='+',
+ required=True,
+ help='Languages to benchmark.')
+ argp.add_argument(
+ '--remote_driver_host',
+ default=None,
+ help='Run QPS driver on given host. By default, QPS driver is run locally.'
+ )
+ argp.add_argument(
+ '--remote_worker_host',
+ nargs='+',
+ default=[],
+        help='Worker hosts on which to start QPS workers.')
+ argp.add_argument(
+ '--dry_run',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Just list scenarios to be run, but don\'t run them.')
+ argp.add_argument(
+ '-r',
+ '--regex',
+ default='.*',
+ type=str,
+ help='Regex to select scenarios to run.')
+ argp.add_argument(
+ '--bq_result_table',
+ default=None,
+ type=str,
+ help='Bigquery "dataset.table" to upload results to.')
+ argp.add_argument(
+ '--category',
+ choices=['smoketest', 'all', 'scalable', 'sweep'],
+ default='all',
+ help='Select a category of tests to run.')
+ argp.add_argument(
+ '--netperf',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run netperf benchmark as one of the scenarios.')
+ argp.add_argument(
+ '--server_cpu_load',
+ default=0,
+ type=int,
+ help='Select a targeted server cpu load to run. 0 means ignore this flag'
+ )
+ argp.add_argument(
+ '-x',
+ '--xml_report',
+ default='report.xml',
+ type=str,
+ help='Name of XML report file to generate.')
+ argp.add_argument(
+ '--perf_args',
+ help=('Example usage: "--perf_args=record -F 99 -g". '
+ 'Wrap QPS workers in a perf command '
+ 'with the arguments to perf specified here. '
+ '".svg" flame graph profiles will be '
+              'created for each QPS worker on each scenario. '
+              'Files will be output to the "<repo_root>/<args.flame_graph_reports>" '
+              'directory. Output files from running the worker '
+              'under perf are saved in the repo root where it is run. '
+              'Note that the perf "-g" flag is necessary for '
+              'flame graph generation to work (assuming the binary '
+              'being profiled uses frame pointers; check out '
+              'the "--call-graph dwarf" option using libunwind otherwise.) '
+              'Also note that the entire "--perf_args=<arg(s)>" must '
+              'be wrapped in quotes as in the example usage. '
+              'If "--perf_args" is unspecified, "perf" will '
+ 'not be used at all. '
+ 'See http://www.brendangregg.com/perf.html '
+ 'for more general perf examples.'))
+ argp.add_argument(
+ '--skip_generate_flamegraphs',
+ default=False,
+ action='store_const',
+ const=True,
+ help=('Turn flame graph generation off. '
+ 'May be useful if "perf_args" arguments do not make sense for '
+ 'generating flamegraphs (e.g., "--perf_args=stat ...")'))
+ argp.add_argument(
+ '-f',
+ '--flame_graph_reports',
+ default='perf_reports',
+ type=str,
+ help='Name of directory to output flame graph profiles to, if any are created.'
+ )
+ argp.add_argument(
+ '-u',
+ '--remote_host_username',
+ default='',
+ type=str,
+ help='Use a username that isn\'t "Jenkins" to SSH into remote workers.')
+
+ args = argp.parse_args()
+
+ global _REMOTE_HOST_USERNAME
+ if args.remote_host_username:
+ _REMOTE_HOST_USERNAME = args.remote_host_username
+
+ languages = set(
+ scenario_config.LANGUAGES[l]
+ for l in itertools.chain.from_iterable(
+ six.iterkeys(scenario_config.LANGUAGES) if x == 'all' else [x]
+ for x in args.language))
+
+ # Put together set of remote hosts where to run and build
+ remote_hosts = set()
+ if args.remote_worker_host:
+ for host in args.remote_worker_host:
+ remote_hosts.add(host)
+ if args.remote_driver_host:
+ remote_hosts.add(args.remote_driver_host)
+
+ if not args.dry_run:
+ if remote_hosts:
+ archive_repo(languages=[str(l) for l in languages])
+ prepare_remote_hosts(remote_hosts, prepare_local=True)
+ else:
+ prepare_remote_hosts([], prepare_local=True)
+
+ build_local = False
+ if not args.remote_driver_host:
+ build_local = True
+ if not args.dry_run:
+ build_on_remote_hosts(
+ remote_hosts,
+ languages=[str(l) for l in languages],
+ build_local=build_local)
+
+ perf_cmd = None
+ if args.perf_args:
+ print('Running workers under perf profiler')
+ # Expect /usr/bin/perf to be installed here, as is usual
+ perf_cmd = ['/usr/bin/perf']
+ perf_cmd.extend(re.split('\s+', args.perf_args))
+
+ qpsworker_jobs = create_qpsworkers(
+ languages, args.remote_worker_host, perf_cmd=perf_cmd)
+
+ # get list of worker addresses for each language.
+ workers_by_lang = dict([(str(language), []) for language in languages])
+ for job in qpsworker_jobs:
+ workers_by_lang[str(job.language)].append(job)
+
+ scenarios = create_scenarios(
+ languages,
+ workers_by_lang=workers_by_lang,
+ remote_host=args.remote_driver_host,
+ regex=args.regex,
+ category=args.category,
+ bq_result_table=args.bq_result_table,
+ netperf=args.netperf,
+ netperf_hosts=args.remote_worker_host,
+ server_cpu_load=args.server_cpu_load)
+
+ if not scenarios:
+ raise Exception('No scenarios to run')
+
+ total_scenario_failures = 0
+ qps_workers_killed = 0
+ merged_resultset = {}
+ perf_report_failures = 0
+
+ for scenario in scenarios:
+ if args.dry_run:
+ print(scenario.name)
+ else:
+ scenario_failures = 0
+ try:
+ for worker in scenario.workers:
+ worker.start()
+ jobs = [scenario.jobspec]
+ if scenario.workers:
+ jobs.append(
+ create_quit_jobspec(
+ scenario.workers,
+ remote_host=args.remote_driver_host))
+ scenario_failures, resultset = jobset.run(
+ jobs, newline_on_success=True, maxjobs=1)
+ total_scenario_failures += scenario_failures
+ merged_resultset = dict(
+ itertools.chain(
+ six.iteritems(merged_resultset),
+ six.iteritems(resultset)))
+ finally:
+ # Consider qps workers that need to be killed as failures
+ qps_workers_killed += finish_qps_workers(scenario.workers,
+ qpsworker_jobs)
+
+ if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
+ workers_and_base_names = {}
+ for worker in scenario.workers:
+ if not worker.perf_file_base_name:
+ raise Exception(
+                            'using perf but perf report filename is unspecified')
+ workers_and_base_names[
+ worker.host_and_port] = worker.perf_file_base_name
+ perf_report_failures += run_collect_perf_profile_jobs(
+ workers_and_base_names, scenario.name,
+ args.flame_graph_reports)
+
+ # Still write the index.html even if some scenarios failed.
+ # 'profile_output_files' will only have names for scenarios that passed
+ if perf_cmd and not args.skip_generate_flamegraphs:
+        # write the index file to the output dir, with all profiles from all scenarios/workers
+ report_utils.render_perf_profiling_results(
+ '%s/index.html' % args.flame_graph_reports, profile_output_files)
+
+ report_utils.render_junit_xml_report(
+ merged_resultset, args.xml_report, suite_name='benchmarks')
+
+ if total_scenario_failures > 0 or qps_workers_killed > 0:
+ print('%s scenarios failed and %s qps worker jobs killed' %
+ (total_scenario_failures, qps_workers_killed))
+ sys.exit(1)
+
+ if perf_report_failures > 0:
+ print('%s perf profile collection jobs failed' % perf_report_failures)
+ sys.exit(1)
+
if __name__ == "__main__":
- main()
+ main()
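
In run_performance_tests.py above, the QPS driver command is assembled as a single shell string: QPS_WORKERS (and optionally BQ_RESULT_TABLE) are prepended as inline environment variables, the scenario JSON is shell-quoted, and the whole command may be wrapped in an ssh invocation so the same string works locally and remotely. A stripped-down sketch of that assembly follows; the scenario name, worker addresses, and the shlex/pipes quote shim are illustrative assumptions rather than part of the script:

import json
try:
    from shlex import quote  # Python 3
except ImportError:
    from pipes import quote  # Python 2, as used by the script itself


def build_driver_cmd(scenario_json, workers, bq_result_table=None,
                     remote_host=None, user='jenkins'):
    # Inline env vars keep the command self-contained when wrapped in ssh.
    cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
    if bq_result_table:
        cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
    cmd += 'tools/run_tests/performance/run_qps_driver.sh '
    cmd += '--scenarios_json=%s ' % quote(
        json.dumps({'scenarios': [scenario_json]}))
    cmd += '--scenario_result_file=scenario_result.json'
    if remote_host:
        # Quote the whole command so it survives the remote shell intact.
        cmd = 'ssh %s@%s "cd ~/performance_workspace/grpc/ && "%s' % (
            user, remote_host, quote(cmd))
    return cmd


if __name__ == '__main__':
    # Hypothetical scenario and worker addresses, for illustration only.
    print(build_driver_cmd({'name': 'demo_scenario'},
                           ['localhost:10000', 'localhost:10010']))
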
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 471f5d99e7..bd5b8644b3 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Run tests in parallel."""
from __future__ import print_function
@@ -46,36 +45,34 @@ import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
- from python_utils.upload_test_results import upload_results_to_bq
+ from python_utils.upload_test_results import upload_results_to_bq
except (ImportError):
- pass # It's ok to not import because this is only necessary to upload results to BQ.
+ pass # It's ok to not import because this is only necessary to upload results to BQ.
-gcp_utils_dir = os.path.abspath(os.path.join(
- os.path.dirname(__file__), '../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
-
_FORCE_ENVIRON_FOR_WRAPPERS = {
- 'GRPC_VERBOSITY': 'DEBUG',
+ 'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
- 'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
- 'mac': ['poll'],
+ 'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
+ 'mac': ['poll'],
}
-
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
def get_bqtest_data(limit=None):
- import big_query_utils
+ import big_query_utils
- bq = big_query_utils.create_big_query()
- query = """
+ bq = big_query_utils.create_big_query()
+ query = """
SELECT
filtered_test_name,
SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
@@ -88,941 +85,1068 @@ SELECT
[grpc-testing:jenkins_test_results.aggregate_results]
WHERE
timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
- AND platform = '"""+platform_string()+"""'
+ AND platform = '""" + platform_string() + """'
AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
filtered_test_name"""
- if limit:
- query += " limit {}".format(limit)
- query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
- page = bq.jobs().getQueryResults(
- pageToken=None,
- **query_job['jobReference']).execute(num_retries=3)
- test_data = [BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true', float(row['f'][2]['v'])) for row in page['rows']]
- return test_data
+ if limit:
+ query += " limit {}".format(limit)
+ query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
+ page = bq.jobs().getQueryResults(
+ pageToken=None, **query_job['jobReference']).execute(num_retries=3)
+ test_data = [
+ BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
+ float(row['f'][2]['v'])) for row in page['rows']
+ ]
+ return test_data
def platform_string():
- return jobset.platform_string()
+ return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
+
def run_shell_command(cmd, env=None, cwd=None):
- try:
- subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
- except subprocess.CalledProcessError as e:
- logging.exception("Error while running command '%s'. Exit status %d. Output:\n%s",
- e.cmd, e.returncode, e.output)
- raise
+ try:
+ subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
+ except subprocess.CalledProcessError as e:
+ logging.exception(
+ "Error while running command '%s'. Exit status %d. Output:\n%s",
+ e.cmd, e.returncode, e.output)
+ raise
+
def max_parallel_tests_for_current_platform():
- # Too much test parallelization has only been seen to be a problem
- # so far on windows.
- if jobset.platform_string() == 'windows':
- return 64
- return 1024
+    # So far, too much test parallelization has only been seen to be a
+    # problem on Windows.
+ if jobset.platform_string() == 'windows':
+ return 64
+ return 1024
+
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
- def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=[], iomgr_platform='native'):
- if environ is None:
- environ = {}
- self.build_config = config
- self.environ = environ
- self.environ['CONFIG'] = config
- self.tool_prefix = tool_prefix
- self.timeout_multiplier = timeout_multiplier
- self.iomgr_platform = iomgr_platform
-
- def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
- shortname=None, environ={}, cpu_cost=1.0, flaky=False):
- """Construct a jobset.JobSpec for a test under this config
+ def __init__(self,
+ config,
+ environ=None,
+ timeout_multiplier=1,
+ tool_prefix=[],
+ iomgr_platform='native'):
+ if environ is None:
+ environ = {}
+ self.build_config = config
+ self.environ = environ
+ self.environ['CONFIG'] = config
+ self.tool_prefix = tool_prefix
+ self.timeout_multiplier = timeout_multiplier
+ self.iomgr_platform = iomgr_platform
+
+ def job_spec(self,
+ cmdline,
+ timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
+ shortname=None,
+ environ={},
+ cpu_cost=1.0,
+ flaky=False):
+ """Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
- actual_environ = self.environ.copy()
- for k, v in environ.items():
- actual_environ[k] = v
- if not flaky and shortname and shortname in flaky_tests:
- flaky = True
- if shortname in shortname_to_cpu:
- cpu_cost = shortname_to_cpu[shortname]
- return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
- shortname=shortname,
- environ=actual_environ,
- cpu_cost=cpu_cost,
- timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
- flake_retries=4 if flaky or args.allow_flakes else 0,
- timeout_retries=1 if flaky or args.allow_flakes else 0)
-
-
-def get_c_tests(travis, test_lang) :
- out = []
- platforms_str = 'ci_platforms' if travis else 'platforms'
- with open('tools/run_tests/generated/tests.json') as f:
- js = json.load(f)
- return [tgt
- for tgt in js
- if tgt['language'] == test_lang and
- platform_string() in tgt[platforms_str] and
- not (travis and tgt['flaky'])]
+ actual_environ = self.environ.copy()
+ for k, v in environ.items():
+ actual_environ[k] = v
+ if not flaky and shortname and shortname in flaky_tests:
+ flaky = True
+ if shortname in shortname_to_cpu:
+ cpu_cost = shortname_to_cpu[shortname]
+ return jobset.JobSpec(
+ cmdline=self.tool_prefix + cmdline,
+ shortname=shortname,
+ environ=actual_environ,
+ cpu_cost=cpu_cost,
+ timeout_seconds=(self.timeout_multiplier * timeout_seconds
+ if timeout_seconds else None),
+ flake_retries=4 if flaky or args.allow_flakes else 0,
+ timeout_retries=1 if flaky or args.allow_flakes else 0)
+
+
+def get_c_tests(travis, test_lang):
+ out = []
+ platforms_str = 'ci_platforms' if travis else 'platforms'
+ with open('tools/run_tests/generated/tests.json') as f:
+ js = json.load(f)
+ return [
+ tgt for tgt in js
+ if tgt['language'] == test_lang and platform_string() in tgt[
+ platforms_str] and not (travis and tgt['flaky'])
+ ]
def _check_compiler(compiler, supported_compilers):
- if compiler not in supported_compilers:
- raise Exception('Compiler %s not supported (on this platform).' % compiler)
+ if compiler not in supported_compilers:
+ raise Exception('Compiler %s not supported (on this platform).' %
+ compiler)
def _check_arch(arch, supported_archs):
- if arch not in supported_archs:
- raise Exception('Architecture %s not supported.' % arch)
+ if arch not in supported_archs:
+ raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
- """Returns True if running running as a --use_docker child."""
- return True if os.getenv('RUN_TESTS_COMMAND') else False
+ """Returns True if running running as a --use_docker child."""
+ return True if os.getenv('RUN_TESTS_COMMAND') else False
-_PythonConfigVars = collections.namedtuple(
- '_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
- 'venv_relative_python', 'toolchain', 'runner'])
+_PythonConfigVars = collections.namedtuple('_ConfigVars', [
+ 'shell', 'builder', 'builder_prefix_arguments', 'venv_relative_python',
+ 'toolchain', 'runner'
+])
def _python_config_generator(name, major, minor, bits, config_vars):
- return PythonConfig(
- name,
- config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
- _python_pattern_function(major=major, minor=minor, bits=bits)] + [
- name] + config_vars.venv_relative_python + config_vars.toolchain,
- config_vars.shell + config_vars.runner + [
- os.path.join(name, config_vars.venv_relative_python[0])])
+ return PythonConfig(
+ name, config_vars.shell + config_vars.builder +
+ config_vars.builder_prefix_arguments + [
+ _python_pattern_function(major=major, minor=minor, bits=bits)
+ ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
+ config_vars.shell + config_vars.runner +
+ [os.path.join(name, config_vars.venv_relative_python[0])])
def _pypy_config_generator(name, major, config_vars):
- return PythonConfig(
- name,
- config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
- _pypy_pattern_function(major=major)] + [
- name] + config_vars.venv_relative_python + config_vars.toolchain,
- config_vars.shell + config_vars.runner + [
- os.path.join(name, config_vars.venv_relative_python[0])])
+ return PythonConfig(
+ name,
+ config_vars.shell + config_vars.builder +
+ config_vars.builder_prefix_arguments + [
+ _pypy_pattern_function(major=major)
+ ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
+ config_vars.shell + config_vars.runner +
+ [os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
- # Bit-ness is handled by the test machine's environment
- if os.name == "nt":
- if bits == "64":
- return '/c/Python{major}{minor}/python.exe'.format(
- major=major, minor=minor, bits=bits)
+ # Bit-ness is handled by the test machine's environment
+ if os.name == "nt":
+ if bits == "64":
+ return '/c/Python{major}{minor}/python.exe'.format(
+ major=major, minor=minor, bits=bits)
+ else:
+ return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
+ major=major, minor=minor, bits=bits)
else:
- return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
- major=major, minor=minor, bits=bits)
- else:
- return 'python{major}.{minor}'.format(major=major, minor=minor)
+ return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
- if major == '2':
- return 'pypy'
- elif major == '3':
- return 'pypy3'
- else:
- raise ValueError("Unknown PyPy major version")
+ if major == '2':
+ return 'pypy'
+ elif major == '3':
+ return 'pypy3'
+ else:
+ raise ValueError("Unknown PyPy major version")
class CLanguage(object):
- def __init__(self, make_target, test_lang):
- self.make_target = make_target
- self.platform = platform_string()
- self.test_lang = test_lang
-
- def configure(self, config, args):
- self.config = config
- self.args = args
- if self.platform == 'windows':
- _check_compiler(self.args.compiler, ['default', 'cmake', 'cmake_vs2015',
- 'cmake_vs2017'])
- _check_arch(self.args.arch, ['default', 'x64', 'x86'])
- self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
- self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
- self._use_cmake = True
- self._make_options = []
- elif self.args.compiler == 'cmake':
- _check_arch(self.args.arch, ['default'])
- self._use_cmake = True
- self._docker_distro = 'jessie'
- self._make_options = []
- else:
- self._use_cmake = False
- self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
- self.args.compiler)
- if args.iomgr_platform == "uv":
- cflags = '-DGRPC_UV -DGRPC_UV_THREAD_CHECK'
- try:
- cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).strip() + ' '
- except (subprocess.CalledProcessError, OSError):
- pass
- try:
- ldflags = subprocess.check_output(['pkg-config', '--libs', 'libuv']).strip() + ' '
- except (subprocess.CalledProcessError, OSError):
- ldflags = '-luv '
- self._make_options += ['EXTRA_CPPFLAGS={}'.format(cflags),
- 'EXTRA_LDLIBS={}'.format(ldflags)]
-
- def test_specs(self):
- out = []
- binaries = get_c_tests(self.args.travis, self.test_lang)
- for target in binaries:
- if self._use_cmake and target.get('boringssl', False):
- # cmake doesn't build boringssl tests
- continue
- auto_timeout_scaling = target.get('auto_timeout_scaling', True)
- polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
- if target.get('uses_polling', True)
- else ['none'])
- if self.args.iomgr_platform == 'uv':
- polling_strategies = ['all']
- for polling_strategy in polling_strategies:
- env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
- _ROOT + '/src/core/tsi/test_creds/ca.pem',
- 'GRPC_POLL_STRATEGY': polling_strategy,
- 'GRPC_VERBOSITY': 'DEBUG'}
- resolver = os.environ.get('GRPC_DNS_RESOLVER', None);
- if resolver:
- env['GRPC_DNS_RESOLVER'] = resolver
- shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
- if polling_strategy in target.get('excluded_poll_engines', []):
- continue
-
- timeout_scaling = 1
- if auto_timeout_scaling:
- config = self.args.config
- if ('asan' in config
- or config == 'msan'
- or config == 'tsan'
- or config == 'ubsan'
- or config == 'helgrind'
- or config == 'memcheck'):
- # Scale overall test timeout if running under various sanitizers.
- # scaling value is based on historical data analysis
- timeout_scaling *= 3
- elif polling_strategy == 'poll-cv':
- # scale test timeout if running with poll-cv
- # sanitizer and poll-cv scaling is not cumulative to ensure
- # reasonable timeout values.
- # TODO(jtattermusch): based on historical data and 5min default
- # test timeout poll-cv scaling is currently not useful.
- # Leaving here so it can be reintroduced if the default test timeout
- # is decreased in the future.
- timeout_scaling *= 1
-
- if self.config.build_config in target['exclude_configs']:
- continue
- if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
- continue
+ def __init__(self, make_target, test_lang):
+ self.make_target = make_target
+ self.platform = platform_string()
+ self.test_lang = test_lang
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
if self.platform == 'windows':
- binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[self.config.build_config], target['name'])
+ _check_compiler(self.args.compiler, [
+ 'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
+ ])
+ _check_arch(self.args.arch, ['default', 'x64', 'x86'])
+ self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
+ self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
+ self._use_cmake = True
+ self._make_options = []
+ elif self.args.compiler == 'cmake':
+ _check_arch(self.args.arch, ['default'])
+ self._use_cmake = True
+ self._docker_distro = 'jessie'
+ self._make_options = []
else:
- if self._use_cmake:
- binary = 'cmake/build/%s' % target['name']
- else:
- binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
- cpu_cost = target['cpu_cost']
- if cpu_cost == 'capacity':
- cpu_cost = multiprocessing.cpu_count()
- if os.path.isfile(binary):
- list_test_command = None
- filter_test_command = None
-
- # these are the flag defined by gtest and benchmark framework to list
- # and filter test runs. We use them to split each individual test
- # into its own JobSpec, and thus into its own process.
- if 'benchmark' in target and target['benchmark']:
- with open(os.devnull, 'w') as fnull:
- tests = subprocess.check_output([binary, '--benchmark_list_tests'],
- stderr=fnull)
- for line in tests.split('\n'):
- test = line.strip()
- if not test: continue
- cmdline = [binary, '--benchmark_filter=%s$' % test] + target['args']
- out.append(self.config.job_spec(cmdline,
- shortname='%s %s' % (' '.join(cmdline), shortname_ext),
- cpu_cost=cpu_cost,
- timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
- environ=env))
- elif 'gtest' in target and target['gtest']:
- # here we parse the output of --gtest_list_tests to build up a complete
- # list of the tests contained in a binary for each test, we then
- # add a job to run, filtering for just that test.
- with open(os.devnull, 'w') as fnull:
- tests = subprocess.check_output([binary, '--gtest_list_tests'],
- stderr=fnull)
- base = None
- for line in tests.split('\n'):
- i = line.find('#')
- if i >= 0: line = line[:i]
- if not line: continue
- if line[0] != ' ':
- base = line.strip()
- else:
- assert base is not None
- assert line[1] == ' '
- test = base + line.strip()
- cmdline = [binary, '--gtest_filter=%s' % test] + target['args']
- out.append(self.config.job_spec(cmdline,
- shortname='%s %s' % (' '.join(cmdline), shortname_ext),
- cpu_cost=cpu_cost,
- timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
- environ=env))
- else:
- cmdline = [binary] + target['args']
- shortname = target.get('shortname', ' '.join(
- pipes.quote(arg)
- for arg in cmdline))
- shortname += shortname_ext
- out.append(self.config.job_spec(cmdline,
- shortname=shortname,
- cpu_cost=cpu_cost,
- flaky=target.get('flaky', False),
- timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
- environ=env))
- elif self.args.regex == '.*' or self.platform == 'windows':
- print('\nWARNING: binary not found, skipping', binary)
- return sorted(out)
-
- def make_targets(self):
- if self.platform == 'windows':
- # don't build tools on windows just yet
- return ['buildtests_%s' % self.make_target]
- return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
- 'check_epollexclusive']
-
- def make_options(self):
- return self._make_options
-
- def pre_build_steps(self):
- if self.platform == 'windows':
- return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
- self._cmake_generator_option,
- self._cmake_arch_option]]
- elif self._use_cmake:
- return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
- else:
- return []
+ self._use_cmake = False
+ self._docker_distro, self._make_options = self._compiler_options(
+ self.args.use_docker, self.args.compiler)
+ if args.iomgr_platform == "uv":
+ cflags = '-DGRPC_UV -DGRPC_UV_THREAD_CHECK'
+ try:
+ cflags += subprocess.check_output(
+ ['pkg-config', '--cflags', 'libuv']).strip() + ' '
+ except (subprocess.CalledProcessError, OSError):
+ pass
+ try:
+ ldflags = subprocess.check_output(
+ ['pkg-config', '--libs', 'libuv']).strip() + ' '
+ except (subprocess.CalledProcessError, OSError):
+ ldflags = '-luv '
+ self._make_options += [
+ 'EXTRA_CPPFLAGS={}'.format(cflags),
+ 'EXTRA_LDLIBS={}'.format(ldflags)
+ ]
+
+ def test_specs(self):
+ out = []
+ binaries = get_c_tests(self.args.travis, self.test_lang)
+ for target in binaries:
+ if self._use_cmake and target.get('boringssl', False):
+ # cmake doesn't build boringssl tests
+ continue
+ auto_timeout_scaling = target.get('auto_timeout_scaling', True)
+ polling_strategies = (
+ _POLLING_STRATEGIES.get(self.platform, ['all'])
+ if target.get('uses_polling', True) else ['none'])
+ if self.args.iomgr_platform == 'uv':
+ polling_strategies = ['all']
+ for polling_strategy in polling_strategies:
+ env = {
+ 'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
+ _ROOT + '/src/core/tsi/test_creds/ca.pem',
+ 'GRPC_POLL_STRATEGY':
+ polling_strategy,
+ 'GRPC_VERBOSITY':
+ 'DEBUG'
+ }
+ resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
+ if resolver:
+ env['GRPC_DNS_RESOLVER'] = resolver
+ shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
+ if polling_strategy in target.get('excluded_poll_engines', []):
+ continue
+
+ timeout_scaling = 1
+ if auto_timeout_scaling:
+ config = self.args.config
+ if ('asan' in config or config == 'msan' or
+ config == 'tsan' or config == 'ubsan' or
+ config == 'helgrind' or config == 'memcheck'):
+ # Scale overall test timeout if running under various sanitizers.
+ # scaling value is based on historical data analysis
+ timeout_scaling *= 3
+ elif polling_strategy == 'poll-cv':
+ # scale test timeout if running with poll-cv
+ # sanitizer and poll-cv scaling is not cumulative to ensure
+ # reasonable timeout values.
+ # TODO(jtattermusch): based on historical data and 5min default
+ # test timeout poll-cv scaling is currently not useful.
+ # Leaving here so it can be reintroduced if the default test timeout
+ # is decreased in the future.
+ timeout_scaling *= 1
+
+ if self.config.build_config in target['exclude_configs']:
+ continue
+ if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
+ continue
+ if self.platform == 'windows':
+ binary = 'cmake/build/%s/%s.exe' % (
+ _MSBUILD_CONFIG[self.config.build_config],
+ target['name'])
+ else:
+ if self._use_cmake:
+ binary = 'cmake/build/%s' % target['name']
+ else:
+ binary = 'bins/%s/%s' % (self.config.build_config,
+ target['name'])
+ cpu_cost = target['cpu_cost']
+ if cpu_cost == 'capacity':
+ cpu_cost = multiprocessing.cpu_count()
+ if os.path.isfile(binary):
+ list_test_command = None
+ filter_test_command = None
+
+ # these are the flags defined by the gtest and benchmark frameworks to list
+ # and filter test runs. We use them to split each individual test
+ # into its own JobSpec, and thus into its own process.
+ if 'benchmark' in target and target['benchmark']:
+ with open(os.devnull, 'w') as fnull:
+ tests = subprocess.check_output(
+ [binary, '--benchmark_list_tests'],
+ stderr=fnull)
+ for line in tests.split('\n'):
+ test = line.strip()
+ if not test: continue
+ cmdline = [binary, '--benchmark_filter=%s$' % test
+ ] + target['args']
+ out.append(
+ self.config.job_spec(
+ cmdline,
+ shortname='%s %s' % (' '.join(cmdline),
+ shortname_ext),
+ cpu_cost=cpu_cost,
+ timeout_seconds=_DEFAULT_TIMEOUT_SECONDS *
+ timeout_scaling,
+ environ=env))
+ elif 'gtest' in target and target['gtest']:
+ # here we parse the output of --gtest_list_tests to build up a complete
+ # list of the tests contained in a binary; for each test, we then
+ # add a job to run, filtering for just that test.
+ with open(os.devnull, 'w') as fnull:
+ tests = subprocess.check_output(
+ [binary, '--gtest_list_tests'], stderr=fnull)
+ base = None
+ for line in tests.split('\n'):
+ i = line.find('#')
+ if i >= 0: line = line[:i]
+ if not line: continue
+ if line[0] != ' ':
+ base = line.strip()
+ else:
+ assert base is not None
+ assert line[1] == ' '
+ test = base + line.strip()
+ cmdline = [binary, '--gtest_filter=%s' % test
+ ] + target['args']
+ out.append(
+ self.config.job_spec(
+ cmdline,
+ shortname='%s %s' % (' '.join(cmdline),
+ shortname_ext),
+ cpu_cost=cpu_cost,
+ timeout_seconds=target.get(
+ 'timeout_seconds',
+ _DEFAULT_TIMEOUT_SECONDS) *
+ timeout_scaling,
+ environ=env))
+ else:
+ cmdline = [binary] + target['args']
+ shortname = target.get('shortname', ' '.join(
+ pipes.quote(arg) for arg in cmdline))
+ shortname += shortname_ext
+ out.append(
+ self.config.job_spec(
+ cmdline,
+ shortname=shortname,
+ cpu_cost=cpu_cost,
+ flaky=target.get('flaky', False),
+ timeout_seconds=target.get(
+ 'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
+ * timeout_scaling,
+ environ=env))
+ elif self.args.regex == '.*' or self.platform == 'windows':
+ print('\nWARNING: binary not found, skipping', binary)
+ return sorted(out)
+
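As a standalone illustration of the --gtest_list_tests parsing above, a minimal sketch (the sample listing is hypothetical; the runner obtains the real one from the test binary via subprocess):

def gtest_filters(listing):
    """Turn `binary --gtest_list_tests` output into one filter per test."""
    filters = []
    base = None
    for line in listing.split('\n'):
        i = line.find('#')  # drop trailing annotations such as '# GetParam() = ...'
        if i >= 0:
            line = line[:i]
        if not line:
            continue
        if line[0] != ' ':
            base = line.strip()  # suite name, already ends with '.'
        else:
            filters.append('--gtest_filter=%s' % (base + line.strip()))
    return filters

sample = 'FooTest.\n  DoesBar\n  DoesBaz\nBarTest.\n  Works\n'
print(gtest_filters(sample))
# ['--gtest_filter=FooTest.DoesBar', '--gtest_filter=FooTest.DoesBaz',
#  '--gtest_filter=BarTest.Works']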
+ def make_targets(self):
+ if self.platform == 'windows':
+ # don't build tools on windows just yet
+ return ['buildtests_%s' % self.make_target]
+ return [
+ 'buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
+ 'check_epollexclusive'
+ ]
- def build_steps(self):
- return []
+ def make_options(self):
+ return self._make_options
- def post_tests_steps(self):
- if self.platform == 'windows':
- return []
- else:
- return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
+ def pre_build_steps(self):
+ if self.platform == 'windows':
+ return [[
+ 'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
+ self._cmake_generator_option, self._cmake_arch_option
+ ]]
+ elif self._use_cmake:
+ return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
+ else:
+ return []
- def makefile_name(self):
- if self._use_cmake:
- return 'cmake/build/Makefile'
- else:
- return 'Makefile'
-
- def _clang_make_options(self, version_suffix=''):
- return ['CC=clang%s' % version_suffix,
- 'CXX=clang++%s' % version_suffix,
- 'LD=clang%s' % version_suffix,
- 'LDXX=clang++%s' % version_suffix]
-
- def _gcc_make_options(self, version_suffix):
- return ['CC=gcc%s' % version_suffix,
- 'CXX=g++%s' % version_suffix,
- 'LD=gcc%s' % version_suffix,
- 'LDXX=g++%s' % version_suffix]
-
- def _compiler_options(self, use_docker, compiler):
- """Returns docker distro and make options to use for given compiler."""
- if not use_docker and not _is_use_docker_child():
- _check_compiler(compiler, ['default'])
-
- if compiler == 'gcc4.9' or compiler == 'default':
- return ('jessie', [])
- elif compiler == 'gcc4.8':
- return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
- elif compiler == 'gcc5.3':
- return ('ubuntu1604', [])
- elif compiler == 'gcc_musl':
- return ('alpine', [])
- elif compiler == 'clang3.4':
- # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
- return ('ubuntu1404', self._clang_make_options())
- elif compiler == 'clang3.5':
- return ('jessie', self._clang_make_options(version_suffix='-3.5'))
- elif compiler == 'clang3.6':
- return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
- elif compiler == 'clang3.7':
- return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
- else:
- raise Exception('Compiler %s not supported.' % compiler)
+ def build_steps(self):
+ return []
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
- _docker_arch_suffix(self.args.arch))
+ def post_tests_steps(self):
+ if self.platform == 'windows':
+ return []
+ else:
+ return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
+
+ def makefile_name(self):
+ if self._use_cmake:
+ return 'cmake/build/Makefile'
+ else:
+ return 'Makefile'
+
+ def _clang_make_options(self, version_suffix=''):
+ return [
+ 'CC=clang%s' % version_suffix, 'CXX=clang++%s' % version_suffix,
+ 'LD=clang%s' % version_suffix, 'LDXX=clang++%s' % version_suffix
+ ]
+
+ def _gcc_make_options(self, version_suffix):
+ return [
+ 'CC=gcc%s' % version_suffix, 'CXX=g++%s' % version_suffix,
+ 'LD=gcc%s' % version_suffix, 'LDXX=g++%s' % version_suffix
+ ]
+
+ def _compiler_options(self, use_docker, compiler):
+ """Returns docker distro and make options to use for given compiler."""
+ if not use_docker and not _is_use_docker_child():
+ _check_compiler(compiler, ['default'])
+
+ if compiler == 'gcc4.9' or compiler == 'default':
+ return ('jessie', [])
+ elif compiler == 'gcc4.8':
+ return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
+ elif compiler == 'gcc5.3':
+ return ('ubuntu1604', [])
+ elif compiler == 'gcc_musl':
+ return ('alpine', [])
+ elif compiler == 'clang3.4':
+ # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
+ return ('ubuntu1404', self._clang_make_options())
+ elif compiler == 'clang3.5':
+ return ('jessie', self._clang_make_options(version_suffix='-3.5'))
+ elif compiler == 'clang3.6':
+ return ('ubuntu1604',
+ self._clang_make_options(version_suffix='-3.6'))
+ elif compiler == 'clang3.7':
+ return ('ubuntu1604',
+ self._clang_make_options(version_suffix='-3.7'))
+ else:
+ raise Exception('Compiler %s not supported.' % compiler)
- def __str__(self):
- return self.make_target
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/cxx_%s_%s' % (
+ self._docker_distro, _docker_arch_suffix(self.args.arch))
+
+ def __str__(self):
+ return self.make_target
# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
- def __init__(self):
- self.platform = platform_string()
-
- def configure(self, config, args):
- self.config = config
- self.args = args
- # Note: electron ABI only depends on major and minor version, so that's all
- # we should specify in the compiler argument
- _check_compiler(self.args.compiler, ['default', 'node0.12',
- 'node4', 'node5', 'node6',
- 'node7', 'node8',
- 'electron1.3', 'electron1.6'])
- if self.args.compiler == 'default':
- self.runtime = 'node'
- self.node_version = '8'
- else:
- if self.args.compiler.startswith('electron'):
- self.runtime = 'electron'
- self.node_version = self.args.compiler[8:]
- else:
- self.runtime = 'node'
- # Take off the word "node"
- self.node_version = self.args.compiler[4:]
-
- # TODO: update with Windows/electron scripts when available for grpc/grpc-node
- def test_specs(self):
- if self.platform == 'windows':
- return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])]
- else:
- return [self.config.job_spec(['tools/run_tests/helper_scripts/run_grpc-node.sh'],
- None,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+ def __init__(self):
+ self.platform = platform_string()
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ # Note: electron ABI only depends on major and minor version, so that's all
+ # we should specify in the compiler argument
+ _check_compiler(self.args.compiler, [
+ 'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
+ 'electron1.3', 'electron1.6'
+ ])
+ if self.args.compiler == 'default':
+ self.runtime = 'node'
+ self.node_version = '8'
+ else:
+ if self.args.compiler.startswith('electron'):
+ self.runtime = 'electron'
+ self.node_version = self.args.compiler[8:]
+ else:
+ self.runtime = 'node'
+ # Take off the word "node"
+ self.node_version = self.args.compiler[4:]
+
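A quick check of the version slicing above; the compiler flag encodes runtime plus version behind a fixed prefix (values are just the documented compiler choices):

assert 'electron1.6'[len('electron'):] == '1.6'   # matches self.args.compiler[8:]
assert 'node8'[len('node'):] == '8'               # matches self.args.compiler[4:]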
+ # TODO: update with Windows/electron scripts when available for grpc/grpc-node
+ def test_specs(self):
+ if self.platform == 'windows':
+ return [
+ self.config.job_spec(
+ ['tools\\run_tests\\helper_scripts\\run_node.bat'])
+ ]
+ else:
+ return [
+ self.config.job_spec(
+ ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
+ None,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+ ]
- def pre_build_steps(self):
- return []
+ def pre_build_steps(self):
+ return []
- def make_targets(self):
- return []
+ def make_targets(self):
+ return []
- def make_options(self):
- return []
+ def make_options(self):
+ return []
- def build_steps(self):
- return []
+ def build_steps(self):
+ return []
- def post_tests_steps(self):
- return []
+ def post_tests_steps(self):
+ return []
- def makefile_name(self):
- return 'Makefile'
+ def makefile_name(self):
+ return 'Makefile'
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
+ self.args.arch)
- def __str__(self):
- return 'grpc-node'
+ def __str__(self):
+ return 'grpc-node'
class PhpLanguage(object):
- def configure(self, config, args):
- self.config = config
- self.args = args
- _check_compiler(self.args.compiler, ['default'])
- self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
+ self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
- def test_specs(self):
- return [self.config.job_spec(['src/php/bin/run_tests.sh'],
- environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+ def test_specs(self):
+ return [
+ self.config.job_spec(
+ ['src/php/bin/run_tests.sh'],
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+ ]
- def pre_build_steps(self):
- return []
+ def pre_build_steps(self):
+ return []
- def make_targets(self):
- return ['static_c', 'shared_c']
+ def make_targets(self):
+ return ['static_c', 'shared_c']
- def make_options(self):
- return self._make_options;
+ def make_options(self):
+ return self._make_options
- def build_steps(self):
- return [['tools/run_tests/helper_scripts/build_php.sh']]
+ def build_steps(self):
+ return [['tools/run_tests/helper_scripts/build_php.sh']]
- def post_tests_steps(self):
- return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
+ def post_tests_steps(self):
+ return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
- def makefile_name(self):
- return 'Makefile'
+ def makefile_name(self):
+ return 'Makefile'
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
+ self.args.arch)
- def __str__(self):
- return 'php'
+ def __str__(self):
+ return 'php'
class Php7Language(object):
- def configure(self, config, args):
- self.config = config
- self.args = args
- _check_compiler(self.args.compiler, ['default'])
- self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
+ self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+
+ def test_specs(self):
+ return [
+ self.config.job_spec(
+ ['src/php/bin/run_tests.sh'],
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+ ]
- def test_specs(self):
- return [self.config.job_spec(['src/php/bin/run_tests.sh'],
- environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+ def pre_build_steps(self):
+ return []
- def pre_build_steps(self):
- return []
+ def make_targets(self):
+ return ['static_c', 'shared_c']
- def make_targets(self):
- return ['static_c', 'shared_c']
+ def make_options(self):
+ return self._make_options
- def make_options(self):
- return self._make_options;
+ def build_steps(self):
+ return [['tools/run_tests/helper_scripts/build_php.sh']]
- def build_steps(self):
- return [['tools/run_tests/helper_scripts/build_php.sh']]
+ def post_tests_steps(self):
+ return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
- def post_tests_steps(self):
- return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
+ def makefile_name(self):
+ return 'Makefile'
- def makefile_name(self):
- return 'Makefile'
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
+ self.args.arch)
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(self.args.arch)
+ def __str__(self):
+ return 'php7'
- def __str__(self):
- return 'php7'
+class PythonConfig(
+ collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
+ """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
-class PythonConfig(collections.namedtuple('PythonConfig', [
- 'name', 'build', 'run'])):
- """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
class PythonLanguage(object):
- def configure(self, config, args):
- self.config = config
- self.args = args
- self.pythons = self._get_pythons(self.args)
-
- def test_specs(self):
- # load list of known test suites
- with open('src/python/grpcio_tests/tests/tests.json') as tests_json_file:
- tests_json = json.load(tests_json_file)
- environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
- return [self.config.job_spec(
- config.run,
- timeout_seconds=5*60,
- environ=dict(list(environment.items()) +
- [('GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
- shortname='%s.test.%s' % (config.name, suite_name),)
- for suite_name in tests_json
- for config in self.pythons]
-
- def pre_build_steps(self):
- return []
-
- def make_targets(self):
- return []
-
- def make_options(self):
- return []
-
- def build_steps(self):
- return [config.build for config in self.pythons]
-
- def post_tests_steps(self):
- if self.config.build_config != 'gcov':
- return []
- else:
- return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ self.pythons = self._get_pythons(self.args)
+
+ def test_specs(self):
+ # load list of known test suites
+ with open(
+ 'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
+ tests_json = json.load(tests_json_file)
+ environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
+ return [
+ self.config.job_spec(
+ config.run,
+ timeout_seconds=5 * 60,
+ environ=dict(
+ list(environment.items()) + [(
+ 'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
+ shortname='%s.test.%s' % (config.name, suite_name),)
+ for suite_name in tests_json for config in self.pythons
+ ]
+
+ def pre_build_steps(self):
+ return []
+
+ def make_targets(self):
+ return []
+
+ def make_options(self):
+ return []
+
+ def build_steps(self):
+ return [config.build for config in self.pythons]
+
+ def post_tests_steps(self):
+ if self.config.build_config != 'gcov':
+ return []
+ else:
+ return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
- def makefile_name(self):
- return 'Makefile'
+ def makefile_name(self):
+ return 'Makefile'
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/python_%s_%s' % (
+ self.python_manager_name(), _docker_arch_suffix(self.args.arch))
- def python_manager_name(self):
- if self.args.compiler in ['python3.5', 'python3.6']:
- return 'pyenv'
- elif self.args.compiler == 'python_alpine':
- return 'alpine'
- else:
- return 'jessie'
+ def python_manager_name(self):
+ if self.args.compiler in ['python3.5', 'python3.6']:
+ return 'pyenv'
+ elif self.args.compiler == 'python_alpine':
+ return 'alpine'
+ else:
+ return 'jessie'
- def _get_pythons(self, args):
- if args.arch == 'x86':
- bits = '32'
- else:
- bits = '64'
-
- if os.name == 'nt':
- shell = ['bash']
- builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')]
- builder_prefix_arguments = ['MINGW{}'.format(bits)]
- venv_relative_python = ['Scripts/python.exe']
- toolchain = ['mingw32']
- else:
- shell = []
- builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')]
- builder_prefix_arguments = []
- venv_relative_python = ['bin/python']
- toolchain = ['unix']
-
- runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')]
- config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
- venv_relative_python, toolchain, runner)
- python27_config = _python_config_generator(name='py27', major='2',
- minor='7', bits=bits,
- config_vars=config_vars)
- python34_config = _python_config_generator(name='py34', major='3',
- minor='4', bits=bits,
- config_vars=config_vars)
- python35_config = _python_config_generator(name='py35', major='3',
- minor='5', bits=bits,
- config_vars=config_vars)
- python36_config = _python_config_generator(name='py36', major='3',
- minor='6', bits=bits,
- config_vars=config_vars)
- pypy27_config = _pypy_config_generator(name='pypy', major='2',
- config_vars=config_vars)
- pypy32_config = _pypy_config_generator(name='pypy3', major='3',
- config_vars=config_vars)
-
- if args.compiler == 'default':
- if os.name == 'nt':
- return (python35_config,)
- else:
- return (python27_config, python34_config,)
- elif args.compiler == 'python2.7':
- return (python27_config,)
- elif args.compiler == 'python3.4':
- return (python34_config,)
- elif args.compiler == 'python3.5':
- return (python35_config,)
- elif args.compiler == 'python3.6':
- return (python36_config,)
- elif args.compiler == 'pypy':
- return (pypy27_config,)
- elif args.compiler == 'pypy3':
- return (pypy32_config,)
- elif args.compiler == 'python_alpine':
- return (python27_config,)
- elif args.compiler == 'all_the_cpythons':
- return (python27_config, python34_config, python35_config,
- python36_config,)
- else:
- raise Exception('Compiler %s not supported.' % args.compiler)
+ def _get_pythons(self, args):
+ if args.arch == 'x86':
+ bits = '32'
+ else:
+ bits = '64'
+
+ if os.name == 'nt':
+ shell = ['bash']
+ builder = [
+ os.path.abspath(
+ 'tools/run_tests/helper_scripts/build_python_msys2.sh')
+ ]
+ builder_prefix_arguments = ['MINGW{}'.format(bits)]
+ venv_relative_python = ['Scripts/python.exe']
+ toolchain = ['mingw32']
+ else:
+ shell = []
+ builder = [
+ os.path.abspath(
+ 'tools/run_tests/helper_scripts/build_python.sh')
+ ]
+ builder_prefix_arguments = []
+ venv_relative_python = ['bin/python']
+ toolchain = ['unix']
+
+ runner = [
+ os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
+ ]
+ config_vars = _PythonConfigVars(shell, builder,
+ builder_prefix_arguments,
+ venv_relative_python, toolchain, runner)
+ python27_config = _python_config_generator(
+ name='py27',
+ major='2',
+ minor='7',
+ bits=bits,
+ config_vars=config_vars)
+ python34_config = _python_config_generator(
+ name='py34',
+ major='3',
+ minor='4',
+ bits=bits,
+ config_vars=config_vars)
+ python35_config = _python_config_generator(
+ name='py35',
+ major='3',
+ minor='5',
+ bits=bits,
+ config_vars=config_vars)
+ python36_config = _python_config_generator(
+ name='py36',
+ major='3',
+ minor='6',
+ bits=bits,
+ config_vars=config_vars)
+ pypy27_config = _pypy_config_generator(
+ name='pypy', major='2', config_vars=config_vars)
+ pypy32_config = _pypy_config_generator(
+ name='pypy3', major='3', config_vars=config_vars)
+
+ if args.compiler == 'default':
+ if os.name == 'nt':
+ return (python35_config,)
+ else:
+ return (python27_config, python34_config,)
+ elif args.compiler == 'python2.7':
+ return (python27_config,)
+ elif args.compiler == 'python3.4':
+ return (python34_config,)
+ elif args.compiler == 'python3.5':
+ return (python35_config,)
+ elif args.compiler == 'python3.6':
+ return (python36_config,)
+ elif args.compiler == 'pypy':
+ return (pypy27_config,)
+ elif args.compiler == 'pypy3':
+ return (pypy32_config,)
+ elif args.compiler == 'python_alpine':
+ return (python27_config,)
+ elif args.compiler == 'all_the_cpythons':
+ return (python27_config, python34_config, python35_config,
+ python36_config,)
+ else:
+ raise Exception('Compiler %s not supported.' % args.compiler)
- def __str__(self):
- return 'python'
+ def __str__(self):
+ return 'python'
class RubyLanguage(object):
- def configure(self, config, args):
- self.config = config
- self.args = args
- _check_compiler(self.args.compiler, ['default'])
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
- def test_specs(self):
- tests = [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
- timeout_seconds=10*60,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
- tests.append(self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
- timeout_seconds=10*60,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS))
- return tests
+ def test_specs(self):
+ tests = [
+ self.config.job_spec(
+ ['tools/run_tests/helper_scripts/run_ruby.sh'],
+ timeout_seconds=10 * 60,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+ ]
+ tests.append(
+ self.config.job_spec(
+ ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
+ timeout_seconds=10 * 60,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+ return tests
- def pre_build_steps(self):
- return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
+ def pre_build_steps(self):
+ return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
- def make_targets(self):
- return []
+ def make_targets(self):
+ return []
- def make_options(self):
- return []
+ def make_options(self):
+ return []
- def build_steps(self):
- return [['tools/run_tests/helper_scripts/build_ruby.sh']]
+ def build_steps(self):
+ return [['tools/run_tests/helper_scripts/build_ruby.sh']]
- def post_tests_steps(self):
- return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
+ def post_tests_steps(self):
+ return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
- def makefile_name(self):
- return 'Makefile'
+ def makefile_name(self):
+ return 'Makefile'
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
+ self.args.arch)
- def __str__(self):
- return 'ruby'
+ def __str__(self):
+ return 'ruby'
class CSharpLanguage(object):
- def __init__(self):
- self.platform = platform_string()
-
- def configure(self, config, args):
- self.config = config
- self.args = args
- if self.platform == 'windows':
- _check_compiler(self.args.compiler, ['coreclr', 'default'])
- _check_arch(self.args.arch, ['default'])
- self._cmake_arch_option = 'x64'
- self._make_options = []
- else:
- _check_compiler(self.args.compiler, ['default', 'coreclr'])
- self._docker_distro = 'jessie'
-
- if self.platform == 'mac':
- # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
- self._make_options = ['EMBED_OPENSSL=true']
- if self.args.compiler != 'coreclr':
- # On Mac, official distribution of mono is 32bit.
- self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
- else:
- self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+ def __init__(self):
+ self.platform = platform_string()
- def test_specs(self):
- with open('src/csharp/tests.json') as f:
- tests_by_assembly = json.load(f)
-
- msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
- nunit_args = ['--labels=All', '--noresult', '--workers=1']
- assembly_subdir = 'bin/%s' % msbuild_config
- assembly_extension = '.exe'
-
- if self.args.compiler == 'coreclr':
- assembly_subdir += '/netcoreapp1.0'
- runtime_cmd = ['dotnet', 'exec']
- assembly_extension = '.dll'
- else:
- assembly_subdir += '/net45'
- if self.platform == 'windows':
- runtime_cmd = []
- else:
- runtime_cmd = ['mono']
-
- specs = []
- for assembly in six.iterkeys(tests_by_assembly):
- assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
- assembly_subdir,
- assembly,
- assembly_extension)
- if self.config.build_config != 'gcov' or self.platform != 'windows':
- # normally, run each test as a separate process
- for test in tests_by_assembly[assembly]:
- cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
- specs.append(self.config.job_spec(cmdline,
- shortname='csharp.%s' % test,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS))
- else:
- # For C# test coverage, run all tests from the same assembly at once
- # using OpenCover.Console (only works on Windows).
- cmdline = ['src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
- '-target:%s' % assembly_file,
- '-targetdir:src\\csharp',
- '-targetargs:%s' % ' '.join(nunit_args),
- '-filter:+[Grpc.Core]*',
- '-register:user',
- '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly]
-
- # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
- # to prevent problems with registering the profiler.
- run_exclusive = 1000000
- specs.append(self.config.job_spec(cmdline,
- shortname='csharp.coverage.%s' % assembly,
- cpu_cost=run_exclusive,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS))
- return specs
-
- def pre_build_steps(self):
- if self.platform == 'windows':
- return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat', self._cmake_arch_option]]
- else:
- return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ if self.platform == 'windows':
+ _check_compiler(self.args.compiler, ['coreclr', 'default'])
+ _check_arch(self.args.arch, ['default'])
+ self._cmake_arch_option = 'x64'
+ self._make_options = []
+ else:
+ _check_compiler(self.args.compiler, ['default', 'coreclr'])
+ self._docker_distro = 'jessie'
+
+ if self.platform == 'mac':
+ # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
+ self._make_options = ['EMBED_OPENSSL=true']
+ if self.args.compiler != 'coreclr':
+ # On Mac, official distribution of mono is 32bit.
+ self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
+ else:
+ self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+
+ def test_specs(self):
+ with open('src/csharp/tests.json') as f:
+ tests_by_assembly = json.load(f)
+
+ msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
+ nunit_args = ['--labels=All', '--noresult', '--workers=1']
+ assembly_subdir = 'bin/%s' % msbuild_config
+ assembly_extension = '.exe'
+
+ if self.args.compiler == 'coreclr':
+ assembly_subdir += '/netcoreapp1.0'
+ runtime_cmd = ['dotnet', 'exec']
+ assembly_extension = '.dll'
+ else:
+ assembly_subdir += '/net45'
+ if self.platform == 'windows':
+ runtime_cmd = []
+ else:
+ runtime_cmd = ['mono']
+
+ specs = []
+ for assembly in six.iterkeys(tests_by_assembly):
+ assembly_file = 'src/csharp/%s/%s/%s%s' % (
+ assembly, assembly_subdir, assembly, assembly_extension)
+ if self.config.build_config != 'gcov' or self.platform != 'windows':
+ # normally, run each test as a separate process
+ for test in tests_by_assembly[assembly]:
+ cmdline = runtime_cmd + [assembly_file, '--test=%s' % test
+ ] + nunit_args
+ specs.append(
+ self.config.job_spec(
+ cmdline,
+ shortname='csharp.%s' % test,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+ else:
+ # For C# test coverage, run all tests from the same assembly at once
+ # using OpenCover.Console (only works on Windows).
+ cmdline = [
+ 'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
+ '-target:%s' % assembly_file, '-targetdir:src\\csharp',
+ '-targetargs:%s' % ' '.join(nunit_args),
+ '-filter:+[Grpc.Core]*', '-register:user',
+ '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
+ ]
+
+ # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
+ # to prevent problems with registering the profiler.
+ run_exclusive = 1000000
+ specs.append(
+ self.config.job_spec(
+ cmdline,
+ shortname='csharp.coverage.%s' % assembly,
+ cpu_cost=run_exclusive,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+ return specs
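A simplified model of why cpu_cost=1000000 serializes the coverage runs (the real scheduling lives in python_utils/jobset.py; this is only a sketch of the idea):

def can_start(running_cpu_cost, job_cpu_cost, budget):
    # a job starts when nothing is running, or when it still fits the cpu budget
    return running_cpu_cost == 0 or running_cpu_cost + job_cpu_cost <= budget

# An OpenCover.Console job with cpu_cost=1000000 never fits next to another
# job, so each coverage invocation effectively runs alone.
assert can_start(0, 1000000, budget=8)
assert not can_start(1000000, 1, budget=8)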
+
+ def pre_build_steps(self):
+ if self.platform == 'windows':
+ return [[
+ 'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
+ self._cmake_arch_option
+ ]]
+ else:
+ return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
- def make_targets(self):
- return ['grpc_csharp_ext']
+ def make_targets(self):
+ return ['grpc_csharp_ext']
- def make_options(self):
- return self._make_options;
+ def make_options(self):
+ return self._make_options
- def build_steps(self):
- if self.platform == 'windows':
- return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
- else:
- return [['tools/run_tests/helper_scripts/build_csharp.sh']]
+ def build_steps(self):
+ if self.platform == 'windows':
+ return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
+ else:
+ return [['tools/run_tests/helper_scripts/build_csharp.sh']]
- def post_tests_steps(self):
- if self.platform == 'windows':
- return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
- else:
- return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
+ def post_tests_steps(self):
+ if self.platform == 'windows':
+ return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
+ else:
+ return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
- def makefile_name(self):
- if self.platform == 'windows':
- return 'cmake/build/%s/Makefile' % self._cmake_arch_option
- else:
- return 'Makefile'
+ def makefile_name(self):
+ if self.platform == 'windows':
+ return 'cmake/build/%s/Makefile' % self._cmake_arch_option
+ else:
+ return 'Makefile'
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
- _docker_arch_suffix(self.args.arch))
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/csharp_%s_%s' % (
+ self._docker_distro, _docker_arch_suffix(self.args.arch))
- def __str__(self):
- return 'csharp'
+ def __str__(self):
+ return 'csharp'
class ObjCLanguage(object):
- def configure(self, config, args):
- self.config = config
- self.args = args
- _check_compiler(self.args.compiler, ['default'])
-
- def test_specs(self):
- return [
- self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
- timeout_seconds=60*60,
- shortname='objc-tests',
- cpu_cost=1e6,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS),
- self.config.job_spec(['src/objective-c/tests/run_plugin_tests.sh'],
- timeout_seconds=60*60,
- shortname='objc-plugin-tests',
- cpu_cost=1e6,
- environ=_FORCE_ENVIRON_FOR_WRAPPERS),
- self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
- timeout_seconds=10*60,
- shortname='objc-build-example-helloworld',
- cpu_cost=1e6,
- environ={'SCHEME': 'HelloWorld',
- 'EXAMPLE_PATH': 'examples/objective-c/helloworld'}),
- self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
- timeout_seconds=10*60,
- shortname='objc-build-example-routeguide',
- cpu_cost=1e6,
- environ={'SCHEME': 'RouteGuideClient',
- 'EXAMPLE_PATH': 'examples/objective-c/route_guide'}),
- self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
- timeout_seconds=10*60,
- shortname='objc-build-example-authsample',
- cpu_cost=1e6,
- environ={'SCHEME': 'AuthSample',
- 'EXAMPLE_PATH': 'examples/objective-c/auth_sample'}),
- self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
- timeout_seconds=10*60,
- shortname='objc-build-example-sample',
- cpu_cost=1e6,
- environ={'SCHEME': 'Sample',
- 'EXAMPLE_PATH': 'src/objective-c/examples/Sample'}),
- self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
- timeout_seconds=10*60,
- shortname='objc-build-example-sample-frameworks',
- cpu_cost=1e6,
- environ={'SCHEME': 'Sample',
- 'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
- 'FRAMEWORKS': 'YES'}),
- self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
- timeout_seconds=10*60,
- shortname='objc-build-example-switftsample',
- cpu_cost=1e6,
- environ={'SCHEME': 'SwiftSample',
- 'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'}),
- ]
-
- def pre_build_steps(self):
- return []
-
- def make_targets(self):
- return ['interop_server']
-
- def make_options(self):
- return []
-
- def build_steps(self):
- return [['src/objective-c/tests/build_tests.sh']]
-
- def post_tests_steps(self):
- return []
-
- def makefile_name(self):
- return 'Makefile'
-
- def dockerfile_dir(self):
- return None
-
- def __str__(self):
- return 'objc'
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
+
+ def test_specs(self):
+ return [
+ self.config.job_spec(
+ ['src/objective-c/tests/run_tests.sh'],
+ timeout_seconds=60 * 60,
+ shortname='objc-tests',
+ cpu_cost=1e6,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS),
+ self.config.job_spec(
+ ['src/objective-c/tests/run_plugin_tests.sh'],
+ timeout_seconds=60 * 60,
+ shortname='objc-plugin-tests',
+ cpu_cost=1e6,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS),
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=10 * 60,
+ shortname='objc-build-example-helloworld',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'HelloWorld',
+ 'EXAMPLE_PATH': 'examples/objective-c/helloworld'
+ }),
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=10 * 60,
+ shortname='objc-build-example-routeguide',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'RouteGuideClient',
+ 'EXAMPLE_PATH': 'examples/objective-c/route_guide'
+ }),
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=10 * 60,
+ shortname='objc-build-example-authsample',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'AuthSample',
+ 'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
+ }),
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=10 * 60,
+ shortname='objc-build-example-sample',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'Sample',
+ 'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
+ }),
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=10 * 60,
+ shortname='objc-build-example-sample-frameworks',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'Sample',
+ 'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
+ 'FRAMEWORKS': 'YES'
+ }),
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=10 * 60,
+ shortname='objc-build-example-switftsample',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'SwiftSample',
+ 'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
+ }),
+ ]
+
+ def pre_build_steps(self):
+ return []
+
+ def make_targets(self):
+ return ['interop_server']
+
+ def make_options(self):
+ return []
+
+ def build_steps(self):
+ return [['src/objective-c/tests/build_tests.sh']]
+
+ def post_tests_steps(self):
+ return []
+
+ def makefile_name(self):
+ return 'Makefile'
+
+ def dockerfile_dir(self):
+ return None
+
+ def __str__(self):
+ return 'objc'
class Sanity(object):
- def configure(self, config, args):
- self.config = config
- self.args = args
- _check_compiler(self.args.compiler, ['default'])
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
- def test_specs(self):
- import yaml
- with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
- environ={'TEST': 'true'}
- if _is_use_docker_child():
- environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
- return [self.config.job_spec(cmd['script'].split(),
- timeout_seconds=30*60,
- environ=environ,
- cpu_cost=cmd.get('cpu_cost', 1))
- for cmd in yaml.load(f)]
+ def test_specs(self):
+ import yaml
+ with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
+ environ = {'TEST': 'true'}
+ if _is_use_docker_child():
+ environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
+ return [
+ self.config.job_spec(
+ cmd['script'].split(),
+ timeout_seconds=30 * 60,
+ environ=environ,
+ cpu_cost=cmd.get('cpu_cost', 1)) for cmd in yaml.load(f)
+ ]
- def pre_build_steps(self):
- return []
+ def pre_build_steps(self):
+ return []
- def make_targets(self):
- return ['run_dep_checks']
+ def make_targets(self):
+ return ['run_dep_checks']
- def make_options(self):
- return []
+ def make_options(self):
+ return []
- def build_steps(self):
- return []
+ def build_steps(self):
+ return []
- def post_tests_steps(self):
- return []
+ def post_tests_steps(self):
+ return []
- def makefile_name(self):
- return 'Makefile'
+ def makefile_name(self):
+ return 'Makefile'
- def dockerfile_dir(self):
- return 'tools/dockerfile/test/sanity'
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/sanity'
+
+ def __str__(self):
+ return 'sanity'
- def __str__(self):
- return 'sanity'
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
- _CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
-
+ _CONFIGS = dict((cfg['config'], Config(**cfg))
+ for cfg in ast.literal_eval(f.read()))
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
@@ -1033,60 +1157,61 @@ _LANGUAGES = {
'python': PythonLanguage(),
'ruby': RubyLanguage(),
'csharp': CSharpLanguage(),
- 'objc' : ObjCLanguage(),
+ 'objc': ObjCLanguage(),
'sanity': Sanity()
- }
-
+}
_MSBUILD_CONFIG = {
'dbg': 'Debug',
'opt': 'Release',
'gcov': 'Debug',
- }
+}
def _windows_arch_option(arch):
- """Returns msbuild cmdline option for selected architecture."""
- if arch == 'default' or arch == 'x86':
- return '/p:Platform=Win32'
- elif arch == 'x64':
- return '/p:Platform=x64'
- else:
- print('Architecture %s not supported.' % arch)
- sys.exit(1)
+ """Returns msbuild cmdline option for selected architecture."""
+ if arch == 'default' or arch == 'x86':
+ return '/p:Platform=Win32'
+ elif arch == 'x64':
+ return '/p:Platform=x64'
+ else:
+ print('Architecture %s not supported.' % arch)
+ sys.exit(1)
def _check_arch_option(arch):
- """Checks that architecture option is valid."""
- if platform_string() == 'windows':
- _windows_arch_option(arch)
- elif platform_string() == 'linux':
- # On linux, we need to be running under docker with the right architecture.
- runtime_arch = platform.architecture()[0]
- if arch == 'default':
- return
- elif runtime_arch == '64bit' and arch == 'x64':
- return
- elif runtime_arch == '32bit' and arch == 'x86':
- return
+ """Checks that architecture option is valid."""
+ if platform_string() == 'windows':
+ _windows_arch_option(arch)
+ elif platform_string() == 'linux':
+ # On linux, we need to be running under docker with the right architecture.
+ runtime_arch = platform.architecture()[0]
+ if arch == 'default':
+ return
+ elif runtime_arch == '64bit' and arch == 'x64':
+ return
+ elif runtime_arch == '32bit' and arch == 'x86':
+ return
+ else:
+ print('Architecture %s does not match current runtime architecture.'
+ % arch)
+ sys.exit(1)
else:
- print('Architecture %s does not match current runtime architecture.' % arch)
- sys.exit(1)
- else:
- if args.arch != 'default':
- print('Architecture %s not supported on current platform.' % args.arch)
- sys.exit(1)
+ if args.arch != 'default':
+ print('Architecture %s not supported on current platform.' %
+ args.arch)
+ sys.exit(1)
def _docker_arch_suffix(arch):
- """Returns suffix to dockerfile dir to use."""
- if arch == 'default' or arch == 'x64':
- return 'x64'
- elif arch == 'x86':
- return 'x86'
- else:
- print('Architecture %s not supported with current settings.' % arch)
- sys.exit(1)
+ """Returns suffix to dockerfile dir to use."""
+ if arch == 'default' or arch == 'x64':
+ return 'x64'
+ elif arch == 'x86':
+ return 'x86'
+ else:
+ print('Architecture %s not supported with current settings.' % arch)
+ sys.exit(1)
def runs_per_test_type(arg_str):
@@ -1111,478 +1236,581 @@ def runs_per_test_type(arg_str):
def percent_type(arg_str):
- pct = float(arg_str)
- if pct > 100 or pct < 0:
- raise argparse.ArgumentTypeError(
- "'%f' is not a valid percentage in the [0, 100] range" % pct)
- return pct
+ pct = float(arg_str)
+ if pct > 100 or pct < 0:
+ raise argparse.ArgumentTypeError(
+ "'%f' is not a valid percentage in the [0, 100] range" % pct)
+ return pct
+
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
- return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
+ return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
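A quick usage example of the helper above; with the defaults the comparison is purely relative:

assert isclose(1.0, 1.0 + 1e-10)   # within the default rel_tol of 1e-09
assert not isclose(0.0, 1e-10)     # abs_tol defaults to 0.0, so tiny-vs-zero is not "close"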
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
-argp.add_argument('-c', '--config',
- choices=sorted(_CONFIGS.keys()),
- default='opt')
-argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
- help='A positive integer or "inf". If "inf", all tests will run in an '
- 'infinite loop. Especially useful in combination with "-f"')
+argp.add_argument(
+ '-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
+argp.add_argument(
+ '-n',
+ '--runs_per_test',
+ default=1,
+ type=runs_per_test_type,
+ help='A positive integer or "inf". If "inf", all tests will run in an '
+ 'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
-argp.add_argument('-p', '--sample_percent', default=100.0, type=percent_type,
- help='Run a random sample with that percentage of tests')
-argp.add_argument('-f', '--forever',
- default=False,
- action='store_const',
- const=True)
-argp.add_argument('-t', '--travis',
- default=False,
- action='store_const',
- const=True)
-argp.add_argument('--newline_on_success',
- default=False,
- action='store_const',
- const=True)
-argp.add_argument('-l', '--language',
- choices=['all'] + sorted(_LANGUAGES.keys()),
- nargs='+',
- default=['all'])
-argp.add_argument('-S', '--stop_on_failure',
- default=False,
- action='store_const',
- const=True)
-argp.add_argument('--use_docker',
- default=False,
- action='store_const',
- const=True,
- help='Run all the tests under docker. That provides ' +
- 'additional isolation and prevents the need to install ' +
- 'language specific prerequisites. Only available on Linux.')
-argp.add_argument('--allow_flakes',
- default=False,
- action='store_const',
- const=True,
- help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
-argp.add_argument('--arch',
- choices=['default', 'x86', 'x64'],
- default='default',
- help='Selects architecture to target. For some platforms "default" is the only supported choice.')
-argp.add_argument('--compiler',
- choices=['default',
- 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
- 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
- 'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine', 'all_the_cpythons',
- 'electron1.3', 'electron1.6',
- 'coreclr',
- 'cmake', 'cmake_vs2015', 'cmake_vs2017'],
- default='default',
- help='Selects compiler to use. Allowed values depend on the platform and language.')
-argp.add_argument('--iomgr_platform',
- choices=['native', 'uv'],
- default='native',
- help='Selects iomgr platform to build on')
-argp.add_argument('--build_only',
- default=False,
- action='store_const',
- const=True,
- help='Perform all the build steps but don\'t run any tests.')
-argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
- help='Measure the cpu costs of tests')
-argp.add_argument('--update_submodules', default=[], nargs='*',
- help='Update some submodules before building. If any are updated, also run generate_projects. ' +
- 'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
+argp.add_argument(
+ '-p',
+ '--sample_percent',
+ default=100.0,
+ type=percent_type,
+ help='Run a random sample with that percentage of tests')
+argp.add_argument(
+ '-f', '--forever', default=False, action='store_const', const=True)
+argp.add_argument(
+ '-t', '--travis', default=False, action='store_const', const=True)
+argp.add_argument(
+ '--newline_on_success', default=False, action='store_const', const=True)
+argp.add_argument(
+ '-l',
+ '--language',
+ choices=['all'] + sorted(_LANGUAGES.keys()),
+ nargs='+',
+ default=['all'])
+argp.add_argument(
+ '-S', '--stop_on_failure', default=False, action='store_const', const=True)
+argp.add_argument(
+ '--use_docker',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run all the tests under docker. That provides ' +
+ 'additional isolation and avoids the need to install ' +
+ 'language-specific prerequisites. Only available on Linux.')
+argp.add_argument(
+ '--allow_flakes',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+)
+argp.add_argument(
+ '--arch',
+ choices=['default', 'x86', 'x64'],
+ default='default',
+ help='Selects architecture to target. For some platforms "default" is the only supported choice.'
+)
+argp.add_argument(
+ '--compiler',
+ choices=[
+ 'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
+ 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'python2.7',
+ 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine',
+ 'all_the_cpythons', 'electron1.3', 'electron1.6', 'coreclr', 'cmake',
+ 'cmake_vs2015', 'cmake_vs2017'
+ ],
+ default='default',
+ help='Selects compiler to use. Allowed values depend on the platform and language.'
+)
+argp.add_argument(
+ '--iomgr_platform',
+ choices=['native', 'uv'],
+ default='native',
+ help='Selects iomgr platform to build on')
+argp.add_argument(
+ '--build_only',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Perform all the build steps but don\'t run any tests.')
+argp.add_argument(
+ '--measure_cpu_costs',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Measure the cpu costs of tests')
+argp.add_argument(
+ '--update_submodules',
+ default=[],
+ nargs='*',
+ help='Update some submodules before building. If any are updated, also run generate_projects. '
+ +
+ 'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
+)
argp.add_argument('-a', '--antagonists', default=0, type=int)
-argp.add_argument('-x', '--xml_report', default=None, type=str,
- help='Generates a JUnit-compatible XML report')
-argp.add_argument('--report_suite_name', default='tests', type=str,
- help='Test suite name to use in generated JUnit XML report')
-argp.add_argument('--quiet_success',
- default=False,
- action='store_const',
- const=True,
- help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. ' +
- 'Useful when running many iterations of each test (argument -n).')
-argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
- help='Don\'t try to iterate over many polling strategies when they exist')
-argp.add_argument('--force_use_pollers', default=None, type=str,
- help='Only use the specified comma-delimited list of polling engines. '
- 'Example: --force_use_pollers epollsig,poll '
- ' (This flag has no effect if --force_default_poller flag is also used)')
-argp.add_argument('--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
-argp.add_argument('--bq_result_table',
- default='',
- type=str,
- nargs='?',
- help='Upload test results to a specified BQ table.')
-argp.add_argument('--disable_auto_set_flakes', default=False, const=True, action='store_const',
- help='Disable rerunning historically flaky tests')
+argp.add_argument(
+ '-x',
+ '--xml_report',
+ default=None,
+ type=str,
+ help='Generates a JUnit-compatible XML report')
+argp.add_argument(
+ '--report_suite_name',
+ default='tests',
+ type=str,
+ help='Test suite name to use in generated JUnit XML report')
+argp.add_argument(
+ '--quiet_success',
+ default=False,
+ action='store_const',
+ const=True,
+    help='Don\'t print anything when a test passes. Passing tests will also not be reported in the XML report. '
+ + 'Useful when running many iterations of each test (argument -n).')
+argp.add_argument(
+ '--force_default_poller',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Don\'t try to iterate over many polling strategies when they exist')
+argp.add_argument(
+ '--force_use_pollers',
+ default=None,
+ type=str,
+ help='Only use the specified comma-delimited list of polling engines. '
+ 'Example: --force_use_pollers epollsig,poll '
+ ' (This flag has no effect if --force_default_poller flag is also used)')
+argp.add_argument(
+ '--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
+argp.add_argument(
+ '--bq_result_table',
+ default='',
+ type=str,
+ nargs='?',
+ help='Upload test results to a specified BQ table.')
+argp.add_argument(
+ '--disable_auto_set_flakes',
+ default=False,
+ const=True,
+ action='store_const',
+ help='Disable rerunning historically flaky tests')
args = argp.parse_args()
flaky_tests = set()
shortname_to_cpu = {}
if not args.disable_auto_set_flakes:
- try:
- for test in get_bqtest_data():
- if test.flaky: flaky_tests.add(test.name)
- if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
- except:
- print("Unexpected error getting flaky tests: %s" % traceback.format_exc())
+ try:
+ for test in get_bqtest_data():
+ if test.flaky: flaky_tests.add(test.name)
+ if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
+ except:
+ print("Unexpected error getting flaky tests: %s" %
+ traceback.format_exc())
if args.force_default_poller:
- _POLLING_STRATEGIES = {}
+ _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
- _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
+ _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
- spec = spec.split(':', 1)
- if len(spec) == 1:
- submodule = spec[0]
- branch = 'master'
- elif len(spec) == 2:
- submodule = spec[0]
- branch = spec[1]
- cwd = 'third_party/%s' % submodule
- def git(cmd, cwd=cwd):
- print('in %s: git %s' % (cwd, cmd))
- run_shell_command('git %s' % cmd, cwd=cwd)
- git('fetch')
- git('checkout %s' % branch)
- git('pull origin %s' % branch)
- if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
- need_to_regenerate_projects = True
+ spec = spec.split(':', 1)
+ if len(spec) == 1:
+ submodule = spec[0]
+ branch = 'master'
+ elif len(spec) == 2:
+ submodule = spec[0]
+ branch = spec[1]
+ cwd = 'third_party/%s' % submodule
+
+ def git(cmd, cwd=cwd):
+ print('in %s: git %s' % (cwd, cmd))
+ run_shell_command('git %s' % cmd, cwd=cwd)
+
+ git('fetch')
+ git('checkout %s' % branch)
+ git('pull origin %s' % branch)
+ if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
+ need_to_regenerate_projects = True
if need_to_regenerate_projects:
- if jobset.platform_string() == 'linux':
- run_shell_command('tools/buildgen/generate_projects.sh')
- else:
- print('WARNING: may need to regenerate projects, but since we are not on')
- print(' Linux this step is being skipped. Compilation MAY fail.')
-
+ if jobset.platform_string() == 'linux':
+ run_shell_command('tools/buildgen/generate_projects.sh')
+ else:
+ print(
+ 'WARNING: may need to regenerate projects, but since we are not on')
+ print(
+ ' Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
if args.travis:
- _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
+ _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
if 'all' in args.language:
- lang_list = _LANGUAGES.keys()
+ lang_list = _LANGUAGES.keys()
else:
- lang_list = args.language
+ lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
- for bad in ['objc', 'sanity']:
- if bad in lang_list:
- lang_list.remove(bad)
+ for bad in ['objc', 'sanity']:
+ if bad in lang_list:
+ lang_list.remove(bad)
languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
- l.configure(run_config, args)
+ l.configure(run_config, args)
-language_make_options=[]
+language_make_options = []
if any(language.make_options() for language in languages):
- if not 'gcov' in args.config and len(languages) != 1:
- print('languages with custom make options cannot be built simultaneously with other languages')
- sys.exit(1)
- else:
- # Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
- # together, and is only used under gcov. All other configs should build languages individually.
- language_make_options = list(set([make_option for lang in languages for make_option in lang.make_options()]))
+ if not 'gcov' in args.config and len(languages) != 1:
+ print(
+ 'languages with custom make options cannot be built simultaneously with other languages'
+ )
+ sys.exit(1)
+ else:
+ # Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
+ # together, and is only used under gcov. All other configs should build languages individually.
+ language_make_options = list(
+ set([
+ make_option
+ for lang in languages for make_option in lang.make_options()
+ ]))
if args.use_docker:
- if not args.travis:
- print('Seen --use_docker flag, will run tests under docker.')
- print('')
- print('IMPORTANT: The changes you are testing need to be locally committed')
- print('because only the committed changes in the current branch will be')
- print('copied to the docker environment.')
- time.sleep(5)
-
- dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
- if len(dockerfile_dirs) > 1:
- if 'gcov' in args.config:
- dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
- print ('Using multilang_jessie_x64 docker image for code coverage for '
- 'all languages.')
+ if not args.travis:
+ print('Seen --use_docker flag, will run tests under docker.')
+ print('')
+ print(
+ 'IMPORTANT: The changes you are testing need to be locally committed'
+ )
+ print(
+ 'because only the committed changes in the current branch will be')
+ print('copied to the docker environment.')
+ time.sleep(5)
+
+ dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
+ if len(dockerfile_dirs) > 1:
+ if 'gcov' in args.config:
+ dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
+ print(
+ 'Using multilang_jessie_x64 docker image for code coverage for '
+ 'all languages.')
+ else:
+ print(
+ 'Languages to be tested require running under different docker '
+ 'images.')
+ sys.exit(1)
else:
- print ('Languages to be tested require running under different docker '
- 'images.')
- sys.exit(1)
- else:
- dockerfile_dir = next(iter(dockerfile_dirs))
-
- child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
- run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])
-
- env = os.environ.copy()
- env['RUN_TESTS_COMMAND'] = run_tests_cmd
- env['DOCKERFILE_DIR'] = dockerfile_dir
- env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
- if args.xml_report:
- env['XML_REPORT'] = args.xml_report
- if not args.travis:
- env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
-
- subprocess.check_call('tools/run_tests/dockerize/build_docker_and_run_tests.sh',
- shell=True,
- env=env)
- sys.exit(0)
+ dockerfile_dir = next(iter(dockerfile_dirs))
+
+ child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
+ run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
+ child_argv[1:])
+
+ env = os.environ.copy()
+ env['RUN_TESTS_COMMAND'] = run_tests_cmd
+ env['DOCKERFILE_DIR'] = dockerfile_dir
+ env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
+ if args.xml_report:
+ env['XML_REPORT'] = args.xml_report
+ if not args.travis:
+ env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
+
+ subprocess.check_call(
+ 'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
+ shell=True,
+ env=env)
+ sys.exit(0)
_check_arch_option(args.arch)
+
def make_jobspec(cfg, targets, makefile='Makefile'):
- if platform_string() == 'windows':
- return [jobset.JobSpec(['cmake', '--build', '.',
- '--target', '%s' % target,
- '--config', _MSBUILD_CONFIG[cfg]],
- cwd=os.path.dirname(makefile),
- timeout_seconds=None) for target in targets]
- else:
- if targets and makefile.startswith('cmake/build/'):
- # With cmake, we've passed all the build configuration in the pre-build step already
- return [jobset.JobSpec([os.getenv('MAKE', 'make'),
- '-j', '%d' % args.jobs] +
- targets,
- cwd='cmake/build',
- timeout_seconds=None)]
- if targets:
- return [jobset.JobSpec([os.getenv('MAKE', 'make'),
- '-f', makefile,
- '-j', '%d' % args.jobs,
- 'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
- 'CONFIG=%s' % cfg,
- 'Q='] +
- language_make_options +
- ([] if not args.travis else ['JENKINS_BUILD=1']) +
- targets,
- timeout_seconds=None)]
+ if platform_string() == 'windows':
+ return [
+ jobset.JobSpec(
+ [
+ 'cmake', '--build', '.', '--target', '%s' % target,
+ '--config', _MSBUILD_CONFIG[cfg]
+ ],
+ cwd=os.path.dirname(makefile),
+ timeout_seconds=None) for target in targets
+ ]
else:
- return []
+ if targets and makefile.startswith('cmake/build/'):
+ # With cmake, we've passed all the build configuration in the pre-build step already
+ return [
+ jobset.JobSpec(
+ [os.getenv('MAKE', 'make'), '-j', '%d' % args.jobs] +
+ targets,
+ cwd='cmake/build',
+ timeout_seconds=None)
+ ]
+ if targets:
+ return [
+ jobset.JobSpec(
+ [
+ os.getenv('MAKE', 'make'), '-f', makefile, '-j', '%d' %
+ args.jobs,
+ 'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
+ args.slowdown, 'CONFIG=%s' % cfg, 'Q='
+ ] + language_make_options +
+ ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
+ timeout_seconds=None)
+ ]
+ else:
+ return []
+
make_targets = {}
for l in languages:
- makefile = l.makefile_name()
- make_targets[makefile] = make_targets.get(makefile, set()).union(
- set(l.make_targets()))
+ makefile = l.makefile_name()
+ make_targets[makefile] = make_targets.get(
+ makefile, set()).union(set(l.make_targets()))
+
def build_step_environ(cfg):
- environ = {'CONFIG': cfg}
- msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
- if msbuild_cfg:
- environ['MSBUILD_CONFIG'] = msbuild_cfg
- return environ
-
-build_steps = list(set(
- jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=2)
- for l in languages
- for cmdline in l.pre_build_steps()))
+ environ = {'CONFIG': cfg}
+ msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
+ if msbuild_cfg:
+ environ['MSBUILD_CONFIG'] = msbuild_cfg
+ return environ
+
+
+build_steps = list(
+ set(
+ jobset.JobSpec(
+ cmdline, environ=build_step_environ(build_config), flake_retries=2)
+ for l in languages for cmdline in l.pre_build_steps()))
if make_targets:
- make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
- build_steps.extend(set(make_commands))
-build_steps.extend(set(
- jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
- for l in languages
- for cmdline in l.build_steps()))
-
-post_tests_steps = list(set(
- jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
- for l in languages
- for cmdline in l.post_tests_steps()))
+ make_commands = itertools.chain.from_iterable(
+ make_jobspec(build_config, list(targets), makefile)
+ for (makefile, targets) in make_targets.items())
+ build_steps.extend(set(make_commands))
+build_steps.extend(
+ set(
+ jobset.JobSpec(
+ cmdline,
+ environ=build_step_environ(build_config),
+ timeout_seconds=None)
+ for l in languages for cmdline in l.build_steps()))
+
+post_tests_steps = list(
+ set(
+ jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
+ for l in languages for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
- try:
- version = int(urllib.request.urlopen(
- 'http://localhost:%d/version_number' % legacy_server_port,
- timeout=10).read())
- except:
- pass
- else:
- urllib.request.urlopen(
- 'http://localhost:%d/quitquitquit' % legacy_server_port).read()
+ try:
+ version = int(
+ urllib.request.urlopen(
+ 'http://localhost:%d/version_number' % legacy_server_port,
+ timeout=10).read())
+ except:
+ pass
+ else:
+ urllib.request.urlopen('http://localhost:%d/quitquitquit' %
+ legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
- """Caculate number of runs and failures for a particular test.
+    """Calculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
- num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
- num_failures = 0
- for jobresult in list_of_results:
- if jobresult.retries > 0:
- num_runs += jobresult.retries
- if jobresult.num_failures > 0:
- num_failures += jobresult.num_failures
- return num_runs, num_failures
+ num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
+ num_failures = 0
+ for jobresult in list_of_results:
+ if jobresult.retries > 0:
+ num_runs += jobresult.retries
+ if jobresult.num_failures > 0:
+ num_failures += jobresult.num_failures
+ return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
- BUILD = object()
- TEST = object()
- POST_TEST = object()
+ BUILD = object()
+ TEST = object()
+ POST_TEST = object()
def _has_epollexclusive():
- binary = 'bins/%s/check_epollexclusive' % args.config
- if not os.path.exists(binary):
- return False
- try:
- subprocess.check_call(binary)
- return True
- except subprocess.CalledProcessError, e:
- return False
- except OSError, e:
- # For languages other than C and Windows the binary won't exist
- return False
+ binary = 'bins/%s/check_epollexclusive' % args.config
+ if not os.path.exists(binary):
+ return False
+ try:
+ subprocess.check_call(binary)
+ return True
+ except subprocess.CalledProcessError, e:
+ return False
+ except OSError, e:
+ # For languages other than C and Windows the binary won't exist
+ return False
# returns a list of things that failed (or an empty list on success)
-def _build_and_run(
- check_cancelled, newline_on_success, xml_report=None, build_only=False):
- """Do one pass of building & running tests."""
- # build latest sequentially
- num_failures, resultset = jobset.run(
- build_steps, maxjobs=1, stop_on_failure=True,
- newline_on_success=newline_on_success, travis=args.travis)
- if num_failures:
- return [BuildAndRunError.BUILD]
-
- if build_only:
- if xml_report:
- report_utils.render_junit_xml_report(resultset, xml_report,
- suite_name=args.report_suite_name)
- return []
-
- if not args.travis and not _has_epollexclusive() and platform_string() in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string()]:
- print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
- _POLLING_STRATEGIES[platform_string()].remove('epollex')
-
- # start antagonists
- antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
- for _ in range(0, args.antagonists)]
- start_port_server.start_port_server()
- resultset = None
- num_test_failures = 0
- try:
- infinite_runs = runs_per_test == 0
- one_run = set(
- spec
- for language in languages
- for spec in language.test_specs()
- if (re.search(args.regex, spec.shortname) and
- (args.regex_exclude == '' or
- not re.search(args.regex_exclude, spec.shortname))))
- # When running on travis, we want out test runs to be as similar as possible
- # for reproducibility purposes.
- if args.travis and args.max_time <= 0:
- massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
- else:
- # whereas otherwise, we want to shuffle things up to give all tests a
- # chance to run.
- massaged_one_run = list(one_run) # random.sample needs an indexable seq.
- num_jobs = len(massaged_one_run)
- # for a random sample, get as many as indicated by the 'sample_percent'
- # argument. By default this arg is 100, resulting in a shuffle of all
- # jobs.
- sample_size = int(num_jobs * args.sample_percent/100.0)
- massaged_one_run = random.sample(massaged_one_run, sample_size)
- if not isclose(args.sample_percent, 100.0):
- assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
- print("Running %d tests out of %d (~%d%%)" %
- (sample_size, num_jobs, args.sample_percent))
- if infinite_runs:
- assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
- runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
- else itertools.repeat(massaged_one_run, runs_per_test))
- all_runs = itertools.chain.from_iterable(runs_sequence)
-
- if args.quiet_success:
- jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
- num_test_failures, resultset = jobset.run(
- all_runs, check_cancelled, newline_on_success=newline_on_success,
- travis=args.travis, maxjobs=args.jobs, maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
- stop_on_failure=args.stop_on_failure,
- quiet_success=args.quiet_success, max_time=args.max_time)
- if resultset:
- for k, v in sorted(resultset.items()):
- num_runs, num_failures = _calculate_num_runs_failures(v)
- if num_failures > 0:
- if num_failures == num_runs: # what about infinite_runs???
- jobset.message('FAILED', k, do_newline=True)
- else:
+def _build_and_run(check_cancelled,
+ newline_on_success,
+ xml_report=None,
+ build_only=False):
+ """Do one pass of building & running tests."""
+ # build latest sequentially
+ num_failures, resultset = jobset.run(
+ build_steps,
+ maxjobs=1,
+ stop_on_failure=True,
+ newline_on_success=newline_on_success,
+ travis=args.travis)
+ if num_failures:
+ return [BuildAndRunError.BUILD]
+
+ if build_only:
+ if xml_report:
+ report_utils.render_junit_xml_report(
+ resultset, xml_report, suite_name=args.report_suite_name)
+ return []
+
+ if not args.travis and not _has_epollexclusive() and platform_string(
+ ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
+ platform_string()]:
+ print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
+ _POLLING_STRATEGIES[platform_string()].remove('epollex')
+
+ # start antagonists
+ antagonists = [
+ subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
+ for _ in range(0, args.antagonists)
+ ]
+ start_port_server.start_port_server()
+ resultset = None
+ num_test_failures = 0
+ try:
+ infinite_runs = runs_per_test == 0
+ one_run = set(spec
+ for language in languages
+ for spec in language.test_specs()
+ if (re.search(args.regex, spec.shortname) and (
+ args.regex_exclude == '' or not re.search(
+ args.regex_exclude, spec.shortname))))
+        # When running on travis, we want our test runs to be as similar as possible
+ # for reproducibility purposes.
+ if args.travis and args.max_time <= 0:
+ massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
+ else:
+ # whereas otherwise, we want to shuffle things up to give all tests a
+ # chance to run.
+ massaged_one_run = list(
+ one_run) # random.sample needs an indexable seq.
+ num_jobs = len(massaged_one_run)
+ # for a random sample, get as many as indicated by the 'sample_percent'
+ # argument. By default this arg is 100, resulting in a shuffle of all
+ # jobs.
+ sample_size = int(num_jobs * args.sample_percent / 100.0)
+ massaged_one_run = random.sample(massaged_one_run, sample_size)
+ if not isclose(args.sample_percent, 100.0):
+ assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
+ print("Running %d tests out of %d (~%d%%)" %
+ (sample_size, num_jobs, args.sample_percent))
+ if infinite_runs:
+ assert len(massaged_one_run
+ ) > 0, 'Must have at least one test for a -n inf run'
+ runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
+ else itertools.repeat(massaged_one_run, runs_per_test))
+ all_runs = itertools.chain.from_iterable(runs_sequence)
+
+ if args.quiet_success:
jobset.message(
- 'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
+ 'START',
+ 'Running tests quietly, only failing tests will be reported',
do_newline=True)
- finally:
- for antagonist in antagonists:
- antagonist.kill()
- if args.bq_result_table and resultset:
- upload_results_to_bq(resultset, args.bq_result_table, args, platform_string())
- if xml_report and resultset:
- report_utils.render_junit_xml_report(resultset, xml_report,
- suite_name=args.report_suite_name)
-
- number_failures, _ = jobset.run(
- post_tests_steps, maxjobs=1, stop_on_failure=False,
- newline_on_success=newline_on_success, travis=args.travis)
+ num_test_failures, resultset = jobset.run(
+ all_runs,
+ check_cancelled,
+ newline_on_success=newline_on_success,
+ travis=args.travis,
+ maxjobs=args.jobs,
+ maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
+ stop_on_failure=args.stop_on_failure,
+ quiet_success=args.quiet_success,
+ max_time=args.max_time)
+ if resultset:
+ for k, v in sorted(resultset.items()):
+ num_runs, num_failures = _calculate_num_runs_failures(v)
+ if num_failures > 0:
+ if num_failures == num_runs: # what about infinite_runs???
+ jobset.message('FAILED', k, do_newline=True)
+ else:
+ jobset.message(
+ 'FLAKE',
+ '%s [%d/%d runs flaked]' %
+ (k, num_failures, num_runs),
+ do_newline=True)
+ finally:
+ for antagonist in antagonists:
+ antagonist.kill()
+ if args.bq_result_table and resultset:
+ upload_results_to_bq(resultset, args.bq_result_table, args,
+ platform_string())
+ if xml_report and resultset:
+ report_utils.render_junit_xml_report(
+ resultset, xml_report, suite_name=args.report_suite_name)
+
+ number_failures, _ = jobset.run(
+ post_tests_steps,
+ maxjobs=1,
+ stop_on_failure=False,
+ newline_on_success=newline_on_success,
+ travis=args.travis)
- out = []
- if number_failures:
- out.append(BuildAndRunError.POST_TEST)
- if num_test_failures:
- out.append(BuildAndRunError.TEST)
+ out = []
+ if number_failures:
+ out.append(BuildAndRunError.POST_TEST)
+ if num_test_failures:
+ out.append(BuildAndRunError.TEST)
- return out
+ return out
if forever:
- success = True
- while True:
- dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
- initial_time = dw.most_recent_change()
- have_files_changed = lambda: dw.most_recent_change() != initial_time
- previous_success = success
- errors = _build_and_run(check_cancelled=have_files_changed,
- newline_on_success=False,
- build_only=args.build_only) == 0
- if not previous_success and not errors:
- jobset.message('SUCCESS',
- 'All tests are now passing properly',
- do_newline=True)
- jobset.message('IDLE', 'No change detected')
- while not have_files_changed():
- time.sleep(1)
+ success = True
+ while True:
+ dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
+ initial_time = dw.most_recent_change()
+ have_files_changed = lambda: dw.most_recent_change() != initial_time
+ previous_success = success
+ errors = _build_and_run(
+ check_cancelled=have_files_changed,
+ newline_on_success=False,
+ build_only=args.build_only) == 0
+ if not previous_success and not errors:
+ jobset.message(
+ 'SUCCESS',
+ 'All tests are now passing properly',
+ do_newline=True)
+ jobset.message('IDLE', 'No change detected')
+ while not have_files_changed():
+ time.sleep(1)
else:
- errors = _build_and_run(check_cancelled=lambda: False,
- newline_on_success=args.newline_on_success,
- xml_report=args.xml_report,
- build_only=args.build_only)
- if not errors:
- jobset.message('SUCCESS', 'All tests passed', do_newline=True)
- else:
- jobset.message('FAILED', 'Some tests failed', do_newline=True)
- exit_code = 0
- if BuildAndRunError.BUILD in errors:
- exit_code |= 1
- if BuildAndRunError.TEST in errors:
- exit_code |= 2
- if BuildAndRunError.POST_TEST in errors:
- exit_code |= 4
- sys.exit(exit_code)
+ errors = _build_and_run(
+ check_cancelled=lambda: False,
+ newline_on_success=args.newline_on_success,
+ xml_report=args.xml_report,
+ build_only=args.build_only)
+ if not errors:
+ jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+ else:
+ jobset.message('FAILED', 'Some tests failed', do_newline=True)
+ exit_code = 0
+ if BuildAndRunError.BUILD in errors:
+ exit_code |= 1
+ if BuildAndRunError.TEST in errors:
+ exit_code |= 2
+ if BuildAndRunError.POST_TEST in errors:
+ exit_code |= 4
+ sys.exit(exit_code)
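
Aside, for illustration only (not part of the diff): run_tests.py reports its result as a bitmask exit status, with 1 meaning a build failure, 2 a test failure and 4 a post-test failure, so a caller can tell which phase broke. The sketch below shows how a wrapper might invoke the script and decode that status; the invocation flags mirror the arguments defined above, while the wrapper itself and the report.xml path are invented for the example.

import subprocess
import sys


def describe_run_tests_exit(code):
    """Map the run_tests.py exit bitmask back to the failing phases."""
    phases = []
    if code & 1:
        phases.append('build')
    if code & 2:
        phases.append('test')
    if code & 4:
        phases.append('post_test')
    return phases or ['success']


if __name__ == '__main__':
    # Hypothetical invocation; report.xml is an invented output path.
    returncode = subprocess.call([
        'python', 'tools/run_tests/run_tests.py', '-l', 'python', '-c', 'opt',
        '--newline_on_success', '-x', 'report.xml'
    ])
    print('run_tests.py finished: %s' % ', '.join(
        describe_run_tests_exit(returncode)))
    sys.exit(1 if returncode else 0)
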
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index 7c58d8efb1..49be8f1d7e 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Run test matrix."""
from __future__ import print_function
@@ -29,14 +28,14 @@ from python_utils.filter_pull_request_tests import filter_tests
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
-_DEFAULT_RUNTESTS_TIMEOUT = 1*60*60
+_DEFAULT_RUNTESTS_TIMEOUT = 1 * 60 * 60
# Set the timeout high to allow enough time for sanitizers and pre-building
# clang docker.
-_CPP_RUNTESTS_TIMEOUT = 4*60*60
+_CPP_RUNTESTS_TIMEOUT = 4 * 60 * 60
# C++ TSAN takes longer than other sanitizers
-_CPP_TSAN_RUNTESTS_TIMEOUT = 8*60*60
+_CPP_TSAN_RUNTESTS_TIMEOUT = 8 * 60 * 60
# Number of jobs assigned to each run_tests.py instance
_DEFAULT_INNER_JOBS = 2
@@ -46,448 +45,517 @@ _REPORT_SUFFIX = 'sponge_log.xml'
def _report_filename(name):
- """Generates report file name"""
- return 'report_%s_%s' % (name, _REPORT_SUFFIX)
+ """Generates report file name"""
+ return 'report_%s_%s' % (name, _REPORT_SUFFIX)
def _report_filename_internal_ci(name):
- """Generates report file name that leads to better presentation by internal CI"""
- return '%s/%s' % (name, _REPORT_SUFFIX)
+ """Generates report file name that leads to better presentation by internal CI"""
+ return '%s/%s' % (name, _REPORT_SUFFIX)
-def _docker_jobspec(name, runtests_args=[], runtests_envs={},
+def _docker_jobspec(name,
+ runtests_args=[],
+ runtests_envs={},
inner_jobs=_DEFAULT_INNER_JOBS,
timeout_seconds=None):
- """Run a single instance of run_tests.py in a docker container"""
- if not timeout_seconds:
- timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
- test_job = jobset.JobSpec(
- cmdline=['python', 'tools/run_tests/run_tests.py',
- '--use_docker',
- '-t',
- '-j', str(inner_jobs),
- '-x', _report_filename(name),
- '--report_suite_name', '%s' % name] + runtests_args,
- environ=runtests_envs,
- shortname='run_tests_%s' % name,
- timeout_seconds=timeout_seconds)
- return test_job
-
-
-def _workspace_jobspec(name, runtests_args=[], workspace_name=None,
- runtests_envs={}, inner_jobs=_DEFAULT_INNER_JOBS,
+ """Run a single instance of run_tests.py in a docker container"""
+ if not timeout_seconds:
+ timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
+ test_job = jobset.JobSpec(
+ cmdline=[
+ 'python', 'tools/run_tests/run_tests.py', '--use_docker', '-t',
+ '-j', str(inner_jobs), '-x', _report_filename(name),
+ '--report_suite_name', '%s' % name
+ ] + runtests_args,
+ environ=runtests_envs,
+ shortname='run_tests_%s' % name,
+ timeout_seconds=timeout_seconds)
+ return test_job
+
+
+def _workspace_jobspec(name,
+ runtests_args=[],
+ workspace_name=None,
+ runtests_envs={},
+ inner_jobs=_DEFAULT_INNER_JOBS,
timeout_seconds=None):
- """Run a single instance of run_tests.py in a separate workspace"""
- if not workspace_name:
- workspace_name = 'workspace_%s' % name
- if not timeout_seconds:
- timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
- env = {'WORKSPACE_NAME': workspace_name}
- env.update(runtests_envs)
- test_job = jobset.JobSpec(
- cmdline=['bash',
- 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
- '-t',
- '-j', str(inner_jobs),
- '-x', '../%s' % _report_filename(name),
- '--report_suite_name', '%s' % name] + runtests_args,
- environ=env,
- shortname='run_tests_%s' % name,
- timeout_seconds=timeout_seconds)
- return test_job
-
-
-def _generate_jobs(languages, configs, platforms, iomgr_platform = 'native',
- arch=None, compiler=None,
- labels=[], extra_args=[], extra_envs={},
- inner_jobs=_DEFAULT_INNER_JOBS,
- timeout_seconds=None):
- result = []
- for language in languages:
- for platform in platforms:
- for config in configs:
- name = '%s_%s_%s_%s' % (language, platform, config, iomgr_platform)
- runtests_args = ['-l', language,
- '-c', config,
- '--iomgr_platform', iomgr_platform]
- if arch or compiler:
- name += '_%s_%s' % (arch, compiler)
- runtests_args += ['--arch', arch,
- '--compiler', compiler]
- if '--build_only' in extra_args:
- name += '_buildonly'
- for extra_env in extra_envs:
- name += '_%s_%s' % (extra_env, extra_envs[extra_env])
-
- runtests_args += extra_args
- if platform == 'linux':
- job = _docker_jobspec(name=name, runtests_args=runtests_args,
- runtests_envs=extra_envs, inner_jobs=inner_jobs,
- timeout_seconds=timeout_seconds)
- else:
- job = _workspace_jobspec(name=name, runtests_args=runtests_args,
- runtests_envs=extra_envs, inner_jobs=inner_jobs,
- timeout_seconds=timeout_seconds)
-
- job.labels = [platform, config, language, iomgr_platform] + labels
- result.append(job)
- return result
+ """Run a single instance of run_tests.py in a separate workspace"""
+ if not workspace_name:
+ workspace_name = 'workspace_%s' % name
+ if not timeout_seconds:
+ timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
+ env = {'WORKSPACE_NAME': workspace_name}
+ env.update(runtests_envs)
+ test_job = jobset.JobSpec(
+ cmdline=[
+ 'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
+ '-t', '-j', str(inner_jobs), '-x', '../%s' % _report_filename(name),
+ '--report_suite_name', '%s' % name
+ ] + runtests_args,
+ environ=env,
+ shortname='run_tests_%s' % name,
+ timeout_seconds=timeout_seconds)
+ return test_job
+
+
+def _generate_jobs(languages,
+ configs,
+ platforms,
+ iomgr_platform='native',
+ arch=None,
+ compiler=None,
+ labels=[],
+ extra_args=[],
+ extra_envs={},
+ inner_jobs=_DEFAULT_INNER_JOBS,
+ timeout_seconds=None):
+ result = []
+ for language in languages:
+ for platform in platforms:
+ for config in configs:
+ name = '%s_%s_%s_%s' % (language, platform, config,
+ iomgr_platform)
+ runtests_args = [
+ '-l', language, '-c', config, '--iomgr_platform',
+ iomgr_platform
+ ]
+ if arch or compiler:
+ name += '_%s_%s' % (arch, compiler)
+ runtests_args += ['--arch', arch, '--compiler', compiler]
+ if '--build_only' in extra_args:
+ name += '_buildonly'
+ for extra_env in extra_envs:
+ name += '_%s_%s' % (extra_env, extra_envs[extra_env])
+
+ runtests_args += extra_args
+ if platform == 'linux':
+ job = _docker_jobspec(
+ name=name,
+ runtests_args=runtests_args,
+ runtests_envs=extra_envs,
+ inner_jobs=inner_jobs,
+ timeout_seconds=timeout_seconds)
+ else:
+ job = _workspace_jobspec(
+ name=name,
+ runtests_args=runtests_args,
+ runtests_envs=extra_envs,
+ inner_jobs=inner_jobs,
+ timeout_seconds=timeout_seconds)
+
+ job.labels = [platform, config, language, iomgr_platform
+ ] + labels
+ result.append(job)
+ return result
def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
- test_jobs = []
- # supported on linux only
- test_jobs += _generate_jobs(languages=['sanity', 'php7'],
- configs=['dbg', 'opt'],
- platforms=['linux'],
- labels=['basictests', 'multilang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- # supported on all platforms.
- test_jobs += _generate_jobs(languages=['c'],
- configs=['dbg', 'opt'],
- platforms=['linux', 'macos', 'windows'],
- labels=['basictests', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-
- test_jobs += _generate_jobs(languages=['csharp', 'python'],
- configs=['dbg', 'opt'],
- platforms=['linux', 'macos', 'windows'],
- labels=['basictests', 'multilang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- # supported on linux and mac.
- test_jobs += _generate_jobs(languages=['c++'],
- configs=['dbg', 'opt'],
- platforms=['linux', 'macos'],
- labels=['basictests', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-
- test_jobs += _generate_jobs(languages=['grpc-node', 'ruby', 'php'],
- configs=['dbg', 'opt'],
- platforms=['linux', 'macos'],
- labels=['basictests', 'multilang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- # supported on mac only.
- test_jobs += _generate_jobs(languages=['objc'],
- configs=['dbg', 'opt'],
- platforms=['macos'],
- labels=['basictests', 'multilang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- # sanitizers
- test_jobs += _generate_jobs(languages=['c'],
- configs=['msan', 'asan', 'tsan', 'ubsan'],
- platforms=['linux'],
- labels=['sanitizers', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
- test_jobs += _generate_jobs(languages=['c++'],
- configs=['asan'],
- platforms=['linux'],
- labels=['sanitizers', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
- test_jobs += _generate_jobs(languages=['c++'],
- configs=['tsan'],
- platforms=['linux'],
- labels=['sanitizers', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_TSAN_RUNTESTS_TIMEOUT)
-
- return test_jobs
-
-
-def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
- test_jobs = []
- # portability C x86
- test_jobs += _generate_jobs(languages=['c'],
- configs=['dbg'],
- platforms=['linux'],
- arch='x86',
- compiler='default',
- labels=['portability', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- # portability C and C++ on x64
- for compiler in ['gcc4.8', 'gcc5.3', 'gcc_musl',
- 'clang3.5', 'clang3.6', 'clang3.7']:
- test_jobs += _generate_jobs(languages=['c', 'c++'],
- configs=['dbg'],
- platforms=['linux'],
- arch='x64',
- compiler=compiler,
- labels=['portability', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-
- # portability C on Windows 64-bit (x86 is the default)
- test_jobs += _generate_jobs(languages=['c'],
- configs=['dbg'],
- platforms=['windows'],
- arch='x64',
- compiler='default',
- labels=['portability', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- # portability C++ on Windows
- # TODO(jtattermusch): some of the tests are failing, so we force --build_only
- test_jobs += _generate_jobs(languages=['c++'],
- configs=['dbg'],
- platforms=['windows'],
- arch='default',
- compiler='default',
- labels=['portability', 'corelang'],
- extra_args=extra_args + ['--build_only'],
- inner_jobs=inner_jobs)
-
- # portability C and C++ on Windows using VS2017 (build only)
- # TODO(jtattermusch): some of the tests are failing, so we force --build_only
- test_jobs += _generate_jobs(languages=['c', 'c++'],
- configs=['dbg'],
- platforms=['windows'],
- arch='x64',
- compiler='cmake_vs2017',
- labels=['portability', 'corelang'],
- extra_args=extra_args + ['--build_only'],
- inner_jobs=inner_jobs)
-
- # C and C++ with the c-ares DNS resolver on Linux
- test_jobs += _generate_jobs(languages=['c', 'c++'],
- configs=['dbg'], platforms=['linux'],
- labels=['portability', 'corelang'],
- extra_args=extra_args,
- extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-
- # TODO(zyc): Turn on this test after adding c-ares support on windows.
- # C with the c-ares DNS resolver on Windows
- # test_jobs += _generate_jobs(languages=['c'],
- # configs=['dbg'], platforms=['windows'],
- # labels=['portability', 'corelang'],
- # extra_args=extra_args,
- # extra_envs={'GRPC_DNS_RESOLVER': 'ares'})
-
- # C and C++ build with cmake on Linux
- # TODO(jtattermusch): some of the tests are failing, so we force --build_only
- # to make sure it's buildable at least.
- test_jobs += _generate_jobs(languages=['c', 'c++'],
- configs=['dbg'],
- platforms=['linux'],
- arch='default',
- compiler='cmake',
- labels=['portability', 'corelang'],
- extra_args=extra_args + ['--build_only'],
- inner_jobs=inner_jobs)
-
- test_jobs += _generate_jobs(languages=['python'],
- configs=['dbg'],
- platforms=['linux'],
- arch='default',
- compiler='python_alpine',
- labels=['portability', 'multilang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- test_jobs += _generate_jobs(languages=['csharp'],
- configs=['dbg'],
- platforms=['linux'],
- arch='default',
- compiler='coreclr',
- labels=['portability', 'multilang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
-
- test_jobs += _generate_jobs(languages=['c'],
- configs=['dbg'],
- platforms=['linux'],
- iomgr_platform='uv',
- labels=['portability', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs,
- timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-
- return test_jobs
+ test_jobs = []
+ # supported on linux only
+ test_jobs += _generate_jobs(
+ languages=['sanity', 'php7'],
+ configs=['dbg', 'opt'],
+ platforms=['linux'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # supported on all platforms.
+ test_jobs += _generate_jobs(
+ languages=['c'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos', 'windows'],
+ labels=['basictests', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ test_jobs += _generate_jobs(
+ languages=['csharp', 'python'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos', 'windows'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # supported on linux and mac.
+ test_jobs += _generate_jobs(
+ languages=['c++'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos'],
+ labels=['basictests', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ test_jobs += _generate_jobs(
+ languages=['grpc-node', 'ruby', 'php'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # supported on mac only.
+ test_jobs += _generate_jobs(
+ languages=['objc'],
+ configs=['dbg', 'opt'],
+ platforms=['macos'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # sanitizers
+ test_jobs += _generate_jobs(
+ languages=['c'],
+ configs=['msan', 'asan', 'tsan', 'ubsan'],
+ platforms=['linux'],
+ labels=['sanitizers', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+ test_jobs += _generate_jobs(
+ languages=['c++'],
+ configs=['asan'],
+ platforms=['linux'],
+ labels=['sanitizers', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+ test_jobs += _generate_jobs(
+ languages=['c++'],
+ configs=['tsan'],
+ platforms=['linux'],
+ labels=['sanitizers', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_TSAN_RUNTESTS_TIMEOUT)
+
+ return test_jobs
+
+
+def _create_portability_test_jobs(extra_args=[],
+ inner_jobs=_DEFAULT_INNER_JOBS):
+ test_jobs = []
+ # portability C x86
+ test_jobs += _generate_jobs(
+ languages=['c'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='x86',
+ compiler='default',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # portability C and C++ on x64
+ for compiler in [
+ 'gcc4.8', 'gcc5.3', 'gcc_musl', 'clang3.5', 'clang3.6', 'clang3.7'
+ ]:
+ test_jobs += _generate_jobs(
+ languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='x64',
+ compiler=compiler,
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ # portability C on Windows 64-bit (x86 is the default)
+ test_jobs += _generate_jobs(
+ languages=['c'],
+ configs=['dbg'],
+ platforms=['windows'],
+ arch='x64',
+ compiler='default',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # portability C++ on Windows
+ # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+ test_jobs += _generate_jobs(
+ languages=['c++'],
+ configs=['dbg'],
+ platforms=['windows'],
+ arch='default',
+ compiler='default',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args + ['--build_only'],
+ inner_jobs=inner_jobs)
+
+ # portability C and C++ on Windows using VS2017 (build only)
+ # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+ test_jobs += _generate_jobs(
+ languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['windows'],
+ arch='x64',
+ compiler='cmake_vs2017',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args + ['--build_only'],
+ inner_jobs=inner_jobs)
+
+ # C and C++ with the c-ares DNS resolver on Linux
+ test_jobs += _generate_jobs(
+ languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['linux'],
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ # TODO(zyc): Turn on this test after adding c-ares support on windows.
+ # C with the c-ares DNS resolver on Windows
+ # test_jobs += _generate_jobs(languages=['c'],
+ # configs=['dbg'], platforms=['windows'],
+ # labels=['portability', 'corelang'],
+ # extra_args=extra_args,
+ # extra_envs={'GRPC_DNS_RESOLVER': 'ares'})
+
+ # C and C++ build with cmake on Linux
+ # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+ # to make sure it's buildable at least.
+ test_jobs += _generate_jobs(
+ languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='default',
+ compiler='cmake',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args + ['--build_only'],
+ inner_jobs=inner_jobs)
+
+ test_jobs += _generate_jobs(
+ languages=['python'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='default',
+ compiler='python_alpine',
+ labels=['portability', 'multilang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ test_jobs += _generate_jobs(
+ languages=['csharp'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='default',
+ compiler='coreclr',
+ labels=['portability', 'multilang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ test_jobs += _generate_jobs(
+ languages=['c'],
+ configs=['dbg'],
+ platforms=['linux'],
+ iomgr_platform='uv',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ return test_jobs
def _allowed_labels():
- """Returns a list of existing job labels."""
- all_labels = set()
- for job in _create_test_jobs() + _create_portability_test_jobs():
- for label in job.labels:
- all_labels.add(label)
- return sorted(all_labels)
+ """Returns a list of existing job labels."""
+ all_labels = set()
+ for job in _create_test_jobs() + _create_portability_test_jobs():
+ for label in job.labels:
+ all_labels.add(label)
+ return sorted(all_labels)
def _runs_per_test_type(arg_str):
- """Auxiliary function to parse the "runs_per_test" flag."""
- try:
- n = int(arg_str)
- if n <= 0: raise ValueError
- return n
- except:
- msg = '\'{}\' is not a positive integer'.format(arg_str)
- raise argparse.ArgumentTypeError(msg)
+ """Auxiliary function to parse the "runs_per_test" flag."""
+ try:
+ n = int(arg_str)
+ if n <= 0: raise ValueError
+ return n
+ except:
+ msg = '\'{}\' is not a positive integer'.format(arg_str)
+ raise argparse.ArgumentTypeError(msg)
if __name__ == "__main__":
- argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.')
- argp.add_argument('-j', '--jobs',
- default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS,
- type=int,
- help='Number of concurrent run_tests.py instances.')
- argp.add_argument('-f', '--filter',
- choices=_allowed_labels(),
- nargs='+',
- default=[],
- help='Filter targets to run by label with AND semantics.')
- argp.add_argument('--exclude',
- choices=_allowed_labels(),
- nargs='+',
- default=[],
- help='Exclude targets with any of given labels.')
- argp.add_argument('--build_only',
- default=False,
- action='store_const',
- const=True,
- help='Pass --build_only flag to run_tests.py instances.')
- argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
- help='Pass --force_default_poller to run_tests.py instances.')
- argp.add_argument('--dry_run',
- default=False,
- action='store_const',
- const=True,
- help='Only print what would be run.')
- argp.add_argument('--filter_pr_tests',
- default=False,
- action='store_const',
- const=True,
- help='Filters out tests irrelevant to pull request changes.')
- argp.add_argument('--base_branch',
- default='origin/master',
- type=str,
- help='Branch that pull request is requesting to merge into')
- argp.add_argument('--inner_jobs',
- default=_DEFAULT_INNER_JOBS,
- type=int,
- help='Number of jobs in each run_tests.py instance')
- argp.add_argument('-n', '--runs_per_test', default=1, type=_runs_per_test_type,
- help='How many times to run each tests. >1 runs implies ' +
- 'omitting passing test from the output & reports.')
- argp.add_argument('--max_time', default=-1, type=int,
- help='Maximum amount of time to run tests for' +
- '(other tests will be skipped)')
- argp.add_argument('--internal_ci',
- default=False,
- action='store_const',
- const=True,
- help='Put reports into subdirectories to improve presentation of '
- 'results by Internal CI.')
- argp.add_argument('--bq_result_table',
- default='',
- type=str,
- nargs='?',
- help='Upload test results to a specified BQ table.')
- args = argp.parse_args()
-
- if args.internal_ci:
- _report_filename = _report_filename_internal_ci # override the function
-
- extra_args = []
- if args.build_only:
- extra_args.append('--build_only')
- if args.force_default_poller:
- extra_args.append('--force_default_poller')
- if args.runs_per_test > 1:
- extra_args.append('-n')
- extra_args.append('%s' % args.runs_per_test)
- extra_args.append('--quiet_success')
- if args.max_time > 0:
- extra_args.extend(('--max_time', '%d' % args.max_time))
- if args.bq_result_table:
- extra_args.append('--bq_result_table')
- extra_args.append('%s' % args.bq_result_table)
- extra_args.append('--measure_cpu_costs')
- extra_args.append('--disable_auto_set_flakes')
-
- all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
- _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
-
- jobs = []
- for job in all_jobs:
- if not args.filter or all(filter in job.labels for filter in args.filter):
- if not any(exclude_label in job.labels for exclude_label in args.exclude):
- jobs.append(job)
-
- if not jobs:
- jobset.message('FAILED', 'No test suites match given criteria.',
- do_newline=True)
- sys.exit(1)
-
- print('IMPORTANT: The changes you are testing need to be locally committed')
- print('because only the committed changes in the current branch will be')
- print('copied to the docker environment or into subworkspaces.')
-
- skipped_jobs = []
-
- if args.filter_pr_tests:
- print('Looking for irrelevant tests to skip...')
- relevant_jobs = filter_tests(jobs, args.base_branch)
- if len(relevant_jobs) == len(jobs):
- print('No tests will be skipped.')
- else:
- print('These tests will be skipped:')
- skipped_jobs = list(set(jobs) - set(relevant_jobs))
- # Sort by shortnames to make printing of skipped tests consistent
- skipped_jobs.sort(key=lambda job: job.shortname)
- for job in list(skipped_jobs):
- print(' %s' % job.shortname)
- jobs = relevant_jobs
-
- print('Will run these tests:')
- for job in jobs:
+ argp = argparse.ArgumentParser(
+ description='Run a matrix of run_tests.py tests.')
+ argp.add_argument(
+ '-j',
+ '--jobs',
+ default=multiprocessing.cpu_count() / _DEFAULT_INNER_JOBS,
+ type=int,
+ help='Number of concurrent run_tests.py instances.')
+ argp.add_argument(
+ '-f',
+ '--filter',
+ choices=_allowed_labels(),
+ nargs='+',
+ default=[],
+ help='Filter targets to run by label with AND semantics.')
+ argp.add_argument(
+ '--exclude',
+ choices=_allowed_labels(),
+ nargs='+',
+ default=[],
+ help='Exclude targets with any of given labels.')
+ argp.add_argument(
+ '--build_only',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Pass --build_only flag to run_tests.py instances.')
+ argp.add_argument(
+ '--force_default_poller',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Pass --force_default_poller to run_tests.py instances.')
+ argp.add_argument(
+ '--dry_run',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Only print what would be run.')
+ argp.add_argument(
+ '--filter_pr_tests',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Filters out tests irrelevant to pull request changes.')
+ argp.add_argument(
+ '--base_branch',
+ default='origin/master',
+ type=str,
+ help='Branch that pull request is requesting to merge into')
+ argp.add_argument(
+ '--inner_jobs',
+ default=_DEFAULT_INNER_JOBS,
+ type=int,
+ help='Number of jobs in each run_tests.py instance')
+ argp.add_argument(
+ '-n',
+ '--runs_per_test',
+ default=1,
+ type=_runs_per_test_type,
+        help='How many times to run each test. >1 runs implies ' +
+        'omitting passing tests from the output & reports.')
+ argp.add_argument(
+ '--max_time',
+ default=-1,
+ type=int,
+        help='Maximum amount of time to run tests for ' +
+        '(other tests will be skipped)')
+ argp.add_argument(
+ '--internal_ci',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Put reports into subdirectories to improve presentation of '
+ 'results by Internal CI.')
+ argp.add_argument(
+ '--bq_result_table',
+ default='',
+ type=str,
+ nargs='?',
+ help='Upload test results to a specified BQ table.')
+ args = argp.parse_args()
+
+ if args.internal_ci:
+ _report_filename = _report_filename_internal_ci # override the function
+
+ extra_args = []
+ if args.build_only:
+ extra_args.append('--build_only')
+ if args.force_default_poller:
+ extra_args.append('--force_default_poller')
+ if args.runs_per_test > 1:
+ extra_args.append('-n')
+ extra_args.append('%s' % args.runs_per_test)
+ extra_args.append('--quiet_success')
+ if args.max_time > 0:
+ extra_args.extend(('--max_time', '%d' % args.max_time))
+ if args.bq_result_table:
+ extra_args.append('--bq_result_table')
+ extra_args.append('%s' % args.bq_result_table)
+ extra_args.append('--measure_cpu_costs')
+ extra_args.append('--disable_auto_set_flakes')
+
+ all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
+ _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
+
+ jobs = []
+ for job in all_jobs:
+ if not args.filter or all(filter in job.labels
+ for filter in args.filter):
+ if not any(exclude_label in job.labels
+ for exclude_label in args.exclude):
+ jobs.append(job)
+
+ if not jobs:
+ jobset.message(
+ 'FAILED', 'No test suites match given criteria.', do_newline=True)
+ sys.exit(1)
+
+ print('IMPORTANT: The changes you are testing need to be locally committed')
+ print('because only the committed changes in the current branch will be')
+ print('copied to the docker environment or into subworkspaces.')
+
+ skipped_jobs = []
+
+ if args.filter_pr_tests:
+ print('Looking for irrelevant tests to skip...')
+ relevant_jobs = filter_tests(jobs, args.base_branch)
+ if len(relevant_jobs) == len(jobs):
+ print('No tests will be skipped.')
+ else:
+ print('These tests will be skipped:')
+ skipped_jobs = list(set(jobs) - set(relevant_jobs))
+ # Sort by shortnames to make printing of skipped tests consistent
+ skipped_jobs.sort(key=lambda job: job.shortname)
+ for job in list(skipped_jobs):
+ print(' %s' % job.shortname)
+ jobs = relevant_jobs
+
+ print('Will run these tests:')
+ for job in jobs:
+ if args.dry_run:
+ print(' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
+ else:
+ print(' %s' % job.shortname)
+    print()
+
if args.dry_run:
- print(' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
+ print('--dry_run was used, exiting')
+ sys.exit(1)
+
+ jobset.message('START', 'Running test matrix.', do_newline=True)
+ num_failures, resultset = jobset.run(
+ jobs, newline_on_success=True, travis=True, maxjobs=args.jobs)
+ # Merge skipped tests into results to show skipped tests on report.xml
+ if skipped_jobs:
+ ignored_num_skipped_failures, skipped_results = jobset.run(
+ skipped_jobs, skip_jobs=True)
+ resultset.update(skipped_results)
+ report_utils.render_junit_xml_report(
+ resultset,
+ _report_filename('aggregate_tests'),
+ suite_name='aggregate_tests')
+
+ if num_failures == 0:
+ jobset.message(
+ 'SUCCESS',
+            'All run_tests.py instances finished successfully.',
+ do_newline=True)
else:
- print(' %s' % job.shortname)
- print
-
- if args.dry_run:
- print('--dry_run was used, exiting')
- sys.exit(1)
-
- jobset.message('START', 'Running test matrix.', do_newline=True)
- num_failures, resultset = jobset.run(jobs,
- newline_on_success=True,
- travis=True,
- maxjobs=args.jobs)
- # Merge skipped tests into results to show skipped tests on report.xml
- if skipped_jobs:
- ignored_num_skipped_failures, skipped_results = jobset.run(
- skipped_jobs, skip_jobs=True)
- resultset.update(skipped_results)
- report_utils.render_junit_xml_report(resultset, _report_filename('aggregate_tests'),
- suite_name='aggregate_tests')
-
- if num_failures == 0:
- jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.',
- do_newline=True)
- else:
- jobset.message('FAILED', 'Some run_tests.py instance have failed.',
- do_newline=True)
- sys.exit(1)
+ jobset.message(
+ 'FAILED',
+            'Some run_tests.py instances have failed.',
+ do_newline=True)
+ sys.exit(1)
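
The filtering loop added above keeps a job only when its labels contain every --filter label and none of the --exclude labels. A minimal standalone sketch of that selection rule, assuming a hypothetical Job tuple rather than the real jobset.JobSpec:

    # Sketch of the include/exclude label selection; Job and the sample data are illustrative.
    import collections

    Job = collections.namedtuple('Job', ['shortname', 'labels'])

    def select_jobs(all_jobs, include_filters, exclude_labels):
        """Keep jobs carrying every include filter and no exclude label."""
        selected = []
        for job in all_jobs:
            if include_filters and not all(f in job.labels for f in include_filters):
                continue
            if any(x in job.labels for x in exclude_labels):
                continue
            selected.append(job)
        return selected

    jobs = [Job('run_tests_c_linux', ['c', 'linux']),
            Job('run_tests_csharp_windows', ['csharp', 'windows'])]
    assert [j.shortname for j in select_jobs(jobs, ['linux'], ['csharp'])] == ['run_tests_c_linux']
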
diff --git a/tools/run_tests/sanity/check_bazel_workspace.py b/tools/run_tests/sanity/check_bazel_workspace.py
index 776c78b03f..b5a77f4479 100755
--- a/tools/run_tests/sanity/check_bazel_workspace.py
+++ b/tools/run_tests/sanity/check_bazel_workspace.py
@@ -27,23 +27,37 @@ os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
git_hash_pattern = re.compile('[0-9a-f]{40}')
# Parse git hashes from submodules
-git_submodules = subprocess.check_output('git submodule', shell=True).strip().split('\n')
-git_submodule_hashes = {re.search(git_hash_pattern, s).group() for s in git_submodules}
+git_submodules = subprocess.check_output(
+ 'git submodule', shell=True).strip().split('\n')
+git_submodule_hashes = {
+ re.search(git_hash_pattern, s).group()
+ for s in git_submodules
+}
# Parse git hashes from Bazel WORKSPACE {new_}http_archive rules
with open('WORKSPACE', 'r') as f:
- workspace_rules = [expr.value for expr in ast.parse(f.read()).body]
-
-http_archive_rules = [rule for rule in workspace_rules if rule.func.id.endswith('http_archive')]
-archive_urls = [kw.value.s for rule in http_archive_rules for kw in rule.keywords if kw.arg == 'url']
-workspace_git_hashes = {re.search(git_hash_pattern, url).group() for url in archive_urls}
+ workspace_rules = [expr.value for expr in ast.parse(f.read()).body]
+
+http_archive_rules = [
+ rule for rule in workspace_rules if rule.func.id.endswith('http_archive')
+]
+archive_urls = [
+ kw.value.s for rule in http_archive_rules for kw in rule.keywords
+ if kw.arg == 'url'
+]
+workspace_git_hashes = {
+ re.search(git_hash_pattern, url).group()
+ for url in archive_urls
+}
# Validate the equivalence of the git submodules and Bazel git dependencies. The
# condition we impose is that there is a git submodule for every dependency in
# the workspace, but not necessarily conversely. E.g. Bloaty is a dependency
# not used by any of the targets built by Bazel.
if len(workspace_git_hashes - git_submodule_hashes) > 0:
- print("Found discrepancies between git submodules and Bazel WORKSPACE dependencies")
+ print(
+ "Found discrepancies between git submodules and Bazel WORKSPACE dependencies"
+ )
sys.exit(1)
sys.exit(0)
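
check_bazel_workspace.py, reformatted above, extracts 40-character git hashes both from the output of "git submodule" and from the archive URLs in WORKSPACE, then requires every WORKSPACE hash to be backed by a submodule. A hedged sketch of that set comparison, with literal strings standing in for the real subprocess and AST parsing:

    import re

    GIT_HASH = re.compile(r'[0-9a-f]{40}')

    def hashes_in(lines):
        """Collect every 40-hex-digit hash appearing in the given strings."""
        return {m.group() for m in (GIT_HASH.search(line) for line in lines) if m}

    # Illustrative stand-ins for "git submodule" output and WORKSPACE archive URLs.
    submodule_lines = [' 5b7683f49e1e9223cf9927b24f6fd3d6bd82e3f8 third_party/zlib (v1.2.11)']
    workspace_urls = ['https://github.com/madler/zlib/archive/'
                      '5b7683f49e1e9223cf9927b24f6fd3d6bd82e3f8.tar.gz']

    missing = hashes_in(workspace_urls) - hashes_in(submodule_lines)
    assert not missing, 'WORKSPACE pins without a matching git submodule: %s' % missing
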
diff --git a/tools/run_tests/sanity/check_sources_and_headers.py b/tools/run_tests/sanity/check_sources_and_headers.py
index 986b626b49..6a704eb2e0 100755
--- a/tools/run_tests/sanity/check_sources_and_headers.py
+++ b/tools/run_tests/sanity/check_sources_and_headers.py
@@ -21,8 +21,10 @@ import re
import sys
root = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
-with open(os.path.join(root, 'tools', 'run_tests', 'generated', 'sources_and_headers.json')) as f:
- js = json.loads(f.read())
+with open(
+ os.path.join(root, 'tools', 'run_tests', 'generated',
+ 'sources_and_headers.json')) as f:
+ js = json.loads(f.read())
re_inc1 = re.compile(r'^#\s*include\s*"([^"]*)"')
assert re_inc1.match('#include "foo"').group(1) == 'foo'
@@ -31,35 +33,35 @@ assert re_inc2.match('#include <grpc++/foo>').group(1) == 'grpc++/foo'
def get_target(name):
- for target in js:
- if target['name'] == name:
- return target
- assert False, 'no target %s' % name
+ for target in js:
+ if target['name'] == name:
+ return target
+ assert False, 'no target %s' % name
def get_headers_transitive():
- """Computes set of headers transitively provided by each target"""
- target_headers_transitive = {}
- for target in js:
- target_name = target['name']
- assert not target_headers_transitive.has_key(target_name)
- target_headers_transitive[target_name] = set(target['headers'])
-
- # Make sure each target's transitive headers contain those
- # of their dependencies. If not, add them and continue doing
- # so until we get a full pass over all targets without any updates.
- closure_changed = True
- while closure_changed:
- closure_changed = False
+ """Computes set of headers transitively provided by each target"""
+ target_headers_transitive = {}
for target in js:
- target_name = target['name']
- for dep in target['deps']:
- headers = target_headers_transitive[target_name]
- old_count = len(headers)
- headers.update(target_headers_transitive[dep])
- if old_count != len(headers):
- closure_changed=True
- return target_headers_transitive
+ target_name = target['name']
+ assert not target_headers_transitive.has_key(target_name)
+ target_headers_transitive[target_name] = set(target['headers'])
+
+ # Make sure each target's transitive headers contain those
+ # of their dependencies. If not, add them and continue doing
+ # so until we get a full pass over all targets without any updates.
+ closure_changed = True
+ while closure_changed:
+ closure_changed = False
+ for target in js:
+ target_name = target['name']
+ for dep in target['deps']:
+ headers = target_headers_transitive[target_name]
+ old_count = len(headers)
+ headers.update(target_headers_transitive[dep])
+ if old_count != len(headers):
+ closure_changed = True
+ return target_headers_transitive
# precompute transitive closure of headers provided by each target
@@ -67,18 +69,23 @@ target_headers_transitive = get_headers_transitive()
def target_has_header(target, name):
- if name in target_headers_transitive[target['name']]:
- return True
- if name.startswith('absl/'):
- return True
- if name in ['src/core/lib/profiling/stap_probes.h',
- 'src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h']:
- return True
- return False
+ if name.startswith('absl/'): return True
+ # print target['name'], name
+ if name in target['headers']:
+ return True
+ for dep in target['deps']:
+ if target_has_header(get_target(dep), name):
+ return True
+ if name in [
+ 'src/core/lib/profiling/stap_probes.h',
+ 'src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h'
+ ]:
+ return True
+ return False
def produces_object(name):
- return os.path.splitext(name)[1] in ['.c', '.cc']
+ return os.path.splitext(name)[1] in ['.c', '.cc']
c_ish = {}
@@ -86,36 +93,37 @@ obj_producer_to_source = {'c': c_ish, 'c++': c_ish, 'csharp': {}}
errors = 0
for target in js:
- if not target['third_party']:
- for fn in target['src']:
- with open(os.path.join(root, fn)) as f:
- src = f.read().splitlines()
- for line in src:
- m = re_inc1.match(line)
- if m:
- if not target_has_header(target, m.group(1)):
- print (
- 'target %s (%s) does not name header %s as a dependency' % (
- target['name'], fn, m.group(1)))
- errors += 1
- m = re_inc2.match(line)
- if m:
- if not target_has_header(target, 'include/' + m.group(1)):
- print (
- 'target %s (%s) does not name header %s as a dependency' % (
- target['name'], fn, m.group(1)))
- errors += 1
- if target['type'] in ['lib', 'filegroup']:
- for fn in target['src']:
- language = target['language']
- if produces_object(fn):
- obj_base = os.path.splitext(os.path.basename(fn))[0]
- if obj_base in obj_producer_to_source[language]:
- if obj_producer_to_source[language][obj_base] != fn:
- print (
- 'target %s (%s) produces an aliased object file with %s' % (
- target['name'], fn, obj_producer_to_source[language][obj_base]))
- else:
- obj_producer_to_source[language][obj_base] = fn
+ if not target['third_party']:
+ for fn in target['src']:
+ with open(os.path.join(root, fn)) as f:
+ src = f.read().splitlines()
+ for line in src:
+ m = re_inc1.match(line)
+ if m:
+ if not target_has_header(target, m.group(1)):
+ print(
+ 'target %s (%s) does not name header %s as a dependency'
+ % (target['name'], fn, m.group(1)))
+ errors += 1
+ m = re_inc2.match(line)
+ if m:
+ if not target_has_header(target, 'include/' + m.group(1)):
+ print(
+ 'target %s (%s) does not name header %s as a dependency'
+ % (target['name'], fn, m.group(1)))
+ errors += 1
+ if target['type'] in ['lib', 'filegroup']:
+ for fn in target['src']:
+ language = target['language']
+ if produces_object(fn):
+ obj_base = os.path.splitext(os.path.basename(fn))[0]
+ if obj_base in obj_producer_to_source[language]:
+ if obj_producer_to_source[language][obj_base] != fn:
+ print(
+ 'target %s (%s) produces an aliased object file with %s'
+ % (target['name'], fn,
+ obj_producer_to_source[language][obj_base]))
+ else:
+ obj_producer_to_source[language][obj_base] = fn
assert errors == 0
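
get_headers_transitive(), reindented above, grows each target's header set with its dependencies' headers until a full pass changes nothing. The same fixed-point computation on a tiny hypothetical target list (not real entries from sources_and_headers.json):

    def transitive_headers(targets):
        """targets: dicts with 'name', 'headers', 'deps'. Returns name -> transitive header set."""
        closure = {t['name']: set(t['headers']) for t in targets}
        changed = True
        while changed:              # iterate until a full pass makes no update
            changed = False
            for t in targets:
                for dep in t['deps']:
                    before = len(closure[t['name']])
                    closure[t['name']] |= closure[dep]
                    changed = changed or len(closure[t['name']]) != before
        return closure

    demo = [{'name': 'base', 'headers': ['base.h'], 'deps': []},
            {'name': 'lib', 'headers': ['lib.h'], 'deps': ['base']},
            {'name': 'app', 'headers': [], 'deps': ['lib']}]
    assert transitive_headers(demo)['app'] == {'base.h', 'lib.h'}
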
diff --git a/tools/run_tests/sanity/check_test_filtering.py b/tools/run_tests/sanity/check_test_filtering.py
index ff4ecba8ab..c2a6399ae8 100755
--- a/tools/run_tests/sanity/check_test_filtering.py
+++ b/tools/run_tests/sanity/check_test_filtering.py
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import sys
import unittest
@@ -25,108 +24,138 @@ sys.path.insert(0, os.path.abspath('tools/run_tests/'))
from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs
import python_utils.filter_pull_request_tests as filter_pull_request_tests
-_LIST_OF_LANGUAGE_LABELS = ['c', 'c++', 'csharp', 'grpc-node', 'objc', 'php', 'php7', 'python', 'ruby']
+_LIST_OF_LANGUAGE_LABELS = [
+ 'c', 'c++', 'csharp', 'grpc-node', 'objc', 'php', 'php7', 'python', 'ruby'
+]
_LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows']
+
class TestFilteringTest(unittest.TestCase):
- def generate_all_tests(self):
- all_jobs = _create_test_jobs() + _create_portability_test_jobs()
- self.assertIsNotNone(all_jobs)
- return all_jobs
+ def generate_all_tests(self):
+ all_jobs = _create_test_jobs() + _create_portability_test_jobs()
+ self.assertIsNotNone(all_jobs)
+ return all_jobs
- def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS):
- """
+ def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS):
+ """
Default args should filter no tests because changed_files is empty and
default labels should be able to match all jobs
:param changed_files: mock list of changed_files from pull request
:param labels: list of job labels that should be skipped
"""
- all_jobs = self.generate_all_tests()
- # Replacing _get_changed_files function to allow specifying changed files in filter_tests function
- def _get_changed_files(foo):
- return changed_files
- filter_pull_request_tests._get_changed_files = _get_changed_files
- print()
- filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test")
-
- # Make sure sanity tests aren't being filtered out
- sanity_tests_in_all_jobs = 0
- sanity_tests_in_filtered_jobs = 0
- for job in all_jobs:
- if "sanity" in job.labels:
- sanity_tests_in_all_jobs += 1
- all_jobs = [job for job in all_jobs if "sanity" not in job.labels]
- for job in filtered_jobs:
- if "sanity" in job.labels:
- sanity_tests_in_filtered_jobs += 1
- filtered_jobs = [job for job in filtered_jobs if "sanity" not in job.labels]
- self.assertEquals(sanity_tests_in_all_jobs, sanity_tests_in_filtered_jobs)
-
- for label in labels:
- for job in filtered_jobs:
- self.assertNotIn(label, job.labels)
-
- jobs_matching_labels = 0
- for label in labels:
- for job in all_jobs:
- if (label in job.labels):
- jobs_matching_labels += 1
- self.assertEquals(len(filtered_jobs), len(all_jobs) - jobs_matching_labels)
-
- def test_individual_language_filters(self):
- # Changing unlisted file should trigger all languages
- self.test_filtering(['ffffoo/bar.baz'], [_LIST_OF_LANGUAGE_LABELS])
- # Changing core should trigger all tests
- self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS])
- # Testing individual languages
- self.test_filtering(['test/core/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
- filter_pull_request_tests._CORE_TEST_SUITE.labels +
- filter_pull_request_tests._CPP_TEST_SUITE.labels])
- self.test_filtering(['src/cpp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
- filter_pull_request_tests._CPP_TEST_SUITE.labels])
- self.test_filtering(['src/csharp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
- filter_pull_request_tests._CSHARP_TEST_SUITE.labels])
- self.test_filtering(['src/objective-c/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
- filter_pull_request_tests._OBJC_TEST_SUITE.labels])
- self.test_filtering(['src/php/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
- filter_pull_request_tests._PHP_TEST_SUITE.labels])
- self.test_filtering(['src/python/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
- filter_pull_request_tests._PYTHON_TEST_SUITE.labels])
- self.test_filtering(['src/ruby/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
- filter_pull_request_tests._RUBY_TEST_SUITE.labels])
-
- def test_combined_language_filters(self):
- self.test_filtering(['src/cpp/foo.bar', 'test/core/foo.bar'],
- [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
- filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in
- filter_pull_request_tests._CORE_TEST_SUITE.labels])
- self.test_filtering(['src/cpp/foo.bar', "src/csharp/foo.bar"],
- [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
- filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in
- filter_pull_request_tests._CSHARP_TEST_SUITE.labels])
- self.test_filtering(['src/objective-c/foo.bar', 'src/php/foo.bar', "src/python/foo.bar", "src/ruby/foo.bar"],
- [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
- filter_pull_request_tests._OBJC_TEST_SUITE.labels and label not in
- filter_pull_request_tests._PHP_TEST_SUITE.labels and label not in
- filter_pull_request_tests._PYTHON_TEST_SUITE.labels and label not in
- filter_pull_request_tests._RUBY_TEST_SUITE.labels])
-
- def test_platform_filter(self):
- self.test_filtering(['vsprojects/foo.bar'], [label for label in _LIST_OF_PLATFORM_LABELS if label not in
- filter_pull_request_tests._WINDOWS_TEST_SUITE.labels])
-
- def test_whitelist(self):
- whitelist = filter_pull_request_tests._WHITELIST_DICT
- files_that_should_trigger_all_tests = ['src/core/foo.bar',
- 'some_file_not_on_the_white_list',
- 'BUILD',
- 'etc/roots.pem',
- 'Makefile',
- 'tools/foo']
- for key in whitelist.keys():
- for file_name in files_that_should_trigger_all_tests:
- self.assertFalse(re.match(key, file_name))
+ all_jobs = self.generate_all_tests()
+
+ # Replacing _get_changed_files function to allow specifying changed files in filter_tests function
+ def _get_changed_files(foo):
+ return changed_files
+
+ filter_pull_request_tests._get_changed_files = _get_changed_files
+ print()
+ filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test")
+
+ # Make sure sanity tests aren't being filtered out
+ sanity_tests_in_all_jobs = 0
+ sanity_tests_in_filtered_jobs = 0
+ for job in all_jobs:
+ if "sanity" in job.labels:
+ sanity_tests_in_all_jobs += 1
+ all_jobs = [job for job in all_jobs if "sanity" not in job.labels]
+ for job in filtered_jobs:
+ if "sanity" in job.labels:
+ sanity_tests_in_filtered_jobs += 1
+ filtered_jobs = [
+ job for job in filtered_jobs if "sanity" not in job.labels
+ ]
+ self.assertEquals(sanity_tests_in_all_jobs,
+ sanity_tests_in_filtered_jobs)
+
+ for label in labels:
+ for job in filtered_jobs:
+ self.assertNotIn(label, job.labels)
+
+ jobs_matching_labels = 0
+ for label in labels:
+ for job in all_jobs:
+ if (label in job.labels):
+ jobs_matching_labels += 1
+ self.assertEquals(
+ len(filtered_jobs), len(all_jobs) - jobs_matching_labels)
+
+ def test_individual_language_filters(self):
+ # Changing unlisted file should trigger all languages
+ self.test_filtering(['ffffoo/bar.baz'], [_LIST_OF_LANGUAGE_LABELS])
+ # Changing core should trigger all tests
+ self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS])
+ # Testing individual languages
+ self.test_filtering(['test/core/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._CORE_TEST_SUITE.labels +
+ filter_pull_request_tests._CPP_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/cpp/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/csharp/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._CSHARP_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/objective-c/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._OBJC_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/php/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._PHP_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/python/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._PYTHON_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/ruby/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._RUBY_TEST_SUITE.labels
+ ])
+
+ def test_combined_language_filters(self):
+ self.test_filtering(['src/cpp/foo.bar', 'test/core/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels and
+ label not in filter_pull_request_tests._CORE_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/cpp/foo.bar', "src/csharp/foo.bar"], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels and
+ label not in filter_pull_request_tests._CSHARP_TEST_SUITE.labels
+ ])
+ self.test_filtering([
+ 'src/objective-c/foo.bar', 'src/php/foo.bar', "src/python/foo.bar",
+ "src/ruby/foo.bar"
+ ], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._OBJC_TEST_SUITE.labels
+ and label not in filter_pull_request_tests._PHP_TEST_SUITE.labels
+ and label not in filter_pull_request_tests._PYTHON_TEST_SUITE.labels
+ and label not in filter_pull_request_tests._RUBY_TEST_SUITE.labels
+ ])
+
+ def test_platform_filter(self):
+ self.test_filtering(['vsprojects/foo.bar'], [
+ label for label in _LIST_OF_PLATFORM_LABELS
+ if label not in filter_pull_request_tests._WINDOWS_TEST_SUITE.labels
+ ])
+
+ def test_whitelist(self):
+ whitelist = filter_pull_request_tests._WHITELIST_DICT
+ files_that_should_trigger_all_tests = [
+ 'src/core/foo.bar', 'some_file_not_on_the_white_list', 'BUILD',
+ 'etc/roots.pem', 'Makefile', 'tools/foo'
+ ]
+ for key in whitelist.keys():
+ for file_name in files_that_should_trigger_all_tests:
+ self.assertFalse(re.match(key, file_name))
+
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
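
The test above works by replacing filter_pull_request_tests._get_changed_files so the filter sees a chosen list of changed files instead of shelling out to git. A small sketch of that substitution pattern against a hypothetical stand-in module (the toy filtering rule below is not the real one):

    import types

    # Hypothetical stand-in for filter_pull_request_tests.
    pr_filter = types.ModuleType('pr_filter')

    def _get_changed_files(base_branch):
        raise RuntimeError('the real helper shells out to git diff')

    def filter_tests(jobs, base_branch):
        changed = pr_filter._get_changed_files(base_branch)
        # Toy rule for the sketch: a Ruby-only change keeps only jobs labeled 'ruby'.
        if changed and all(f.startswith('src/ruby/') for f in changed):
            return [j for j in jobs if 'ruby' in j]
        return list(jobs)

    pr_filter._get_changed_files = _get_changed_files
    pr_filter.filter_tests = filter_tests

    # The test swaps in a fake, then exercises the filter deterministically.
    pr_filter._get_changed_files = lambda base: ['src/ruby/client.rb']
    assert pr_filter.filter_tests(['ruby_job', 'c_job'], 'master') == ['ruby_job']
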
diff --git a/tools/run_tests/sanity/check_tracer_sanity.py b/tools/run_tests/sanity/check_tracer_sanity.py
index 997ec79d02..c4c76530de 100755
--- a/tools/run_tests/sanity/check_tracer_sanity.py
+++ b/tools/run_tests/sanity/check_tracer_sanity.py
@@ -26,21 +26,22 @@ errors = 0
tracers = []
pattern = re.compile("GRPC_TRACER_INITIALIZER\((true|false), \"(.*)\"\)")
for root, dirs, files in os.walk('src/core'):
- for filename in files:
- path = os.path.join(root, filename)
- if os.path.splitext(path)[1] != '.c': continue
- with open(path) as f:
- text = f.read()
- for o in pattern.findall(text):
- tracers.append(o[1])
+ for filename in files:
+ path = os.path.join(root, filename)
+ if os.path.splitext(path)[1] != '.c': continue
+ with open(path) as f:
+ text = f.read()
+ for o in pattern.findall(text):
+ tracers.append(o[1])
with open('doc/environment_variables.md') as f:
- text = f.read()
+ text = f.read()
for t in tracers:
if t not in text:
- print("ERROR: tracer \"%s\" is not mentioned in doc/environment_variables.md" % t)
+ print(
+ "ERROR: tracer \"%s\" is not mentioned in doc/environment_variables.md"
+ % t)
errors += 1
-
assert errors == 0
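
check_tracer_sanity.py, reindented above, collects every tracer name registered via GRPC_TRACER_INITIALIZER under src/core and fails if any name is missing from doc/environment_variables.md. A self-contained sketch of that scan, using in-memory strings in place of the real files:

    import re

    TRACER = re.compile(r'GRPC_TRACER_INITIALIZER\((?:true|false), "(.*?)"\)')

    # Hypothetical file contents standing in for src/core sources and the doc page.
    sources = {
        'src/core/foo.c': 'grpc_tracer_flag t = GRPC_TRACER_INITIALIZER(false, "http");',
        'src/core/bar.c': 'grpc_tracer_flag t = GRPC_TRACER_INITIALIZER(true, "api");',
    }
    doc_text = 'Available tracers: api, http'

    errors = 0
    for path, text in sources.items():
        for name in TRACER.findall(text):
            if name not in doc_text:
                print('ERROR: tracer "%s" is not mentioned in the docs (%s)' % (name, path))
                errors += 1
    assert errors == 0
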
diff --git a/tools/run_tests/sanity/check_version.py b/tools/run_tests/sanity/check_version.py
index b9b6bab26d..6154b2603e 100755
--- a/tools/run_tests/sanity/check_version.py
+++ b/tools/run_tests/sanity/check_version.py
@@ -31,56 +31,56 @@ sys.path.insert(0, os.path.abspath('tools/buildgen/plugins'))
from expand_version import Version
try:
- branch_name = subprocess.check_output(
- 'git rev-parse --abbrev-ref HEAD',
- shell=True)
+ branch_name = subprocess.check_output(
+ 'git rev-parse --abbrev-ref HEAD', shell=True)
except:
- print('WARNING: not a git repository')
- branch_name = None
+ print('WARNING: not a git repository')
+ branch_name = None
if branch_name is not None:
- m = re.match(r'^release-([0-9]+)_([0-9]+)$', branch_name)
- if m:
- print('RELEASE branch')
- # version number should align with the branched version
- check_version = lambda version: (
- version.major == int(m.group(1)) and
- version.minor == int(m.group(2)))
- warning = 'Version key "%%s" value "%%s" should have a major version %s and minor version %s' % (m.group(1), m.group(2))
- elif re.match(r'^debian/.*$', branch_name):
- # no additional version checks for debian branches
- check_version = lambda version: True
- else:
- # all other branches should have a -dev tag
- check_version = lambda version: version.tag == 'dev'
- warning = 'Version key "%s" value "%s" should have a -dev tag'
+ m = re.match(r'^release-([0-9]+)_([0-9]+)$', branch_name)
+ if m:
+ print('RELEASE branch')
+ # version number should align with the branched version
+ check_version = lambda version: (
+ version.major == int(m.group(1)) and
+ version.minor == int(m.group(2)))
+ warning = 'Version key "%%s" value "%%s" should have a major version %s and minor version %s' % (
+ m.group(1), m.group(2))
+ elif re.match(r'^debian/.*$', branch_name):
+ # no additional version checks for debian branches
+ check_version = lambda version: True
+ else:
+ # all other branches should have a -dev tag
+ check_version = lambda version: version.tag == 'dev'
+ warning = 'Version key "%s" value "%s" should have a -dev tag'
else:
- check_version = lambda version: True
+ check_version = lambda version: True
with open('build.yaml', 'r') as f:
- build_yaml = yaml.load(f.read())
+ build_yaml = yaml.load(f.read())
settings = build_yaml['settings']
top_version = Version(settings['version'])
if not check_version(top_version):
- errors += 1
- print(warning % ('version', top_version))
+ errors += 1
+ print(warning % ('version', top_version))
for tag, value in settings.iteritems():
- if re.match(r'^[a-z]+_version$', tag):
- value = Version(value)
- if tag != 'core_version':
- if value.major != top_version.major:
- errors += 1
- print('major version mismatch on %s: %d vs %d' % (tag, value.major,
- top_version.major))
- if value.minor != top_version.minor:
- errors += 1
- print('minor version mismatch on %s: %d vs %d' % (tag, value.minor,
- top_version.minor))
- if not check_version(value):
- errors += 1
- print(warning % (tag, value))
+ if re.match(r'^[a-z]+_version$', tag):
+ value = Version(value)
+ if tag != 'core_version':
+ if value.major != top_version.major:
+ errors += 1
+ print('major version mismatch on %s: %d vs %d' %
+ (tag, value.major, top_version.major))
+ if value.minor != top_version.minor:
+ errors += 1
+ print('minor version mismatch on %s: %d vs %d' %
+ (tag, value.minor, top_version.minor))
+ if not check_version(value):
+ errors += 1
+ print(warning % (tag, value))
sys.exit(errors)
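
The branch logic above chooses a different predicate per branch: on release-X_Y the major/minor must match the branch name, debian/* branches skip the check, and every other branch requires a -dev tag. A sketch of that dispatch with a minimal stand-in Version class (the real one comes from tools/buildgen's expand_version):

    import re

    class Version(object):
        """Illustrative stand-in for the buildgen Version class."""
        def __init__(self, s):
            core, _, self.tag = s.partition('-')
            self.major, self.minor, self.patch = (int(x) for x in core.split('.'))

    def version_checker(branch_name):
        m = re.match(r'^release-([0-9]+)_([0-9]+)$', branch_name or '')
        if m:
            return lambda v: (v.major, v.minor) == (int(m.group(1)), int(m.group(2)))
        if re.match(r'^debian/.*$', branch_name or ''):
            return lambda v: True
        return lambda v: v.tag == 'dev'

    assert version_checker('release-1_8')(Version('1.8.0'))
    assert version_checker('master')(Version('1.9.0-dev'))
    assert not version_checker('master')(Version('1.8.0'))
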
diff --git a/tools/run_tests/sanity/core_banned_functions.py b/tools/run_tests/sanity/core_banned_functions.py
index 1f13905484..9ee28964a5 100755
--- a/tools/run_tests/sanity/core_banned_functions.py
+++ b/tools/run_tests/sanity/core_banned_functions.py
@@ -36,26 +36,28 @@ BANNED_EXCEPT = {
'grpc_wsa_error(': ['src/core/lib/iomgr/error.c'],
'grpc_log_if_error(': ['src/core/lib/iomgr/error.c'],
'grpc_slice_malloc(': ['src/core/lib/slice/slice.c'],
- 'grpc_closure_create(' : ['src/core/lib/iomgr/closure.c'],
- 'grpc_closure_init(' : ['src/core/lib/iomgr/closure.c'],
- 'grpc_closure_sched(' : ['src/core/lib/iomgr/closure.c'],
- 'grpc_closure_run(' : ['src/core/lib/iomgr/closure.c'],
- 'grpc_closure_list_sched(' : ['src/core/lib/iomgr/closure.c'],
- 'gpr_getenv_silent(' : ['src/core/lib/support/log.c', 'src/core/lib/support/env_linux.c',
- 'src/core/lib/support/env_posix.c', 'src/core/lib/support/env_windows.c'],
+ 'grpc_closure_create(': ['src/core/lib/iomgr/closure.c'],
+ 'grpc_closure_init(': ['src/core/lib/iomgr/closure.c'],
+ 'grpc_closure_sched(': ['src/core/lib/iomgr/closure.c'],
+ 'grpc_closure_run(': ['src/core/lib/iomgr/closure.c'],
+ 'grpc_closure_list_sched(': ['src/core/lib/iomgr/closure.c'],
+ 'gpr_getenv_silent(': [
+ 'src/core/lib/support/log.c', 'src/core/lib/support/env_linux.c',
+ 'src/core/lib/support/env_posix.c', 'src/core/lib/support/env_windows.c'
+ ],
}
errors = 0
for root, dirs, files in os.walk('src/core'):
- for filename in files:
- path = os.path.join(root, filename)
- if os.path.splitext(path)[1] != '.c': continue
- with open(path) as f:
- text = f.read()
- for banned, exceptions in BANNED_EXCEPT.items():
- if path in exceptions: continue
- if banned in text:
- print('Illegal use of "%s" in %s' % (banned, path))
- errors += 1
+ for filename in files:
+ path = os.path.join(root, filename)
+ if os.path.splitext(path)[1] != '.c': continue
+ with open(path) as f:
+ text = f.read()
+ for banned, exceptions in BANNED_EXCEPT.items():
+ if path in exceptions: continue
+ if banned in text:
+ print('Illegal use of "%s" in %s' % (banned, path))
+ errors += 1
assert errors == 0
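
core_banned_functions.py bans direct calls to certain core functions everywhere except in the files that implement their wrappers, keyed by the BANNED_EXCEPT dict. The same scan run over hypothetical in-memory files (paths and snippets are made up):

    BANNED_EXCEPT = {
        'grpc_slice_malloc(': ['src/core/lib/slice/slice.c'],
    }
    files = {
        'src/core/lib/slice/slice.c': 'grpc_slice grpc_slice_malloc(size_t len) { /* impl */ }',
        'src/core/lib/surface/call.c': 'grpc_slice s = grpc_slice_malloc(16);',
    }

    errors = 0
    for path, text in files.items():
        for banned, allowed_in in BANNED_EXCEPT.items():
            if path in allowed_in:
                continue
            if banned in text:
                print('Illegal use of "%s" in %s' % (banned, path))
                errors += 1
    assert errors == 1   # call.c trips the check; slice.c is on its own allow list
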
diff --git a/tools/run_tests/start_port_server.py b/tools/run_tests/start_port_server.py
index 362875036f..0eeceb4ce9 100755
--- a/tools/run_tests/start_port_server.py
+++ b/tools/run_tests/start_port_server.py
@@ -13,7 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""
Wrapper around port server starting code.
diff --git a/tools/run_tests/task_runner.py b/tools/run_tests/task_runner.py
index a065bb84cb..794db6e1be 100755
--- a/tools/run_tests/task_runner.py
+++ b/tools/run_tests/task_runner.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Runs selected gRPC test/build tasks."""
from __future__ import print_function
@@ -32,52 +31,54 @@ _TARGETS += artifact_targets.targets()
_TARGETS += distribtest_targets.targets()
_TARGETS += package_targets.targets()
+
def _create_build_map():
- """Maps task names and labels to list of tasks to be built."""
- target_build_map = dict([(target.name, [target])
- for target in _TARGETS])
- if len(_TARGETS) > len(target_build_map.keys()):
- raise Exception('Target names need to be unique')
-
- label_build_map = {}
- label_build_map['all'] = [t for t in _TARGETS] # to build all targets
- for target in _TARGETS:
- for label in target.labels:
- if label in label_build_map:
- label_build_map[label].append(target)
- else:
- label_build_map[label] = [target]
-
- if set(target_build_map.keys()).intersection(label_build_map.keys()):
- raise Exception('Target names need to be distinct from label names')
- return dict( target_build_map.items() + label_build_map.items())
+ """Maps task names and labels to list of tasks to be built."""
+ target_build_map = dict([(target.name, [target]) for target in _TARGETS])
+ if len(_TARGETS) > len(target_build_map.keys()):
+ raise Exception('Target names need to be unique')
+
+ label_build_map = {}
+ label_build_map['all'] = [t for t in _TARGETS] # to build all targets
+ for target in _TARGETS:
+ for label in target.labels:
+ if label in label_build_map:
+ label_build_map[label].append(target)
+ else:
+ label_build_map[label] = [target]
+
+ if set(target_build_map.keys()).intersection(label_build_map.keys()):
+ raise Exception('Target names need to be distinct from label names')
+ return dict(target_build_map.items() + label_build_map.items())
_BUILD_MAP = _create_build_map()
argp = argparse.ArgumentParser(description='Runs build/test targets.')
-argp.add_argument('-b', '--build',
- choices=sorted(_BUILD_MAP.keys()),
- nargs='+',
- default=['all'],
- help='Target name or target label to build.')
-argp.add_argument('-f', '--filter',
- choices=sorted(_BUILD_MAP.keys()),
- nargs='+',
- default=[],
- help='Filter targets to build with AND semantics.')
+argp.add_argument(
+ '-b',
+ '--build',
+ choices=sorted(_BUILD_MAP.keys()),
+ nargs='+',
+ default=['all'],
+ help='Target name or target label to build.')
+argp.add_argument(
+ '-f',
+ '--filter',
+ choices=sorted(_BUILD_MAP.keys()),
+ nargs='+',
+ default=[],
+ help='Filter targets to build with AND semantics.')
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
-argp.add_argument('-t', '--travis',
- default=False,
- action='store_const',
- const=True)
+argp.add_argument(
+ '-t', '--travis', default=False, action='store_const', const=True)
args = argp.parse_args()
# Figure out which targets to build
targets = []
for label in args.build:
- targets += _BUILD_MAP[label]
+ targets += _BUILD_MAP[label]
# Among targets selected by -b, filter out those that don't match the filter
targets = [t for t in targets if all(f in t.labels for f in args.filter)]
@@ -86,30 +87,29 @@ targets = sorted(set(targets))
# Execute pre-build phase
prebuild_jobs = []
for target in targets:
- prebuild_jobs += target.pre_build_jobspecs()
+ prebuild_jobs += target.pre_build_jobspecs()
if prebuild_jobs:
- num_failures, _ = jobset.run(
- prebuild_jobs, newline_on_success=True, maxjobs=args.jobs)
- if num_failures != 0:
- jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
- sys.exit(1)
+ num_failures, _ = jobset.run(
+ prebuild_jobs, newline_on_success=True, maxjobs=args.jobs)
+ if num_failures != 0:
+ jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
+ sys.exit(1)
build_jobs = []
for target in targets:
- build_jobs.append(target.build_jobspec())
+ build_jobs.append(target.build_jobspec())
if not build_jobs:
- print('Nothing to build.')
- sys.exit(1)
+ print('Nothing to build.')
+ sys.exit(1)
jobset.message('START', 'Building targets.', do_newline=True)
num_failures, resultset = jobset.run(
build_jobs, newline_on_success=True, maxjobs=args.jobs)
-report_utils.render_junit_xml_report(resultset, 'report_taskrunner_sponge_log.xml',
- suite_name='tasks')
+report_utils.render_junit_xml_report(
+ resultset, 'report_taskrunner_sponge_log.xml', suite_name='tasks')
if num_failures == 0:
- jobset.message('SUCCESS', 'All targets built successfully.',
- do_newline=True)
+ jobset.message(
+ 'SUCCESS', 'All targets built successfully.', do_newline=True)
else:
- jobset.message('FAILED', 'Failed to build targets.',
- do_newline=True)
- sys.exit(1)
+ jobset.message('FAILED', 'Failed to build targets.', do_newline=True)
+ sys.exit(1)
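
_create_build_map(), reindented above, indexes every target by its unique name and also by each of its labels (plus a catch-all 'all'), refusing any collision between the two key sets so that -b arguments stay unambiguous. A sketch of that construction with hypothetical targets:

    import collections

    Target = collections.namedtuple('Target', ['name', 'labels'])

    def create_build_map(targets):
        by_name = {t.name: [t] for t in targets}
        if len(by_name) < len(targets):
            raise Exception('Target names need to be unique')
        by_label = {'all': list(targets)}          # 'all' selects every target
        for t in targets:
            for label in t.labels:
                by_label.setdefault(label, []).append(t)
        if set(by_name) & set(by_label):
            raise Exception('Target names need to be distinct from label names')
        build_map = dict(by_name)
        build_map.update(by_label)
        return build_map

    demo = [Target('protoc_linux_x64', ['protoc', 'linux']),
            Target('csharp_ext_macos_x64', ['csharp', 'macos'])]
    assert len(create_build_map(demo)['all']) == 2
    assert create_build_map(demo)['linux'] == [demo[0]]
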