Diffstat (limited to 'tools/run_tests')
-rw-r--r--  tools/run_tests/generated/sources_and_headers.json  | 161
-rw-r--r--  tools/run_tests/generated/tests.json                | 197
-rw-r--r--  tools/run_tests/helper_scripts/pre_build_csharp.bat |  12
-rwxr-xr-x  tools/run_tests/helper_scripts/pre_build_csharp.sh  |  12
-rw-r--r--  tools/run_tests/performance/scenario_config.py      |  11
-rwxr-xr-x  tools/run_tests/python_utils/jobset.py              |   7
-rw-r--r--  tools/run_tests/python_utils/start_port_server.py   |  15
-rwxr-xr-x  tools/run_tests/run_interop_tests.py                |  16
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py               |  68
-rwxr-xr-x  tools/run_tests/run_performance_tests.py            |   2
-rwxr-xr-x  tools/run_tests/run_tests.py                        |  28
-rwxr-xr-x  tools/run_tests/run_tests_matrix.py                 |  12
-rwxr-xr-x  tools/run_tests/sanity/check_submodules.sh          |   2
-rwxr-xr-x  tools/run_tests/start_port_server.py                |  46
14 files changed, 492 insertions, 97 deletions
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index 1172eaf353..22cf618c22 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -739,6 +739,21 @@
"headers": [],
"is_filegroup": false,
"language": "c",
+ "name": "gpr_spinlock_test",
+ "src": [
+ "test/core/support/spinlock_test.c"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "gpr",
+ "gpr_test_util"
+ ],
+ "headers": [],
+ "is_filegroup": false,
+ "language": "c",
"name": "gpr_stack_lockfree_test",
"src": [
"test/core/support/stack_lockfree_test.c"
@@ -1652,6 +1667,23 @@
"headers": [],
"is_filegroup": false,
"language": "c",
+ "name": "parse_address_test",
+ "src": [
+ "test/core/client_channel/parse_address_test.c"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "gpr",
+ "gpr_test_util",
+ "grpc",
+ "grpc_test_util"
+ ],
+ "headers": [],
+ "is_filegroup": false,
+ "language": "c",
"name": "percent_decode_fuzzer",
"src": [
"test/core/slice/percent_decode_fuzzer.c"
@@ -2027,6 +2059,23 @@
"headers": [],
"is_filegroup": false,
"language": "c",
+ "name": "stream_owned_slice_test",
+ "src": [
+ "test/core/transport/stream_owned_slice_test.c"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "gpr",
+ "gpr_test_util",
+ "grpc",
+ "grpc_test_util"
+ ],
+ "headers": [],
+ "is_filegroup": false,
+ "language": "c",
"name": "tcp_client_posix_test",
"src": [
"test/core/iomgr/tcp_client_posix_test.c"
@@ -2371,6 +2420,7 @@
"grpc",
"grpc++",
"grpc++_test_util",
+ "grpc_benchmark",
"grpc_test_util"
],
"headers": [],
@@ -2391,6 +2441,7 @@
"grpc",
"grpc++",
"grpc++_test_util",
+ "grpc_benchmark",
"grpc_test_util"
],
"headers": [],
@@ -2411,6 +2462,7 @@
"grpc",
"grpc++",
"grpc++_test_util",
+ "grpc_benchmark",
"grpc_test_util"
],
"headers": [],
@@ -2431,6 +2483,7 @@
"grpc",
"grpc++",
"grpc++_test_util",
+ "grpc_benchmark",
"grpc_test_util"
],
"headers": [],
@@ -2451,6 +2504,7 @@
"grpc",
"grpc++",
"grpc++_test_util",
+ "grpc_benchmark",
"grpc_test_util"
],
"headers": [],
@@ -2471,14 +2525,15 @@
"grpc",
"grpc++",
"grpc++_test_util",
+ "grpc_benchmark",
"grpc_test_util"
],
"headers": [],
"is_filegroup": false,
"language": "c++",
- "name": "bm_fullstack",
+ "name": "bm_fullstack_streaming_ping_pong",
"src": [
- "test/cpp/microbenchmarks/bm_fullstack.cc"
+ "test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.cc"
],
"third_party": false,
"type": "target"
@@ -2489,6 +2544,72 @@
"gpr",
"gpr_test_util",
"grpc",
+ "grpc++",
+ "grpc++_test_util",
+ "grpc_benchmark",
+ "grpc_test_util"
+ ],
+ "headers": [],
+ "is_filegroup": false,
+ "language": "c++",
+ "name": "bm_fullstack_streaming_pump",
+ "src": [
+ "test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "benchmark",
+ "gpr",
+ "gpr_test_util",
+ "grpc",
+ "grpc++",
+ "grpc++_test_util",
+ "grpc_benchmark",
+ "grpc_test_util"
+ ],
+ "headers": [],
+ "is_filegroup": false,
+ "language": "c++",
+ "name": "bm_fullstack_trickle",
+ "src": [
+ "test/cpp/microbenchmarks/bm_fullstack_trickle.cc"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "benchmark",
+ "gpr",
+ "gpr_test_util",
+ "grpc",
+ "grpc++",
+ "grpc++_test_util",
+ "grpc_benchmark",
+ "grpc_test_util"
+ ],
+ "headers": [],
+ "is_filegroup": false,
+ "language": "c++",
+ "name": "bm_fullstack_unary_ping_pong",
+ "src": [
+ "test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.cc"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "benchmark",
+ "gpr",
+ "gpr_test_util",
+ "grpc",
+ "grpc++",
+ "grpc++_test_util",
+ "grpc_benchmark",
"grpc_test_util"
],
"headers": [],
@@ -5694,6 +5815,30 @@
},
{
"deps": [
+ "benchmark",
+ "grpc",
+ "grpc++",
+ "grpc_test_util"
+ ],
+ "headers": [
+ "test/cpp/microbenchmarks/fullstack_context_mutators.h",
+ "test/cpp/microbenchmarks/fullstack_fixtures.h",
+ "test/cpp/microbenchmarks/helpers.h"
+ ],
+ "is_filegroup": false,
+ "language": "c++",
+ "name": "grpc_benchmark",
+ "src": [
+ "test/cpp/microbenchmarks/fullstack_context_mutators.h",
+ "test/cpp/microbenchmarks/fullstack_fixtures.h",
+ "test/cpp/microbenchmarks/helpers.cc",
+ "test/cpp/microbenchmarks/helpers.h"
+ ],
+ "third_party": false,
+ "type": "lib"
+ },
+ {
+ "deps": [
"grpc++",
"grpc++_config_proto",
"grpc++_proto_reflection_desc_db",
@@ -7062,6 +7207,7 @@
"src/core/lib/support/env.h",
"src/core/lib/support/mpscq.h",
"src/core/lib/support/murmur_hash.h",
+ "src/core/lib/support/spinlock.h",
"src/core/lib/support/stack_lockfree.h",
"src/core/lib/support/string.h",
"src/core/lib/support/string_windows.h",
@@ -7127,6 +7273,7 @@
"src/core/lib/support/mpscq.h",
"src/core/lib/support/murmur_hash.c",
"src/core/lib/support/murmur_hash.h",
+ "src/core/lib/support/spinlock.h",
"src/core/lib/support/stack_lockfree.c",
"src/core/lib/support/stack_lockfree.h",
"src/core/lib/support/string.c",
@@ -7895,7 +8042,7 @@
"headers": [
"test/core/end2end/cq_verifier.h",
"test/core/end2end/fake_resolver.h",
- "test/core/end2end/fixtures/http_proxy.h",
+ "test/core/end2end/fixtures/http_proxy_fixture.h",
"test/core/end2end/fixtures/proxy.h",
"test/core/iomgr/endpoint_tests.h",
"test/core/util/debugger_macros.h",
@@ -7917,8 +8064,8 @@
"test/core/end2end/cq_verifier.h",
"test/core/end2end/fake_resolver.c",
"test/core/end2end/fake_resolver.h",
- "test/core/end2end/fixtures/http_proxy.c",
- "test/core/end2end/fixtures/http_proxy.h",
+ "test/core/end2end/fixtures/http_proxy_fixture.c",
+ "test/core/end2end/fixtures/http_proxy_fixture.h",
"test/core/end2end/fixtures/proxy.c",
"test/core/end2end/fixtures/proxy.h",
"test/core/iomgr/endpoint_tests.c",
@@ -7935,12 +8082,10 @@
"test/core/util/parse_hexstring.h",
"test/core/util/passthru_endpoint.c",
"test/core/util/passthru_endpoint.h",
+ "test/core/util/port.c",
"test/core/util/port.h",
- "test/core/util/port_posix.c",
"test/core/util/port_server_client.c",
"test/core/util/port_server_client.h",
- "test/core/util/port_uv.c",
- "test/core/util/port_windows.c",
"test/core/util/slice_splitter.c",
"test/core/util/slice_splitter.h",
"test/core/util/trickle_endpoint.c",
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index 7a644f4a98..402dabc554 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -845,6 +845,28 @@
"posix",
"windows"
],
+ "cpu_cost": 10,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "gtest": false,
+ "language": "c",
+ "name": "gpr_spinlock_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix",
+ "windows"
+ ]
+ },
+ {
+ "args": [],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix",
+ "windows"
+ ],
"cpu_cost": 7,
"exclude_configs": [],
"exclude_iomgrs": [],
@@ -1711,6 +1733,28 @@
"flaky": false,
"gtest": false,
"language": "c",
+ "name": "parse_address_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix",
+ "windows"
+ ]
+ },
+ {
+ "args": [],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix",
+ "windows"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "gtest": false,
+ "language": "c",
"name": "percent_encoding_test",
"platforms": [
"linux",
@@ -2098,6 +2142,28 @@
"ci_platforms": [
"linux",
"mac",
+ "posix",
+ "windows"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "gtest": false,
+ "language": "c",
+ "name": "stream_owned_slice_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix",
+ "windows"
+ ]
+ },
+ {
+ "args": [],
+ "ci_platforms": [
+ "linux",
+ "mac",
"posix"
],
"cpu_cost": 0.5,
@@ -2646,7 +2712,88 @@
"flaky": false,
"gtest": false,
"language": "c++",
- "name": "bm_fullstack",
+ "name": "bm_fullstack_streaming_ping_pong",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "timeout_seconds": 1200
+ },
+ {
+ "args": [
+ "--benchmark_min_time=0"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "excluded_poll_engines": [
+ "poll",
+ "poll-cv"
+ ],
+ "flaky": false,
+ "gtest": false,
+ "language": "c++",
+ "name": "bm_fullstack_streaming_pump",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "timeout_seconds": 1200
+ },
+ {
+ "args": [
+ "--benchmark_min_time=0"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "excluded_poll_engines": [
+ "poll",
+ "poll-cv"
+ ],
+ "flaky": false,
+ "gtest": false,
+ "language": "c++",
+ "name": "bm_fullstack_trickle",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "timeout_seconds": 1200
+ },
+ {
+ "args": [
+ "--benchmark_min_time=0"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "excluded_poll_engines": [
+ "poll",
+ "poll-cv"
+ ],
+ "flaky": false,
+ "gtest": false,
+ "language": "c++",
+ "name": "bm_fullstack_unary_ping_pong",
"platforms": [
"linux",
"mac",
@@ -2983,7 +3130,9 @@
]
},
{
- "args": [],
+ "args": [
+ "--generated_file_path=gens/src/proto/grpc/testing/compiler_test.grpc.pb.h"
+ ],
"ci_platforms": [
"linux",
"mac",
@@ -80516,6 +80665,50 @@
},
{
"args": [
+ "test/core/end2end/fuzzers/api_fuzzer_corpus/clusterfuzz-testcase-5834320218423296"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [
+ "tsan"
+ ],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "api_fuzzer_one_entry",
+ "platforms": [
+ "linux"
+ ],
+ "uses_polling": false
+ },
+ {
+ "args": [
+ "test/core/end2end/fuzzers/api_fuzzer_corpus/clusterfuzz-testcase-6520142139752448"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [
+ "tsan"
+ ],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "api_fuzzer_one_entry",
+ "platforms": [
+ "linux"
+ ],
+ "uses_polling": false
+ },
+ {
+ "args": [
"test/core/end2end/fuzzers/api_fuzzer_corpus/crash-0597bbdd657fa4ed14443994c9147a1a7bbc205f"
],
"ci_platforms": [
diff --git a/tools/run_tests/helper_scripts/pre_build_csharp.bat b/tools/run_tests/helper_scripts/pre_build_csharp.bat
index f37f63b584..99df1c6626 100644
--- a/tools/run_tests/helper_scripts/pre_build_csharp.bat
+++ b/tools/run_tests/helper_scripts/pre_build_csharp.bat
@@ -89,18 +89,6 @@ if exist %NUGET% (
%NUGET% restore -PackagesDirectory ../packages || goto :error
cd ..
- cd Grpc.IntegrationTesting.Client || goto :error
- %NUGET% restore -PackagesDirectory ../packages || goto :error
- cd ..
-
- cd Grpc.IntegrationTesting.QpsWorker || goto :error
- %NUGET% restore -PackagesDirectory ../packages || goto :error
- cd ..
-
- cd Grpc.IntegrationTesting.StressClient || goto :error
- %NUGET% restore -PackagesDirectory ../packages || goto :error
- cd ..
-
cd Grpc.IntegrationTesting || goto :error
%NUGET% restore -PackagesDirectory ../packages || goto :error
diff --git a/tools/run_tests/helper_scripts/pre_build_csharp.sh b/tools/run_tests/helper_scripts/pre_build_csharp.sh
index 1f808556f4..d7665e15af 100755
--- a/tools/run_tests/helper_scripts/pre_build_csharp.sh
+++ b/tools/run_tests/helper_scripts/pre_build_csharp.sh
@@ -73,18 +73,6 @@ then
nuget restore -PackagesDirectory ../packages
cd ..
- cd Grpc.IntegrationTesting.Client
- nuget restore -PackagesDirectory ../packages
- cd ..
-
- cd Grpc.IntegrationTesting.QpsWorker
- nuget restore -PackagesDirectory ../packages
- cd ..
-
- cd Grpc.IntegrationTesting.StressClient
- nuget restore -PackagesDirectory ../packages
- cd ..
-
cd Grpc.IntegrationTesting
nuget restore -PackagesDirectory ../packages
cd ..
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index 865125fcd7..830b8c0849 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -413,11 +413,12 @@ class NodeLanguage:
client_type='ASYNC_CLIENT', server_type='async_server',
client_language='c++')
- yield _ping_pong_scenario(
- 'node_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async',
- categories=[SCALABLE, SMOKETEST])
+ # TODO(murgatroid99): fix bugs with this scenario and re-enable it
+ # yield _ping_pong_scenario(
+ # 'node_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
+ # client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+ # unconstrained_client='async',
+ # categories=[SCALABLE, SMOKETEST])
# TODO(jtattermusch): make this scenario work
#yield _ping_pong_scenario(
diff --git a/tools/run_tests/python_utils/jobset.py b/tools/run_tests/python_utils/jobset.py
index 7b2c62d1a2..f3047431e2 100755
--- a/tools/run_tests/python_utils/jobset.py
+++ b/tools/run_tests/python_utils/jobset.py
@@ -31,6 +31,7 @@
from __future__ import print_function
+import logging
import multiprocessing
import os
import platform
@@ -128,6 +129,8 @@ _TAG_COLOR = {
'SKIPPED': 'cyan'
}
+_FORMAT = '%(asctime)-15s %(message)s'
+logging.basicConfig(level=logging.INFO, format=_FORMAT)
def message(tag, msg, explanatory_text=None, do_newline=False):
if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
@@ -137,8 +140,8 @@ def message(tag, msg, explanatory_text=None, do_newline=False):
try:
if platform_string() == 'windows' or not sys.stdout.isatty():
if explanatory_text:
- print(explanatory_text)
- print('%s: %s' % (tag, msg))
+ logging.info(explanatory_text)
+ logging.info('%s: %s', tag, msg)
else:
sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
_BEGINNING_OF_LINE,
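
Note on the jobset.py hunk above: on Windows, or whenever stdout is not a
tty, status messages now go through the logging module instead of bare
print(), so every line picks up a timestamp from the '%(asctime)-15s'
format. A minimal stdlib-only sketch of that behavior (the tag and test
name here are illustrative):

    import logging

    _FORMAT = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=logging.INFO, format=_FORMAT)

    # Mirrors the non-tty branch of message() above.
    logging.info('%s: %s', 'SUCCESS', 'gpr_spinlock_test')
    # e.g. "2017-02-22 10:15:42,123 SUCCESS: gpr_spinlock_test"
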
diff --git a/tools/run_tests/python_utils/start_port_server.py b/tools/run_tests/python_utils/start_port_server.py
index 4c9f6aac63..deb7354438 100644
--- a/tools/run_tests/python_utils/start_port_server.py
+++ b/tools/run_tests/python_utils/start_port_server.py
@@ -40,7 +40,10 @@ import tempfile
import time
-def start_port_server(port_server_port):
+# must be synchronized with test/core/utils/port_server_client.h
+_PORT_SERVER_PORT = 32766
+
+def start_port_server():
# check if a compatible port server is running
# if incompatible (version mismatch) ==> start a new one
# if not running ==> start a new one
@@ -48,7 +51,7 @@ def start_port_server(port_server_port):
try:
version = int(
urllib.request.urlopen(
- 'http://localhost:%d/version_number' % port_server_port,
+ 'http://localhost:%d/version_number' % _PORT_SERVER_PORT,
timeout=10).read())
logging.info('detected port server running version %d', version)
running = True
@@ -67,7 +70,7 @@ def start_port_server(port_server_port):
if not running:
logging.info('port_server version mismatch: killing the old one')
urllib.request.urlopen('http://localhost:%d/quitquitquit' %
- port_server_port).read()
+ _PORT_SERVER_PORT).read()
time.sleep(1)
if not running:
fd, logfile = tempfile.mkstemp()
@@ -76,7 +79,7 @@ def start_port_server(port_server_port):
args = [
sys.executable,
os.path.abspath('tools/run_tests/python_utils/port_server.py'),
- '-p', '%d' % port_server_port, '-l', logfile
+ '-p', '%d' % _PORT_SERVER_PORT, '-l', logfile
]
env = dict(os.environ)
env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
@@ -107,7 +110,7 @@ def start_port_server(port_server_port):
time.sleep(1)
try:
urllib.request.urlopen(
- 'http://localhost:%d/get' % port_server_port,
+ 'http://localhost:%d/get' % _PORT_SERVER_PORT,
timeout=1).read()
logging.info(
'last ditch attempt to contact port server succeeded')
@@ -119,7 +122,7 @@ def start_port_server(port_server_port):
print(port_log)
sys.exit(1)
try:
- port_server_url = 'http://localhost:%d/get' % port_server_port
+ port_server_url = 'http://localhost:%d/get' % _PORT_SERVER_PORT
urllib.request.urlopen(port_server_url, timeout=1).read()
logging.info('port server is up and ready')
break
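
Note on the start_port_server.py hunk above: the port server port is now
pinned to 32766 in one place (kept in sync with
test/core/utils/port_server_client.h, per the comment) instead of being
threaded through as an argument, which is what lets the
GRPC_TEST_PORT_SERVER plumbing disappear from the callers further down. A
minimal sketch of the HTTP endpoints the script polls, assuming both return
plain-text integers as the int(...read()) calls in the hunk imply:

    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib2 import urlopen         # Python 2

    _PORT_SERVER_PORT = 32766  # must match port_server_client.h

    # Version handshake; a stale server is asked to exit via /quitquitquit
    # and a fresh one is spawned, as in the hunk above.
    version = int(urlopen('http://localhost:%d/version_number'
                          % _PORT_SERVER_PORT, timeout=10).read())

    # Tests then lease a free port from /get.
    port = int(urlopen('http://localhost:%d/get'
                       % _PORT_SERVER_PORT, timeout=1).read())
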
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index b47dc1e8f2..0d5bec1d67 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -77,11 +77,15 @@ class CXXLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
+ self.http2_cwd = None
self.safename = 'cxx'
def client_cmd(self, args):
return ['bins/opt/interop_client'] + args
+ def client_cmd_http2interop(self, args):
+ return ['bins/opt/http2_client'] + args
+
def cloud_to_prod_env(self):
return {}
@@ -164,6 +168,7 @@ class JavaLanguage:
def __init__(self):
self.client_cwd = '../grpc-java'
self.server_cwd = '../grpc-java'
+ self.http2_cwd = '../grpc-java'
self.safename = str(self)
def client_cmd(self, args):
@@ -197,11 +202,15 @@ class GoLanguage:
# TODO: this relies on running inside docker
self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
+ self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
self.safename = str(self)
def client_cmd(self, args):
return ['go', 'run', 'client.go'] + args
+ def client_cmd_http2interop(self, args):
+ return ['go', 'run', 'negative_http2_client.go'] + args
+
def cloud_to_prod_env(self):
return {}
@@ -393,6 +402,7 @@ class PythonLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
+ self.http2_cwd = None
self.safename = str(self)
def client_cmd(self, args):
@@ -468,8 +478,7 @@ _HTTP2_TEST_CASES = ['tls', 'framing']
_HTTP2_BADSERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
'goaway', 'ping', 'max_streams']
-# TODO: Add python once the tests are fixed.
-_LANGUAGES_FOR_HTTP2_BADSERVER_TESTS = ['java']
+_LANGUAGES_FOR_HTTP2_BADSERVER_TESTS = ['java', 'go', 'python', 'c++']
DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
@@ -605,11 +614,12 @@ def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
client_options = common_options + ['--server_port=%s' %
(int(server_port)+offset)]
cmdline = bash_cmdline(language.client_cmd_http2interop(client_options))
+ cwd = language.http2_cwd
else:
client_options = interop_only_options + common_options + ['--server_port=%s' % server_port]
cmdline = bash_cmdline(language.client_cmd(client_options))
+ cwd = language.client_cwd
- cwd = language.client_cwd
environ = language.global_env()
if docker_image:
container_name = dockerjob.random_name('interop_client_%s' % language.safename)
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 4307906a7e..4fb1d5fc7c 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -44,8 +44,7 @@ os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
os.makedirs('reports')
-port_server_port = 32766
-start_port_server.start_port_server(port_server_port)
+start_port_server.start_port_server()
def fnize(s):
out = ''
@@ -110,8 +109,7 @@ def collect_latency(bm_name, args):
if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
# run up to half the cpu count: each benchmark can use up to two cores
# (one for the microbenchmark, one for the data flush)
- jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
- add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+ jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
benchmarks = []
@@ -119,8 +117,7 @@ def collect_latency(bm_name, args):
cleanup = []
# run the remaining benchmarks that weren't flushed
if len(benchmarks):
- jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
- add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+ jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
@@ -156,8 +153,7 @@ def collect_perf(bm_name, args):
if len(benchmarks) >= 20:
# run up to half the cpu count: each benchmark can use up to two cores
# (one for the microbenchmark, one for the data flush)
- jobset.run(benchmarks, maxjobs=1,
- add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+ jobset.run(benchmarks, maxjobs=1)
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
benchmarks = []
@@ -165,25 +161,29 @@ def collect_perf(bm_name, args):
cleanup = []
# run the remaining benchmarks that weren't flushed
if len(benchmarks):
- jobset.run(benchmarks, maxjobs=1,
- add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+ jobset.run(benchmarks, maxjobs=1)
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
-def collect_summary(bm_name, args):
- heading('Summary: %s' % bm_name)
+def run_summary(bm_name, cfg, base_json_name):
subprocess.check_call(
['make', bm_name,
- 'CONFIG=counters', '-j', '%d' % multiprocessing.cpu_count()])
- cmd = ['bins/counters/%s' % bm_name,
- '--benchmark_out=out.json',
+ 'CONFIG=%s' % cfg, '-j', '%d' % multiprocessing.cpu_count()])
+ cmd = ['bins/%s/%s' % (cfg, bm_name),
+ '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
'--benchmark_out_format=json']
if args.summary_time is not None:
cmd += ['--benchmark_min_time=%d' % args.summary_time]
- text(subprocess.check_output(cmd))
+ return subprocess.check_output(cmd)
+
+def collect_summary(bm_name, args):
+ heading('Summary: %s [no counters]' % bm_name)
+ text(run_summary(bm_name, 'opt', 'out'))
+ heading('Summary: %s [with counters]' % bm_name)
+ text(run_summary(bm_name, 'counters', 'out'))
if args.bigquery_upload:
with open('out.csv', 'w') as f:
- f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', 'out.json']))
+ f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', 'out.counters.json', 'out.opt.json']))
subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', 'out.csv'])
collectors = {
@@ -195,20 +195,28 @@ collectors = {
argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
argp.add_argument('-c', '--collect',
choices=sorted(collectors.keys()),
- nargs='+',
+ nargs='*',
default=sorted(collectors.keys()),
help='Which collectors should be run against each benchmark')
argp.add_argument('-b', '--benchmarks',
- default=['bm_fullstack',
+ default=['bm_fullstack_unary_ping_pong',
+ 'bm_fullstack_streaming_ping_pong',
+ 'bm_fullstack_streaming_pump',
'bm_closure',
'bm_cq',
'bm_call_create',
'bm_error',
'bm_chttp2_hpack',
- 'bm_metadata'],
+ 'bm_metadata',
+ 'bm_fullstack_trickle',
+ ],
nargs='+',
type=str,
help='Which microbenchmarks should be run')
+argp.add_argument('--diff_perf',
+ default=None,
+ type=str,
+ help='Diff microbenchmarks against this git revision')
argp.add_argument('--bigquery_upload',
default=False,
action='store_const',
@@ -223,6 +231,26 @@ args = argp.parse_args()
for bm_name in args.benchmarks:
for collect in args.collect:
collectors[collect](bm_name, args)
+if args.diff_perf:
+ for bm_name in args.benchmarks:
+ run_summary(bm_name, 'opt', '%s.new' % bm_name)
+ where_am_i = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+ subprocess.check_call(['git', 'checkout', args.diff_perf])
+ comparables = []
+ subprocess.check_call(['make', 'clean'])
+ try:
+ for bm_name in args.benchmarks:
+ try:
+ run_summary(bm_name, 'opt', '%s.old' % bm_name)
+ comparables.append(bm_name)
+ except subprocess.CalledProcessError, e:
+ pass
+ finally:
+ subprocess.check_call(['git', 'checkout', where_am_i])
+ for bm_name in comparables:
+ subprocess.check_call(['third_party/benchmark/tools/compare_bench.py',
+ '%s.new.opt.json' % bm_name,
+ '%s.old.opt.json' % bm_name])
index_html += "</body>\n</html>\n"
with open('reports/index.html', 'w') as f:
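
Note on the --diff_perf flow above: each run_summary() call emits a Google
benchmark JSON file named '<base>.<config>.json', so the comparison feeds
compare_bench.py a '<bm>.new.opt.json' / '<bm>.old.opt.json' pair per
benchmark. A rough sketch of the kind of comparison such files support; the
field names follow the benchmark library's JSON output format, and the
percentage report is illustrative rather than compare_bench.py's exact
output:

    import json

    def load_cpu_times(path):
        # --benchmark_out_format=json writes {"context": ..., "benchmarks": [...]}.
        with open(path) as f:
            return {b['name']: b['cpu_time'] for b in json.load(f)['benchmarks']}

    new = load_cpu_times('bm_error.new.opt.json')
    old = load_cpu_times('bm_error.old.opt.json')
    for name in sorted(set(new) & set(old)):
        delta = 100.0 * (new[name] - old[name]) / old[name]
        print('%s: %+.1f%% cpu_time' % (name, delta))
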
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 7c04d228ba..ee4102c591 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -101,7 +101,7 @@ def create_qpsworker_job(language, shortname=None, port=10000, remote_host=None,
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
ssh_cmd = ['ssh']
cmdline = ['timeout', '%s' % (worker_timeout + 30)] + cmdline
- ssh_cmd.extend([str(user_at_host), 'cd ~/performance_workspace/grpc/ && %s' % ' '.join(cmdline)])
+ ssh_cmd.extend([str(user_at_host), 'cd ~/performance_workspace/grpc/ && tools/run_tests/start_port_server.py && %s' % ' '.join(cmdline)])
cmdline = ssh_cmd
jobspec = jobset.JobSpec(
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 9e9af59c25..43b8f8184e 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -307,9 +307,9 @@ class CLanguage(object):
assert base is not None
assert line[1] == ' '
test = base + line.strip()
- cmdline = [binary] + ['--gtest_filter=%s' % test]
+ cmdline = [binary, '--gtest_filter=%s' % test] + target['args']
out.append(self.config.job_spec(cmdline,
- shortname='%s --gtest_filter=%s %s' % (binary, test, shortname_ext),
+ shortname='%s %s' % (' '.join(cmdline), shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
environ=env))
@@ -692,18 +692,11 @@ class RubyLanguage(object):
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
- #TODO(apolcyn) turn mac ruby tests back on once ruby 2.4 issues done
- if platform_string() == 'mac':
- print('skipping ruby test_specs on mac until running on 2.4')
- return []
return [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
- if platform_string() == 'mac':
- print('skipping ruby pre_build_steps on mac until running on 2.4')
- return []
return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
@@ -713,15 +706,9 @@ class RubyLanguage(object):
return []
def build_steps(self):
- if platform_string() == 'mac':
- print('skipping ruby build_steps on mac until running on 2.4')
- return []
return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def post_tests_steps(self):
- if platform_string() == 'mac':
- print('skipping ruby post_test_steps on mac until running on 2.4')
- return []
return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
@@ -1311,7 +1298,9 @@ if args.use_docker:
if not args.travis:
env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
- run_shell_command('tools/run_tests/dockerize/build_docker_and_run_tests.sh', env=env)
+ subprocess.check_call('tools/run_tests/dockerize/build_docker_and_run_tests.sh',
+ shell=True,
+ env=env)
sys.exit(0)
_check_arch_option(args.arch)
@@ -1451,8 +1440,7 @@ def _build_and_run(
# start antagonists
antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)]
- port_server_port = 32766
- start_port_server.start_port_server(port_server_port)
+ start_port_server.start_port_server()
resultset = None
num_test_failures = 0
try:
@@ -1479,10 +1467,9 @@ def _build_and_run(
sample_size = int(num_jobs * args.sample_percent/100.0)
massaged_one_run = random.sample(massaged_one_run, sample_size)
if not isclose(args.sample_percent, 100.0):
+ assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
print("Running %d tests out of %d (~%d%%)" %
(sample_size, num_jobs, args.sample_percent))
- else:
- assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
if infinite_runs:
assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
@@ -1495,7 +1482,6 @@ def _build_and_run(
all_runs, check_cancelled, newline_on_success=newline_on_success,
travis=args.travis, maxjobs=args.jobs,
stop_on_failure=args.stop_on_failure,
- add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port},
quiet_success=args.quiet_success)
if resultset:
for k, v in sorted(resultset.items()):
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index bc4fdaba71..47422451f8 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -49,6 +49,9 @@ _RUNTESTS_TIMEOUT = 4*60*60
# Number of jobs assigned to each run_tests.py instance
_DEFAULT_INNER_JOBS = 2
+# report suffix is important for reports to get picked up by internal CI
+_REPORT_SUFFIX = 'sponge_log.xml'
+
def _docker_jobspec(name, runtests_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
"""Run a single instance of run_tests.py in a docker container"""
@@ -57,7 +60,7 @@ def _docker_jobspec(name, runtests_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
'--use_docker',
'-t',
'-j', str(inner_jobs),
- '-x', 'report_%s.xml' % name,
+ '-x', 'report_%s_%s' % (name, _REPORT_SUFFIX),
'--report_suite_name', '%s' % name] + runtests_args,
shortname='run_tests_%s' % name,
timeout_seconds=_RUNTESTS_TIMEOUT)
@@ -70,10 +73,11 @@ def _workspace_jobspec(name, runtests_args=[], workspace_name=None, inner_jobs=_
workspace_name = 'workspace_%s' % name
env = {'WORKSPACE_NAME': workspace_name}
test_job = jobset.JobSpec(
- cmdline=['tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
+ cmdline=['bash',
+ 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
'-t',
'-j', str(inner_jobs),
- '-x', '../report_%s.xml' % name,
+ '-x', '../report_%s_%s' % (name, _REPORT_SUFFIX),
'--report_suite_name', '%s' % name] + runtests_args,
environ=env,
shortname='run_tests_%s' % name,
@@ -414,7 +418,7 @@ if __name__ == "__main__":
skipped_results = jobset.run(skipped_jobs,
skip_jobs=True)
resultset.update(skipped_results)
- report_utils.render_junit_xml_report(resultset, 'report.xml',
+ report_utils.render_junit_xml_report(resultset, 'report_%s' % _REPORT_SUFFIX,
suite_name='aggregate_tests')
if num_failures == 0:
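
Note on the _REPORT_SUFFIX hunk above: every JUnit report name now ends in
'sponge_log.xml' so the internal CI collects it. What the three call sites
produce, with an illustrative job name:

    _REPORT_SUFFIX = 'sponge_log.xml'
    name = 'c_linux_dbg'  # example only; real names come from the test matrix
    print('report_%s_%s' % (name, _REPORT_SUFFIX))     # report_c_linux_dbg_sponge_log.xml
    print('../report_%s_%s' % (name, _REPORT_SUFFIX))  # workspace variant
    print('report_%s' % _REPORT_SUFFIX)                # aggregate: report_sponge_log.xml
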
diff --git a/tools/run_tests/sanity/check_submodules.sh b/tools/run_tests/sanity/check_submodules.sh
index cfe4e2731c..3c5ba16b93 100755
--- a/tools/run_tests/sanity/check_submodules.sh
+++ b/tools/run_tests/sanity/check_submodules.sh
@@ -46,7 +46,7 @@ cat << EOF | awk '{ print $1 }' | sort > $want_submodules
886e7d75368e3f4fab3f4d0d3584e4abfc557755 third_party/boringssl-with-bazel (version_for_cocoapods_7.0-857-g886e7d7)
30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e third_party/gflags (v2.2.0)
c99458533a9b4c743ed51537e25989ea55944908 third_party/googletest (release-1.7.0)
- a428e42072765993ff674fda72863c9f1aa2d268 third_party/protobuf (v3.1.0-alpha-1)
+ 593e917c176b5bc5aafa57bf9f6030d749d91cd5 third_party/protobuf (v3.1.0-alpha-1-326-g593e917)
bcad91771b7f0bff28a1cac1981d7ef2b9bcef3c third_party/thrift (bcad917)
50893291621658f355bc5b4d450a8d06a563053d third_party/zlib (v1.2.8)
EOF
diff --git a/tools/run_tests/start_port_server.py b/tools/run_tests/start_port_server.py
new file mode 100755
index 0000000000..e33ac12bd3
--- /dev/null
+++ b/tools/run_tests/start_port_server.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Copyright 2017, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+Wrapper around port server starting code.
+
+Used by developers who wish to run individual C/C++ tests outside of the
+run_tests.py infrastructure.
+
+The path to this file is called out in test/core/util/port.c, and printed as
+an error message to users.
+"""
+
+import python_utils.start_port_server as start_port_server
+
+start_port_server.start_port_server()
+
+print "Port server started successfully"
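
Usage note for the new wrapper: it is meant to be run by hand from the
repository root before launching an individual C/C++ test binary. A sketch
of the same invocation done programmatically (the cwd requirement comes
from the relative paths inside python_utils/start_port_server.py; error
handling elided):

    import subprocess
    import sys

    # Equivalent to running tools/run_tests/start_port_server.py by hand.
    # cwd must be the repository root so the script can locate
    # tools/run_tests/python_utils/port_server.py relative to it.
    subprocess.check_call(
        [sys.executable, 'tools/run_tests/start_port_server.py'])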