Diffstat (limited to 'tools/run_tests')
-rw-r--r--  tools/run_tests/artifacts/artifact_targets.py      |   2
-rw-r--r--  tools/run_tests/artifacts/distribtest_targets.py   |  12
-rw-r--r--  tools/run_tests/generated/sources_and_headers.json | 201
-rw-r--r--  tools/run_tests/generated/tests.json               |  84
-rw-r--r--  tools/run_tests/helper_scripts/pre_build_cmake.bat |   5
-rw-r--r--  tools/run_tests/performance/scenario_config.py     |   5
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py              |  12
-rwxr-xr-x  tools/run_tests/run_performance_tests.py           | 359
-rwxr-xr-x  tools/run_tests/run_tests.py                       |  63
-rwxr-xr-x  tools/run_tests/run_tests_matrix.py                |  80
-rwxr-xr-x  tools/run_tests/sanity/check_submodules.sh         |   2
11 files changed, 537 insertions, 288 deletions
diff --git a/tools/run_tests/artifacts/artifact_targets.py b/tools/run_tests/artifacts/artifact_targets.py
index bd186263ca..12263282ae 100644
--- a/tools/run_tests/artifacts/artifact_targets.py
+++ b/tools/run_tests/artifacts/artifact_targets.py
@@ -150,6 +150,7 @@ class PythonArtifact:
self.py_version,
'32' if self.arch == 'x86' else '64'],
environ=environ,
+ timeout_seconds=45*60,
use_workspace=True)
else:
environ['PYTHON'] = self.py_version
@@ -257,6 +258,7 @@ class NodeExtArtifact:
['tools\\run_tests\\artifacts\\build_artifact_node.bat',
self.gyp_arch],
use_workspace=True,
+ timeout_seconds=45*60,
cpu_cost=cpu_cost)
else:
if self.platform == 'linux':
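For readers unfamiliar with the jobset machinery, here is a minimal standalone sketch of how a per-job timeout like the 45-minute one above flows into a job spec. The JobSpec shape and the 30-minute default are assumptions for illustration, not copied from jobset.py:

    # Minimal sketch (JobSpec shape assumed): a build job that is killed
    # if it exceeds 45 minutes, mirroring the timeout_seconds=45*60 added above.
    import collections

    JobSpec = collections.namedtuple('JobSpec', 'cmdline environ timeout_seconds')

    def create_jobspec(cmdline, environ=None, timeout_seconds=30 * 60):
        # callers such as the artifact builders can override the default
        return JobSpec(cmdline=cmdline, environ=environ or {},
                       timeout_seconds=timeout_seconds)

    job = create_jobspec(['tools\\run_tests\\artifacts\\build_artifact_node.bat'],
                         timeout_seconds=45 * 60)
    print(job.timeout_seconds)  # 2700
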
diff --git a/tools/run_tests/artifacts/distribtest_targets.py b/tools/run_tests/artifacts/distribtest_targets.py
index fa461efa85..fb1be383cd 100644
--- a/tools/run_tests/artifacts/distribtest_targets.py
+++ b/tools/run_tests/artifacts/distribtest_targets.py
@@ -255,12 +255,13 @@ class PHPDistribTest(object):
class CppDistribTest(object):
"""Tests Cpp make intall by building examples."""
- def __init__(self, platform, arch, docker_suffix=None):
- self.name = 'cpp_%s_%s_%s' % (platform, arch, docker_suffix)
+ def __init__(self, platform, arch, docker_suffix=None, testcase=None):
+ self.name = 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix, testcase)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
- self.labels = ['distribtest', 'cpp', platform, arch, docker_suffix]
+ self.testcase = testcase
+ self.labels = ['distribtest', 'cpp', platform, arch, docker_suffix, testcase]
def pre_build_jobspecs(self):
return []
@@ -271,7 +272,7 @@ class CppDistribTest(object):
'tools/dockerfile/distribtest/cpp_%s_%s' % (
self.docker_suffix,
self.arch),
- 'test/distrib/cpp/run_distrib_test.sh')
+ 'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase)
else:
raise Exception("Not supported yet.")
@@ -281,7 +282,8 @@ class CppDistribTest(object):
def targets():
"""Gets list of supported targets"""
- return [CppDistribTest('linux', 'x64', 'jessie'),
+ return [CppDistribTest('linux', 'x64', 'jessie', 'routeguide'),
+ CppDistribTest('linux', 'x64', 'jessie', 'cmake'),
CSharpDistribTest('linux', 'x64', 'wheezy'),
CSharpDistribTest('linux', 'x64', 'jessie'),
CSharpDistribTest('linux', 'x86', 'jessie'),
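
The effect of the new testcase parameter on names and script paths, as a standalone sketch derived from the constructor and build_jobspec changes above:

    # Standalone sketch of the naming scheme introduced above.
    def cpp_distribtest_name(platform, arch, docker_suffix=None, testcase=None):
        return 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix, testcase)

    def cpp_distribtest_script(testcase):
        return 'test/distrib/cpp/run_distrib_test_%s.sh' % testcase

    print(cpp_distribtest_name('linux', 'x64', 'jessie', 'cmake'))
    # -> cpp_linux_x64_jessie_cmake
    print(cpp_distribtest_script('routeguide'))
    # -> test/distrib/cpp/run_distrib_test_routeguide.sh
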
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index 1976762367..2f6e34bfb3 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -541,6 +541,23 @@
"gpr",
"gpr_test_util",
"grpc",
+ "transport_security_test_lib"
+ ],
+ "headers": [],
+ "is_filegroup": false,
+ "language": "c",
+ "name": "fake_transport_security_test",
+ "src": [
+ "test/core/tsi/fake_transport_security_test.c"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "gpr",
+ "gpr_test_util",
+ "grpc",
"grpc_test_util"
],
"headers": [],
@@ -1027,6 +1044,23 @@
"headers": [],
"is_filegroup": false,
"language": "c",
+ "name": "grpc_channel_stack_builder_test",
+ "src": [
+ "test/core/channel/channel_stack_builder_test.c"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "gpr",
+ "gpr_test_util",
+ "grpc",
+ "grpc_test_util"
+ ],
+ "headers": [],
+ "is_filegroup": false,
+ "language": "c",
"name": "grpc_channel_stack_test",
"src": [
"test/core/channel/channel_stack_test.c"
@@ -2168,6 +2202,23 @@
"gpr",
"gpr_test_util",
"grpc",
+ "transport_security_test_lib"
+ ],
+ "headers": [],
+ "is_filegroup": false,
+ "language": "c",
+ "name": "ssl_transport_security_test",
+ "src": [
+ "test/core/tsi/ssl_transport_security_test.c"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "gpr",
+ "gpr_test_util",
+ "grpc",
"grpc_test_util"
],
"headers": [],
@@ -2739,12 +2790,15 @@
"grpc_test_util_unsecure",
"grpc_unsecure"
],
- "headers": [],
+ "headers": [
+ "test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h"
+ ],
"is_filegroup": false,
"language": "c++",
"name": "bm_fullstack_streaming_ping_pong",
"src": [
- "test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.cc"
+ "test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.cc",
+ "test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h"
],
"third_party": false,
"type": "target"
@@ -2760,12 +2814,15 @@
"grpc_test_util_unsecure",
"grpc_unsecure"
],
- "headers": [],
+ "headers": [
+ "test/cpp/microbenchmarks/fullstack_streaming_pump.h"
+ ],
"is_filegroup": false,
"language": "c++",
"name": "bm_fullstack_streaming_pump",
"src": [
- "test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc"
+ "test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc",
+ "test/cpp/microbenchmarks/fullstack_streaming_pump.h"
],
"third_party": false,
"type": "target"
@@ -2803,12 +2860,15 @@
"grpc_test_util_unsecure",
"grpc_unsecure"
],
- "headers": [],
+ "headers": [
+ "test/cpp/microbenchmarks/fullstack_unary_ping_pong.h"
+ ],
"is_filegroup": false,
"language": "c++",
"name": "bm_fullstack_unary_ping_pong",
"src": [
- "test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.cc"
+ "test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.cc",
+ "test/cpp/microbenchmarks/fullstack_unary_ping_pong.h"
],
"third_party": false,
"type": "target"
@@ -2969,7 +3029,8 @@
"gpr",
"grpc",
"grpc++",
- "grpc++_codegen_base"
+ "grpc++_codegen_base",
+ "grpc++_core_stats"
],
"headers": [
"src/proto/grpc/testing/control.grpc.pb.h",
@@ -3002,7 +3063,8 @@
"gpr",
"grpc",
"grpc++_codegen_base",
- "grpc++_codegen_base_src"
+ "grpc++_codegen_base_src",
+ "grpc++_core_stats"
],
"headers": [
"src/proto/grpc/testing/control.grpc.pb.h",
@@ -3702,6 +3764,7 @@
"gpr_test_util",
"grpc",
"grpc++",
+ "grpc++_core_stats",
"grpc++_test_config",
"grpc++_test_util",
"grpc_test_util",
@@ -3723,6 +3786,7 @@
"gpr_test_util",
"grpc",
"grpc++",
+ "grpc++_core_stats",
"grpc++_test_config",
"grpc++_test_util",
"grpc_test_util",
@@ -3744,6 +3808,7 @@
"gpr_test_util",
"grpc",
"grpc++",
+ "grpc++_core_stats",
"grpc++_test_config",
"grpc++_test_util",
"grpc_test_util",
@@ -3851,6 +3916,7 @@
"gpr_test_util",
"grpc",
"grpc++",
+ "grpc++_core_stats",
"grpc++_test_config",
"grpc++_test_util",
"grpc_test_util",
@@ -4018,6 +4084,24 @@
"gpr",
"gpr_test_util",
"grpc",
+ "grpc++_test_util",
+ "grpc_test_util"
+ ],
+ "headers": [],
+ "is_filegroup": false,
+ "language": "c++",
+ "name": "stats_test",
+ "src": [
+ "test/core/debug/stats_test.cc"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "gpr",
+ "gpr_test_util",
+ "grpc",
"grpc++",
"grpc_test_util"
],
@@ -5815,7 +5899,6 @@
"grpc_lb_policy_grpclb_secure",
"grpc_lb_policy_pick_first",
"grpc_lb_policy_round_robin",
- "grpc_load_reporting",
"grpc_max_age_filter",
"grpc_message_size_filter",
"grpc_resolver_dns_ares",
@@ -5824,6 +5907,7 @@
"grpc_resolver_sockaddr",
"grpc_secure",
"grpc_server_backward_compatibility",
+ "grpc_server_load_reporting",
"grpc_transport_chttp2_client_insecure",
"grpc_transport_chttp2_client_secure",
"grpc_transport_chttp2_server_insecure",
@@ -5845,7 +5929,7 @@
"deps": [
"gpr",
"grpc_base",
- "grpc_load_reporting",
+ "grpc_server_load_reporting",
"grpc_transport_chttp2_client_secure",
"grpc_transport_cronet_client_secure"
],
@@ -5922,7 +6006,6 @@
"grpc_lb_policy_grpclb",
"grpc_lb_policy_pick_first",
"grpc_lb_policy_round_robin",
- "grpc_load_reporting",
"grpc_max_age_filter",
"grpc_message_size_filter",
"grpc_resolver_dns_ares",
@@ -5930,6 +6013,7 @@
"grpc_resolver_fake",
"grpc_resolver_sockaddr",
"grpc_server_backward_compatibility",
+ "grpc_server_load_reporting",
"grpc_transport_chttp2_client_insecure",
"grpc_transport_chttp2_server_insecure",
"grpc_transport_inproc",
@@ -6024,6 +6108,26 @@
},
{
"deps": [
+ "grpc++"
+ ],
+ "headers": [
+ "src/cpp/util/core_stats.h",
+ "src/proto/grpc/core/stats.grpc.pb.h",
+ "src/proto/grpc/core/stats.pb.h",
+ "src/proto/grpc/core/stats_mock.grpc.pb.h"
+ ],
+ "is_filegroup": false,
+ "language": "c++",
+ "name": "grpc++_core_stats",
+ "src": [
+ "src/cpp/util/core_stats.cc",
+ "src/cpp/util/core_stats.h"
+ ],
+ "third_party": false,
+ "type": "lib"
+ },
+ {
+ "deps": [
"census",
"gpr",
"grpc",
@@ -6518,6 +6622,7 @@
"deps": [
"grpc",
"grpc++",
+ "grpc++_core_stats",
"grpc++_test_util",
"grpc_test_util"
],
@@ -7822,9 +7927,12 @@
"src/core/lib/compression/compression.c",
"src/core/lib/compression/message_compress.c",
"src/core/lib/compression/stream_compression.c",
+ "src/core/lib/debug/stats.c",
+ "src/core/lib/debug/stats_data.c",
"src/core/lib/http/format_request.c",
"src/core/lib/http/httpcli.c",
"src/core/lib/http/parser.c",
+ "src/core/lib/iomgr/call_combiner.c",
"src/core/lib/iomgr/closure.c",
"src/core/lib/iomgr/combiner.c",
"src/core/lib/iomgr/endpoint.c",
@@ -7833,8 +7941,6 @@
"src/core/lib/iomgr/endpoint_pair_windows.c",
"src/core/lib/iomgr/error.c",
"src/core/lib/iomgr/ev_epoll1_linux.c",
- "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c",
- "src/core/lib/iomgr/ev_epoll_thread_pool_linux.c",
"src/core/lib/iomgr/ev_epollex_linux.c",
"src/core/lib/iomgr/ev_epollsig_linux.c",
"src/core/lib/iomgr/ev_poll_posix.c",
@@ -7973,9 +8079,12 @@
"src/core/lib/compression/algorithm_metadata.h",
"src/core/lib/compression/message_compress.h",
"src/core/lib/compression/stream_compression.h",
+ "src/core/lib/debug/stats.h",
+ "src/core/lib/debug/stats_data.h",
"src/core/lib/http/format_request.h",
"src/core/lib/http/httpcli.h",
"src/core/lib/http/parser.h",
+ "src/core/lib/iomgr/call_combiner.h",
"src/core/lib/iomgr/closure.h",
"src/core/lib/iomgr/combiner.h",
"src/core/lib/iomgr/endpoint.h",
@@ -7983,8 +8092,6 @@
"src/core/lib/iomgr/error.h",
"src/core/lib/iomgr/error_internal.h",
"src/core/lib/iomgr/ev_epoll1_linux.h",
- "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h",
- "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h",
"src/core/lib/iomgr/ev_epollex_linux.h",
"src/core/lib/iomgr/ev_epollsig_linux.h",
"src/core/lib/iomgr/ev_poll_posix.h",
@@ -8103,9 +8210,12 @@
"src/core/lib/compression/algorithm_metadata.h",
"src/core/lib/compression/message_compress.h",
"src/core/lib/compression/stream_compression.h",
+ "src/core/lib/debug/stats.h",
+ "src/core/lib/debug/stats_data.h",
"src/core/lib/http/format_request.h",
"src/core/lib/http/httpcli.h",
"src/core/lib/http/parser.h",
+ "src/core/lib/iomgr/call_combiner.h",
"src/core/lib/iomgr/closure.h",
"src/core/lib/iomgr/combiner.h",
"src/core/lib/iomgr/endpoint.h",
@@ -8113,8 +8223,6 @@
"src/core/lib/iomgr/error.h",
"src/core/lib/iomgr/error_internal.h",
"src/core/lib/iomgr/ev_epoll1_linux.h",
- "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h",
- "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h",
"src/core/lib/iomgr/ev_epollex_linux.h",
"src/core/lib/iomgr/ev_epollsig_linux.h",
"src/core/lib/iomgr/ev_poll_posix.h",
@@ -8466,27 +8574,6 @@
"grpc_base"
],
"headers": [
- "src/core/ext/filters/load_reporting/load_reporting.h",
- "src/core/ext/filters/load_reporting/load_reporting_filter.h"
- ],
- "is_filegroup": true,
- "language": "c",
- "name": "grpc_load_reporting",
- "src": [
- "src/core/ext/filters/load_reporting/load_reporting.c",
- "src/core/ext/filters/load_reporting/load_reporting.h",
- "src/core/ext/filters/load_reporting/load_reporting_filter.c",
- "src/core/ext/filters/load_reporting/load_reporting_filter.h"
- ],
- "third_party": false,
- "type": "filegroup"
- },
- {
- "deps": [
- "gpr",
- "grpc_base"
- ],
- "headers": [
"src/core/ext/filters/max_age/max_age_filter.h"
],
"is_filegroup": true,
@@ -8694,6 +8781,27 @@
{
"deps": [
"gpr",
+ "grpc_base"
+ ],
+ "headers": [
+ "src/core/ext/filters/load_reporting/server_load_reporting_filter.h",
+ "src/core/ext/filters/load_reporting/server_load_reporting_plugin.h"
+ ],
+ "is_filegroup": true,
+ "language": "c",
+ "name": "grpc_server_load_reporting",
+ "src": [
+ "src/core/ext/filters/load_reporting/server_load_reporting_filter.c",
+ "src/core/ext/filters/load_reporting/server_load_reporting_filter.h",
+ "src/core/ext/filters/load_reporting/server_load_reporting_plugin.c",
+ "src/core/ext/filters/load_reporting/server_load_reporting_plugin.h"
+ ],
+ "third_party": false,
+ "type": "filegroup"
+ },
+ {
+ "deps": [
+ "gpr",
"gpr_test_util",
"grpc_base",
"grpc_client_channel",
@@ -9102,6 +9210,23 @@
},
{
"deps": [
+ "grpc"
+ ],
+ "headers": [
+ "test/core/tsi/transport_security_test_lib.h"
+ ],
+ "is_filegroup": true,
+ "language": "c",
+ "name": "transport_security_test_lib",
+ "src": [
+ "test/core/tsi/transport_security_test_lib.c",
+ "test/core/tsi/transport_security_test_lib.h"
+ ],
+ "third_party": false,
+ "type": "filegroup"
+ },
+ {
+ "deps": [
"gpr",
"grpc_base",
"grpc_trace",
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index 118e5c6cee..1bb7a4aa24 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -652,6 +652,26 @@
],
"cpu_cost": 1.0,
"exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "gtest": false,
+ "language": "c",
+ "name": "fake_transport_security_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
"exclude_iomgrs": [
"uv"
],
@@ -1225,6 +1245,28 @@
"flaky": false,
"gtest": false,
"language": "c",
+ "name": "grpc_channel_stack_builder_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix",
+ "windows"
+ ]
+ },
+ {
+ "args": [],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix",
+ "windows"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "gtest": false,
+ "language": "c",
"name": "grpc_channel_stack_test",
"platforms": [
"linux",
@@ -2278,6 +2320,26 @@
"ci_platforms": [
"linux",
"mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "gtest": false,
+ "language": "c",
+ "name": "ssl_transport_security_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [],
+ "ci_platforms": [
+ "linux",
+ "mac",
"posix",
"windows"
],
@@ -3943,6 +4005,28 @@
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
+ "gtest": true,
+ "language": "c++",
+ "name": "stats_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix",
+ "windows"
+ ]
+ },
+ {
+ "args": [],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix",
+ "windows"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
"gtest": false,
"language": "c++",
"name": "status_test",
diff --git a/tools/run_tests/helper_scripts/pre_build_cmake.bat b/tools/run_tests/helper_scripts/pre_build_cmake.bat
index a770aa8118..d89fc5fec2 100644
--- a/tools/run_tests/helper_scripts/pre_build_cmake.bat
+++ b/tools/run_tests/helper_scripts/pre_build_cmake.bat
@@ -14,7 +14,8 @@
setlocal
-set ARCHITECTURE=%1
+set GENERATOR=%1
+set ARCHITECTURE=%2
cd /d %~dp0\..\..\..
@@ -27,7 +28,7 @@ cd build
@rem If yasm is not on the path, use hardcoded path instead.
yasm --version || set USE_HARDCODED_YASM_PATH_MAYBE=-DCMAKE_ASM_NASM_COMPILER="C:/Program Files (x86)/yasm/yasm.exe"
-cmake -G "Visual Studio 14 2015" -A %ARCHITECTURE% -DgRPC_BUILD_TESTS=ON %USE_HARDCODED_YASM_PATH_MAYBE% ../.. || goto :error
+cmake -G %GENERATOR% -A %ARCHITECTURE% -DgRPC_BUILD_TESTS=ON %USE_HARDCODED_YASM_PATH_MAYBE% ../.. || goto :error
endlocal
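
The script now takes the CMake generator as %1 and the architecture as %2, so callers must pass both, quoting the generator because its name contains spaces. A small Python sketch of the invocation (the rendered command line is illustrative):

    # Illustrative only: render the command line a caller would use.
    import subprocess

    cmdline = ['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
               'Visual Studio 15 2017',  # %1 -> GENERATOR
               'x64']                    # %2 -> ARCHITECTURE
    print(subprocess.list2cmdline(cmdline))
    # tools\run_tests\helper_scripts\pre_build_cmake.bat "Visual Studio 15 2017" x64
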
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index 8868f40f49..cbcc4d0ecc 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -154,7 +154,9 @@ def _ping_pong_scenario(name, rpc_type,
scenario['client_config']['payload_config'] = _payload_type(use_generic_payload, req_size, resp_size)
- optimization_target = 'blend'
+ # Use 'throughput' as the optimization target for all scenarios; it was
+ # previously applied only to unconstrained clients (that override is
+ # removed below).
+ optimization_target = 'throughput'
if unconstrained_client:
outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[unconstrained_client]
@@ -169,7 +171,6 @@ def _ping_pong_scenario(name, rpc_type,
scenario['client_config']['outstanding_rpcs_per_channel'] = deep
scenario['client_config']['client_channels'] = wide
scenario['client_config']['async_client_threads'] = 0
- optimization_target = 'throughput'
else:
scenario['client_config']['outstanding_rpcs_per_channel'] = 1
scenario['client_config']['client_channels'] = 1
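
The net effect of this hunk: 'throughput' is now the optimization target for every ping-pong scenario rather than only unconstrained ones. A condensed before/after sketch of that control flow:

    # Condensed before/after sketch of the optimization target selection.
    def optimization_target_before(unconstrained_client):
        target = 'blend'
        if unconstrained_client:
            target = 'throughput'   # only unconstrained scenarios got this
        return target

    def optimization_target_after(unconstrained_client):
        return 'throughput'         # now used unconditionally

    assert optimization_target_before(None) == 'blend'
    assert optimization_target_after(None) == 'throughput'
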
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 312abd59c4..c136af58cb 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -83,12 +83,14 @@ def collect_latency(bm_name, args):
jobset.JobSpec(['bins/basicprof/%s' % bm_name,
'--benchmark_filter=^%s$' % line,
'--benchmark_min_time=0.05'],
- environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}))
+ environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
+ shortname='profile-%s' % fnize(line)))
profile_analysis.append(
jobset.JobSpec([sys.executable,
'tools/profiling/latency_profile/profile_analyzer.py',
'--source', '%s.trace' % fnize(line), '--fmt', 'simple',
- '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=None))
+ '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=20*60,
+ shortname='analyze-%s' % fnize(line)))
cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
# periodically flush out the list of jobs: profile_analysis jobs at least
# consume upwards of five gigabytes of ram in some cases, and so analysing
@@ -126,14 +128,16 @@ def collect_perf(bm_name, args):
'-g', '-F', '997',
'bins/mutrace/%s' % bm_name,
'--benchmark_filter=^%s$' % line,
- '--benchmark_min_time=10']))
+ '--benchmark_min_time=10'],
+ shortname='perf-%s' % fnize(line)))
profile_analysis.append(
jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
environ = {
'PERF_BASE_NAME': fnize(line),
'OUTPUT_DIR': 'reports',
'OUTPUT_FILENAME': fnize(line),
- }))
+ },
+ shortname='flame-%s' % fnize(line)))
cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
# periodically flush out the list of jobs: temporary space required for this
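
The added shortname fields give each profiling and analysis job a readable label built from fnize, the helper in this file that turns a benchmark name into a filename-safe token. An illustrative re-implementation (the real helper may differ in detail):

    # Illustrative stand-in for fnize: map a benchmark name such as
    # 'BM_StreamingPingPong/8/2' to a token safe for filenames/shortnames.
    def fnize(s):
        out = ''
        for c in s:
            if c in '<>, /':
                if out and out[-1] == '_':
                    continue
                out += '_'
            else:
                out += c
        return out

    line = 'BM_StreamingPingPong/8/2'
    print('profile-%s' % fnize(line))  # profile-BM_StreamingPingPong_8_2
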
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 0db5e4ef83..9b20fae78f 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -281,18 +281,18 @@ def create_qpsworkers(languages, worker_hosts, perf_cmd=None):
for worker_idx, worker in enumerate(workers)]
-def perf_report_processor_job(worker_host, perf_base_name, output_filename):
+def perf_report_processor_job(worker_host, perf_base_name, output_filename, flame_graph_reports):
print('Creating perf report collection job for %s' % worker_host)
cmd = ''
if worker_host != 'localhost':
user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\
tools/run_tests/performance/process_remote_perf_flamegraphs.sh" \
- % (user_at_host, output_filename, args.flame_graph_reports, perf_base_name)
+ % (user_at_host, output_filename, flame_graph_reports, perf_base_name)
else:
cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\
tools/run_tests/performance/process_local_perf_flamegraphs.sh" \
- % (output_filename, args.flame_graph_reports, perf_base_name)
+ % (output_filename, flame_graph_reports, perf_base_name)
return jobset.JobSpec(cmdline=cmd,
timeout_seconds=3*60,
@@ -332,7 +332,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
for language in languages:
for scenario_json in language.scenarios():
- if re.search(args.regex, scenario_json['name']):
+ if re.search(regex, scenario_json['name']):
categories = scenario_json.get('CATEGORIES', ['scalable', 'smoketest'])
if category in categories or category == 'all':
workers = workers_by_lang[str(language)][:]
@@ -376,7 +376,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
return scenarios
-def finish_qps_workers(jobs):
+def finish_qps_workers(jobs, qpsworker_jobs):
"""Waits for given jobs to finish and eventually kills them."""
retries = 0
num_killed = 0
@@ -399,10 +399,10 @@ profile_output_files = []
# Collect perf text reports and flamegraphs if perf_cmd was used
# Note the base names of perf text reports are used when creating and processing
# perf data. The scenario name uniquifies the output name in the final
-# perf reports directory.
+# perf reports directory.
# Also, the perf profiles need to be fetched and processed after each scenario
# in order to avoid clobbering the output files.
-def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name):
+def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name, flame_graph_reports):
perf_report_jobs = []
global profile_output_files
for host_and_port in hosts_and_base_names:
@@ -411,181 +411,184 @@ def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name):
# from the base filename, create .svg output filename
host = host_and_port.split(':')[0]
profile_output_files.append('%s.svg' % output_filename)
- perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename))
+ perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename, flame_graph_reports))
jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True)
failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1, clear_alarms=False)
jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True)
return failures
+def main():
+ argp = argparse.ArgumentParser(description='Run performance tests.')
+ argp.add_argument('-l', '--language',
+ choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
+ nargs='+',
+ required=True,
+ help='Languages to benchmark.')
+ argp.add_argument('--remote_driver_host',
+ default=None,
+ help='Run QPS driver on given host. By default, QPS driver is run locally.')
+ argp.add_argument('--remote_worker_host',
+ nargs='+',
+ default=[],
+ help='Worker hosts where to start QPS workers.')
+ argp.add_argument('--dry_run',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Just list scenarios to be run, but don\'t run them.')
+ argp.add_argument('-r', '--regex', default='.*', type=str,
+ help='Regex to select scenarios to run.')
+ argp.add_argument('--bq_result_table', default=None, type=str,
+ help='Bigquery "dataset.table" to upload results to.')
+ argp.add_argument('--category',
+ choices=['smoketest','all','scalable','sweep'],
+ default='all',
+ help='Select a category of tests to run.')
+ argp.add_argument('--netperf',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run netperf benchmark as one of the scenarios.')
+ argp.add_argument('--server_cpu_load',
+ default=0, type=int,
+ help='Select a targeted server cpu load to run. 0 means ignore this flag')
+ argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
+ help='Name of XML report file to generate.')
+ argp.add_argument('--perf_args',
+ help=('Example usage: "--perf_args=record -F 99 -g". '
+ 'Wrap QPS workers in a perf command '
+ 'with the arguments to perf specified here. '
+ '".svg" flame graph profiles will be '
+ 'created for each Qps Worker on each scenario. '
+ 'Files will output to "<repo_root>/<args.flame_graph_reports>" '
+ 'directory. Output files from running the worker '
+ 'under perf are saved in the repo root where it is run. '
+ 'Note that the perf "-g" flag is necessary for '
+ 'flame graphs generation to work (assuming the binary '
+ 'being profiled uses frame pointers, check out '
+ '"--call-graph dwarf" option using libunwind otherwise.) '
+ 'Also note that the entire "--perf_args=<arg(s)>" must '
+ 'be wrapped in quotes as in the example usage. '
+ 'If "--perf_args" is unspecified, "perf" will '
+ 'not be used at all. '
+ 'See http://www.brendangregg.com/perf.html '
+ 'for more general perf examples.'))
+ argp.add_argument('--skip_generate_flamegraphs',
+ default=False,
+ action='store_const',
+ const=True,
+ help=('Turn flame graph generation off. '
+ 'May be useful if "perf_args" arguments do not make sense for '
+ 'generating flamegraphs (e.g., "--perf_args=stat ...")'))
+ argp.add_argument('-f', '--flame_graph_reports', default='perf_reports', type=str,
+ help='Name of directory to output flame graph profiles to, if any are created.')
+
+ args = argp.parse_args()
+
+ languages = set(scenario_config.LANGUAGES[l]
+ for l in itertools.chain.from_iterable(
+ six.iterkeys(scenario_config.LANGUAGES) if x == 'all'
+ else [x] for x in args.language))
+
+
+ # Put together set of remote hosts where to run and build
+ remote_hosts = set()
+ if args.remote_worker_host:
+ for host in args.remote_worker_host:
+ remote_hosts.add(host)
+ if args.remote_driver_host:
+ remote_hosts.add(args.remote_driver_host)
+
+ if not args.dry_run:
+ if remote_hosts:
+ archive_repo(languages=[str(l) for l in languages])
+ prepare_remote_hosts(remote_hosts, prepare_local=True)
+ else:
+ prepare_remote_hosts([], prepare_local=True)
+
+ build_local = False
+ if not args.remote_driver_host:
+ build_local = True
+ if not args.dry_run:
+ build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local)
+
+ perf_cmd = None
+ if args.perf_args:
+ print('Running workers under perf profiler')
+ # Expect /usr/bin/perf to be installed here, as is usual
+ perf_cmd = ['/usr/bin/perf']
+ perf_cmd.extend(re.split('\s+', args.perf_args))
+
+ qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd)
+
+ # get list of worker addresses for each language.
+ workers_by_lang = dict([(str(language), []) for language in languages])
+ for job in qpsworker_jobs:
+ workers_by_lang[str(job.language)].append(job)
+
+ scenarios = create_scenarios(languages,
+ workers_by_lang=workers_by_lang,
+ remote_host=args.remote_driver_host,
+ regex=args.regex,
+ category=args.category,
+ bq_result_table=args.bq_result_table,
+ netperf=args.netperf,
+ netperf_hosts=args.remote_worker_host,
+ server_cpu_load=args.server_cpu_load)
+
+ if not scenarios:
+ raise Exception('No scenarios to run')
+
+ total_scenario_failures = 0
+ qps_workers_killed = 0
+ merged_resultset = {}
+ perf_report_failures = 0
+
+ for scenario in scenarios:
+ if args.dry_run:
+ print(scenario.name)
+ else:
+ scenario_failures = 0
+ try:
+ for worker in scenario.workers:
+ worker.start()
+ jobs = [scenario.jobspec]
+ if scenario.workers:
+ jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host))
+ scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1, clear_alarms=False)
+ total_scenario_failures += scenario_failures
+ merged_resultset = dict(itertools.chain(six.iteritems(merged_resultset),
+ six.iteritems(resultset)))
+ finally:
+ # Consider qps workers that need to be killed as failures
+ qps_workers_killed += finish_qps_workers(scenario.workers, qpsworker_jobs)
+
+ if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
+ workers_and_base_names = {}
+ for worker in scenario.workers:
+ if not worker.perf_file_base_name:
+ raise Exception('using perf but perf report filename is unspecified')
+ workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name
+ perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name, args.flame_graph_reports)
+
+
+ # Still write the index.html even if some scenarios failed.
+ # 'profile_output_files' will only have names for scenarios that passed
+ if perf_cmd and not args.skip_generate_flamegraphs:
+ # write the index file to the output dir, with all profiles from all scenarios/workers
+ report_utils.render_perf_profiling_results('%s/index.html' % args.flame_graph_reports, profile_output_files)
+
+ report_utils.render_junit_xml_report(merged_resultset, args.xml_report,
+ suite_name='benchmarks')
+
+ if total_scenario_failures > 0 or qps_workers_killed > 0:
+ print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed))
+ sys.exit(1)
-argp = argparse.ArgumentParser(description='Run performance tests.')
-argp.add_argument('-l', '--language',
- choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
- nargs='+',
- required=True,
- help='Languages to benchmark.')
-argp.add_argument('--remote_driver_host',
- default=None,
- help='Run QPS driver on given host. By default, QPS driver is run locally.')
-argp.add_argument('--remote_worker_host',
- nargs='+',
- default=[],
- help='Worker hosts where to start QPS workers.')
-argp.add_argument('--dry_run',
- default=False,
- action='store_const',
- const=True,
- help='Just list scenarios to be run, but don\'t run them.')
-argp.add_argument('-r', '--regex', default='.*', type=str,
- help='Regex to select scenarios to run.')
-argp.add_argument('--bq_result_table', default=None, type=str,
- help='Bigquery "dataset.table" to upload results to.')
-argp.add_argument('--category',
- choices=['smoketest','all','scalable','sweep'],
- default='all',
- help='Select a category of tests to run.')
-argp.add_argument('--netperf',
- default=False,
- action='store_const',
- const=True,
- help='Run netperf benchmark as one of the scenarios.')
-argp.add_argument('--server_cpu_load',
- default=0, type=int,
- help='Select a targeted server cpu load to run. 0 means ignore this flag')
-argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
- help='Name of XML report file to generate.')
-argp.add_argument('--perf_args',
- help=('Example usage: "--perf_args=record -F 99 -g". '
- 'Wrap QPS workers in a perf command '
- 'with the arguments to perf specified here. '
- '".svg" flame graph profiles will be '
- 'created for each Qps Worker on each scenario. '
- 'Files will output to "<repo_root>/<args.flame_graph_reports>" '
- 'directory. Output files from running the worker '
- 'under perf are saved in the repo root where it is run. '
- 'Note that the perf "-g" flag is necessary for '
- 'flame graphs generation to work (assuming the binary '
- 'being profiled uses frame pointers, check out '
- '"--call-graph dwarf" option using libunwind otherwise.) '
- 'Also note that the entire "--perf_args=<arg(s)>" must '
- 'be wrapped in quotes as in the example usage. '
- 'If "--perf_args" is unspecified, "perf" will '
- 'not be used at all. '
- 'See http://www.brendangregg.com/perf.html '
- 'for more general perf examples.'))
-argp.add_argument('--skip_generate_flamegraphs',
- default=False,
- action='store_const',
- const=True,
- help=('Turn flame graph generation off. '
- 'May be useful if "perf_args" arguments do not make sense for '
- 'generating flamegraphs (e.g., "--perf_args=stat ...")'))
-argp.add_argument('-f', '--flame_graph_reports', default='perf_reports', type=str,
- help='Name of directory to output flame graph profiles to, if any are created.')
-
-args = argp.parse_args()
-
-languages = set(scenario_config.LANGUAGES[l]
- for l in itertools.chain.from_iterable(
- six.iterkeys(scenario_config.LANGUAGES) if x == 'all'
- else [x] for x in args.language))
-
-
-# Put together set of remote hosts where to run and build
-remote_hosts = set()
-if args.remote_worker_host:
- for host in args.remote_worker_host:
- remote_hosts.add(host)
-if args.remote_driver_host:
- remote_hosts.add(args.remote_driver_host)
-
-if not args.dry_run:
- if remote_hosts:
- archive_repo(languages=[str(l) for l in languages])
- prepare_remote_hosts(remote_hosts, prepare_local=True)
- else:
- prepare_remote_hosts([], prepare_local=True)
-
-build_local = False
-if not args.remote_driver_host:
- build_local = True
-if not args.dry_run:
- build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local)
-
-perf_cmd = None
-if args.perf_args:
- print('Running workers under perf profiler')
- # Expect /usr/bin/perf to be installed here, as is usual
- perf_cmd = ['/usr/bin/perf']
- perf_cmd.extend(re.split('\s+', args.perf_args))
-
-qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd)
-
-# get list of worker addresses for each language.
-workers_by_lang = dict([(str(language), []) for language in languages])
-for job in qpsworker_jobs:
- workers_by_lang[str(job.language)].append(job)
-
-scenarios = create_scenarios(languages,
- workers_by_lang=workers_by_lang,
- remote_host=args.remote_driver_host,
- regex=args.regex,
- category=args.category,
- bq_result_table=args.bq_result_table,
- netperf=args.netperf,
- netperf_hosts=args.remote_worker_host,
- server_cpu_load=args.server_cpu_load)
-
-if not scenarios:
- raise Exception('No scenarios to run')
-
-total_scenario_failures = 0
-qps_workers_killed = 0
-merged_resultset = {}
-perf_report_failures = 0
-
-for scenario in scenarios:
- if args.dry_run:
- print(scenario.name)
- else:
- scenario_failures = 0
- try:
- for worker in scenario.workers:
- worker.start()
- jobs = [scenario.jobspec]
- if scenario.workers:
- jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host))
- scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1, clear_alarms=False)
- total_scenario_failures += scenario_failures
- merged_resultset = dict(itertools.chain(six.iteritems(merged_resultset),
- six.iteritems(resultset)))
- finally:
- # Consider qps workers that need to be killed as failures
- qps_workers_killed += finish_qps_workers(scenario.workers)
-
- if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
- workers_and_base_names = {}
- for worker in scenario.workers:
- if not worker.perf_file_base_name:
- raise Exception('using perf but perf report filename is unspecified')
- workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name
- perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name)
-
-
-# Still write the index.html even if some scenarios failed.
-# 'profile_output_files' will only have names for scenarios that passed
-if perf_cmd and not args.skip_generate_flamegraphs:
- # write the index file to the output dir, with all profiles from all scenarios/workers
- report_utils.render_perf_profiling_results('%s/index.html' % args.flame_graph_reports, profile_output_files)
-
-report_utils.render_junit_xml_report(merged_resultset, args.xml_report,
- suite_name='benchmarks')
-
-if total_scenario_failures > 0 or qps_workers_killed > 0:
- print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed))
- sys.exit(1)
-
-if perf_report_failures > 0:
- print('%s perf profile collection jobs failed' % perf_report_failures)
- sys.exit(1)
+ if perf_report_failures > 0:
+ print('%s perf profile collection jobs failed' % perf_report_failures)
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
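
With the top-level code moved into main() behind the __main__ guard, importing the module no longer parses arguments or starts workers. A short sketch of what that enables (the sys.path setup is an assumption about the caller running from the repo root):

    # Sketch: with main() behind the guard, helpers can be imported and
    # exercised without side effects.
    import sys
    sys.path.insert(0, 'tools/run_tests')

    import run_performance_tests as perf  # no argparse, no workers started
    # e.g. drive scenario selection directly (arguments abridged):
    # scenarios = perf.create_scenarios(languages, workers_by_lang={}, regex='cpp.*')
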
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index d874b2a320..b66c5f7f71 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -69,17 +69,22 @@ _POLLING_STRATEGIES = {
}
-def get_flaky_tests(limit=None):
+BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
+
+
+def get_bqtest_data(limit=None):
import big_query_utils
bq = big_query_utils.create_big_query()
query = """
SELECT
filtered_test_name,
+ SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
+ MAX(cpu_measured) as cpu
FROM (
SELECT
REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
- result
+ result, cpu_measured
FROM
[grpc-testing:jenkins_test_results.aggregate_results]
WHERE
@@ -89,15 +94,15 @@ SELECT
GROUP BY
filtered_test_name
HAVING
- SUM(result != 'PASSED' AND result != 'SKIPPED') > 0"""
+ flaky OR cpu > 0"""
if limit:
query += " limit {}".format(limit)
query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
page = bq.jobs().getQueryResults(
pageToken=None,
**query_job['jobReference']).execute(num_retries=3)
- flake_names = [row['f'][0]['v'] for row in page['rows']]
- return flake_names
+ test_data = [BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true', float(row['f'][2]['v'])) for row in page['rows']]
+ return test_data
def platform_string():
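
The row['f'][i]['v'] indexing above follows the shape of BigQuery's REST responses, where each row is a dict whose 'f' list holds one {'v': value} cell per selected column. A self-contained sketch with a fabricated response:

    # Self-contained sketch of the row parsing above, using a fabricated
    # response in the shape the BigQuery REST API returns.
    import collections

    BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')

    page = {'rows': [
        {'f': [{'v': 'h2_full_test'}, {'v': 'true'}, {'v': '1.5'}]},
        {'f': [{'v': 'stats_test'}, {'v': 'false'}, {'v': '0.2'}]},
    ]}

    test_data = [BigQueryTestData(row['f'][0]['v'],
                                  row['f'][1]['v'] == 'true',
                                  float(row['f'][2]['v']))
                 for row in page['rows']]
    print(test_data[0])  # BigQueryTestData(name='h2_full_test', flaky=True, cpu=1.5)
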
@@ -141,6 +146,9 @@ class Config(object):
if not flaky and shortname and shortname in flaky_tests:
print('Setting %s to flaky' % shortname)
flaky = True
+ if shortname in shortname_to_cpu:
+ print('Update CPU cost for %s: %f -> %f' % (shortname, cpu_cost, shortname_to_cpu[shortname]))
+ cpu_cost = shortname_to_cpu[shortname]
return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
@@ -235,8 +243,10 @@ class CLanguage(object):
self.config = config
self.args = args
if self.platform == 'windows':
- _check_compiler(self.args.compiler, ['default', 'cmake'])
+ _check_compiler(self.args.compiler, ['default', 'cmake', 'cmake_vs2015',
+ 'cmake_vs2017'])
_check_arch(self.args.arch, ['default', 'x64', 'x86'])
+ self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
self._use_cmake = True
self._make_options = []
@@ -363,11 +373,13 @@ class CLanguage(object):
'check_epollexclusive']
def make_options(self):
- return self._make_options;
+ return self._make_options
def pre_build_steps(self):
if self.platform == 'windows':
- return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat', self._cmake_arch_option]]
+ return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
+ self._cmake_generator_option,
+ self._cmake_arch_option]]
elif self._use_cmake:
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
else:
@@ -1205,7 +1217,7 @@ argp.add_argument('--compiler',
'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
'electron1.3', 'electron1.6',
'coreclr',
- 'cmake'],
+ 'cmake', 'cmake_vs2015', 'cmake_vs2017'],
default='default',
help='Selects compiler to use. Allowed values depend on the platform and language.')
argp.add_argument('--iomgr_platform',
@@ -1250,9 +1262,12 @@ argp.add_argument('--disable_auto_set_flakes', default=False, const=True, action
args = argp.parse_args()
flaky_tests = set()
+shortname_to_cpu = {}
if not args.disable_auto_set_flakes:
try:
- flaky_tests = set(get_flaky_tests())
+ for test in get_bqtest_data():
+ if test.flaky: flaky_tests.add(test.name)
+ if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
except:
print("Unexpected error getting flaky tests:", sys.exc_info()[0])
@@ -1364,27 +1379,11 @@ _check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
if platform_string() == 'windows':
- if makefile.startswith('cmake/build/'):
- return [jobset.JobSpec(['cmake', '--build', '.',
- '--target', '%s' % target,
- '--config', _MSBUILD_CONFIG[cfg]],
- cwd=os.path.dirname(makefile),
- timeout_seconds=None) for target in targets]
- extra_args = []
- # better do parallel compilation
- # empirically /m:2 gives the best performance/price and should prevent
- # overloading the windows workers.
- extra_args.extend(['/m:2'])
- # disable PDB generation: it's broken, and we don't need it during CI
- extra_args.extend(['/p:Jenkins=true'])
- return [
- jobset.JobSpec([_windows_build_bat(args.compiler),
- 'vsprojects\\%s.sln' % target,
- '/p:Configuration=%s' % _MSBUILD_CONFIG[cfg]] +
- extra_args +
- language_make_options,
- shell=True, timeout_seconds=None)
- for target in targets]
+ return [jobset.JobSpec(['cmake', '--build', '.',
+ '--target', '%s' % target,
+ '--config', _MSBUILD_CONFIG[cfg]],
+ cwd=os.path.dirname(makefile),
+ timeout_seconds=None) for target in targets]
else:
if targets and makefile.startswith('cmake/build/'):
# With cmake, we've passed all the build configuration in the pre-build step already
@@ -1528,7 +1527,7 @@ def _build_and_run(
# When running on travis, we want our test runs to be as similar as possible
# for reproducibility purposes.
if args.travis and args.max_time <= 0:
- massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
+ massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
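
Taken together, the run_tests.py changes make BigQuery data drive two job attributes: flakiness and CPU cost. A condensed sketch of the override logic added to Config.job_spec (abridged; names match the diff):

    # Condensed sketch of the override logic added to Config.job_spec.
    flaky_tests = {'h2_full_test'}
    shortname_to_cpu = {'h2_full_test': 1.5}

    def effective_attrs(shortname, cpu_cost=1.0, flaky=False):
        if not flaky and shortname in flaky_tests:
            flaky = True
        if shortname in shortname_to_cpu:
            cpu_cost = shortname_to_cpu[shortname]
        return flaky, cpu_cost

    print(effective_attrs('h2_full_test'))  # (True, 1.5)
    print(effective_attrs('stats_test'))    # (False, 1.0)
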
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index 7d26b284da..957e7b569e 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -29,9 +29,11 @@ from python_utils.filter_pull_request_tests import filter_tests
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
+_DEFAULT_RUNTESTS_TIMEOUT = 1*60*60
+
# Set the timeout high to allow enough time for sanitizers and pre-building
# clang docker.
-_RUNTESTS_TIMEOUT = 4*60*60
+_CPP_RUNTESTS_TIMEOUT = 4*60*60
# Number of jobs assigned to each run_tests.py instance
_DEFAULT_INNER_JOBS = 2
@@ -51,8 +53,11 @@ def _report_filename_internal_ci(name):
def _docker_jobspec(name, runtests_args=[], runtests_envs={},
- inner_jobs=_DEFAULT_INNER_JOBS):
+ inner_jobs=_DEFAULT_INNER_JOBS,
+ timeout_seconds=None):
"""Run a single instance of run_tests.py in a docker container"""
+ if not timeout_seconds:
+ timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
test_job = jobset.JobSpec(
cmdline=['python', 'tools/run_tests/run_tests.py',
'--use_docker',
@@ -62,15 +67,18 @@ def _docker_jobspec(name, runtests_args=[], runtests_envs={},
'--report_suite_name', '%s' % name] + runtests_args,
environ=runtests_envs,
shortname='run_tests_%s' % name,
- timeout_seconds=_RUNTESTS_TIMEOUT)
+ timeout_seconds=timeout_seconds)
return test_job
def _workspace_jobspec(name, runtests_args=[], workspace_name=None,
- runtests_envs={}, inner_jobs=_DEFAULT_INNER_JOBS):
+ runtests_envs={}, inner_jobs=_DEFAULT_INNER_JOBS,
+ timeout_seconds=None):
"""Run a single instance of run_tests.py in a separate workspace"""
if not workspace_name:
workspace_name = 'workspace_%s' % name
+ if not timeout_seconds:
+ timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
env = {'WORKSPACE_NAME': workspace_name}
env.update(runtests_envs)
test_job = jobset.JobSpec(
@@ -82,14 +90,15 @@ def _workspace_jobspec(name, runtests_args=[], workspace_name=None,
'--report_suite_name', '%s' % name] + runtests_args,
environ=env,
shortname='run_tests_%s' % name,
- timeout_seconds=_RUNTESTS_TIMEOUT)
+ timeout_seconds=timeout_seconds)
return test_job
def _generate_jobs(languages, configs, platforms, iomgr_platform = 'native',
arch=None, compiler=None,
labels=[], extra_args=[], extra_envs={},
- inner_jobs=_DEFAULT_INNER_JOBS):
+ inner_jobs=_DEFAULT_INNER_JOBS,
+ timeout_seconds=None):
result = []
for language in languages:
for platform in platforms:
@@ -110,10 +119,12 @@ def _generate_jobs(languages, configs, platforms, iomgr_platform = 'native',
runtests_args += extra_args
if platform == 'linux':
job = _docker_jobspec(name=name, runtests_args=runtests_args,
- runtests_envs=extra_envs, inner_jobs=inner_jobs)
+ runtests_envs=extra_envs, inner_jobs=inner_jobs,
+ timeout_seconds=timeout_seconds)
else:
job = _workspace_jobspec(name=name, runtests_args=runtests_args,
- runtests_envs=extra_envs, inner_jobs=inner_jobs)
+ runtests_envs=extra_envs, inner_jobs=inner_jobs,
+ timeout_seconds=timeout_seconds)
job.labels = [platform, config, language, iomgr_platform] + labels
result.append(job)
@@ -136,7 +147,8 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
platforms=['linux', 'macos', 'windows'],
labels=['basictests', 'corelang'],
extra_args=extra_args,
- inner_jobs=inner_jobs)
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
test_jobs += _generate_jobs(languages=['csharp', 'node', 'python'],
configs=['dbg', 'opt'],
@@ -151,7 +163,8 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
platforms=['linux', 'macos'],
labels=['basictests', 'corelang'],
extra_args=extra_args,
- inner_jobs=inner_jobs)
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
test_jobs += _generate_jobs(languages=['ruby', 'php'],
configs=['dbg', 'opt'],
@@ -174,13 +187,15 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
platforms=['linux'],
labels=['sanitizers', 'corelang'],
extra_args=extra_args,
- inner_jobs=inner_jobs)
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
test_jobs += _generate_jobs(languages=['c++'],
configs=['asan', 'tsan'],
platforms=['linux'],
labels=['sanitizers', 'corelang'],
extra_args=extra_args,
- inner_jobs=inner_jobs)
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
return test_jobs
@@ -207,18 +222,18 @@ def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS)
compiler=compiler,
labels=['portability', 'corelang'],
extra_args=extra_args,
- inner_jobs=inner_jobs)
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
- # portability C on Windows
- for arch in ['x86', 'x64']:
- test_jobs += _generate_jobs(languages=['c'],
- configs=['dbg'],
- platforms=['windows'],
- arch=arch,
- compiler='default',
- labels=['portability', 'corelang'],
- extra_args=extra_args,
- inner_jobs=inner_jobs)
+ # portability C on Windows 64-bit (x86 is the default)
+ test_jobs += _generate_jobs(languages=['c'],
+ configs=['dbg'],
+ platforms=['windows'],
+ arch='x64',
+ compiler='default',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
# portability C++ on Windows
# TODO(jtattermusch): some of the tests are failing, so we force --build_only
@@ -231,15 +246,27 @@ def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS)
extra_args=extra_args + ['--build_only'],
inner_jobs=inner_jobs)
+ # portability C and C++ on Windows using VS2017 (build only)
+ # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+ test_jobs += _generate_jobs(languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['windows'],
+ arch='x64',
+ compiler='cmake_vs2017',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args + ['--build_only'],
+ inner_jobs=inner_jobs)
+
# C and C++ with the c-ares DNS resolver on Linux
test_jobs += _generate_jobs(languages=['c', 'c++'],
configs=['dbg'], platforms=['linux'],
labels=['portability', 'corelang'],
extra_args=extra_args,
- extra_envs={'GRPC_DNS_RESOLVER': 'ares'})
+ extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
# TODO(zyc): Turn on this test after adding c-ares support on windows.
- # C with the c-ares DNS resolver on Windonws
+ # C with the c-ares DNS resolver on Windows
# test_jobs += _generate_jobs(languages=['c'],
# configs=['dbg'], platforms=['windows'],
# labels=['portability', 'corelang'],
@@ -282,7 +309,8 @@ def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS)
iomgr_platform='uv',
labels=['portability', 'corelang'],
extra_args=extra_args,
- inner_jobs=inner_jobs)
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
test_jobs += _generate_jobs(languages=['node'],
configs=['dbg'],
diff --git a/tools/run_tests/sanity/check_submodules.sh b/tools/run_tests/sanity/check_submodules.sh
index b0a0c3a8eb..7c934b1ba7 100755
--- a/tools/run_tests/sanity/check_submodules.sh
+++ b/tools/run_tests/sanity/check_submodules.sh
@@ -33,7 +33,7 @@ cat << EOF | awk '{ print $1 }' | sort > $want_submodules
ec44c6c1675c25b9827aacd08c02433cccde7780 third_party/googletest (release-1.8.0)
80a37e0782d2d702d52234b62dd4b9ec74fd2c95 third_party/protobuf (v3.4.0)
cacf7f1d4e3d44d871b605da3b647f07d718623f third_party/zlib (v1.2.11)
- 7691f773af79bf75a62d1863fd0f13ebf9dc51b1 third_party/cares/cares (1.12.0)
+ 3be1924221e1326df520f8498d704a5c4c8d0cce third_party/cares/cares (cares-1_13_0)
EOF
diff -u $submodules $want_submodules