author    Craig Tiller <ctiller@google.com> 2016-03-31 07:49:58 -0700
committer Craig Tiller <ctiller@google.com> 2016-03-31 07:49:58 -0700
commit 64a12c55dfe4bdbdb65b2c508a06f4335f100a98 (patch)
tree   27210a0df4e73a44487d89dc012502fa5d992ee4 /tools
parent 6169d5f7b002a68daa3eda36798cec34de11d57f (diff)
parent 6fa2ce56f3600b044d9e27897823de736e948a73 (diff)
Merge github.com:grpc/grpc into copyright-fix
Diffstat (limited to 'tools')
-rwxr-xr-x  tools/codegen/core/gen_static_metadata.py             29
-rwxr-xr-x  tools/gce/create_interop_worker.sh                     64
-rwxr-xr-x  tools/run_tests/build_python.sh                         1
-rwxr-xr-x  tools/run_tests/jobset.py                               7
-rwxr-xr-x  tools/run_tests/performance/build_performance.sh       40
-rwxr-xr-x  tools/run_tests/performance/remote_host_build.sh       36
-rwxr-xr-x  tools/run_tests/performance/remote_host_prepare.sh     44
-rwxr-xr-x  tools/run_tests/run_performance_tests.py              353
-rwxr-xr-x  tools/run_tests/run_python.sh                           1
-rwxr-xr-x  tools/run_tests/run_tests.py                            5
-rw-r--r--  tools/run_tests/sources_and_headers.json               50
-rw-r--r--  tools/run_tests/tests.json                            613
12 files changed, 1225 insertions, 18 deletions
diff --git a/tools/codegen/core/gen_static_metadata.py b/tools/codegen/core/gen_static_metadata.py
index dd5632805a..b4ba02bbe5 100755
--- a/tools/codegen/core/gen_static_metadata.py
+++ b/tools/codegen/core/gen_static_metadata.py
@@ -69,6 +69,7 @@ CONFIG = [
(':scheme', 'grpc'),
(':authority', ''),
(':method', 'GET'),
+ (':method', 'PUT'),
(':path', '/'),
(':path', '/index.html'),
(':status', '204'),
@@ -232,20 +233,20 @@ with open(sys.argv[0]) as my_source:
if line[0] != '#':
break
copyright.append(line)
- put_banner([H,C], [line[1:].strip() for line in copyright])
+ put_banner([H,C], [line[2:].rstrip() for line in copyright])
put_banner([H,C],
"""WARNING: Auto-generated code.
-To make changes to this file, change tools/codegen/core/gen_static_metadata.py,
-and then re-run it.
+To make changes to this file, change
+tools/codegen/core/gen_static_metadata.py, and then re-run it.
-See metadata.h for an explanation of the interface here, and metadata.c for an
-explanation of what's going on.
+See metadata.h for an explanation of the interface here, and metadata.c for
+an explanation of what's going on.
""".splitlines())
-print >>H, '#ifndef GRPC_INTERNAL_CORE_TRANSPORT_STATIC_METADATA_H'
-print >>H, '#define GRPC_INTERNAL_CORE_TRANSPORT_STATIC_METADATA_H'
+print >>H, '#ifndef GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
+print >>H, '#define GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
print >>H
print >>H, '#include "src/core/lib/transport/metadata.h"'
print >>H
@@ -264,13 +265,13 @@ print >>C
print >>H, '#define GRPC_STATIC_MDELEM_COUNT %d' % len(all_elems)
print >>H, 'extern grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];'
-print >>H, 'extern gpr_uintptr grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];'
+print >>H, 'extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];'
for i, elem in enumerate(all_elems):
print >>H, '/* "%s": "%s" */' % elem
print >>H, '#define %s (&grpc_static_mdelem_table[%d])' % (mangle(elem).upper(), i)
print >>H
print >>C, 'grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];'
-print >>C, 'gpr_uintptr grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {'
+print >>C, 'uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {'
print >>C, ' %s' % ','.join('%d' % static_userdata.get(elem, 0) for elem in all_elems)
print >>C, '};'
print >>C
@@ -285,8 +286,8 @@ def md_idx(m):
if m == m2:
return i
-print >>H, 'extern const gpr_uint8 grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT*2];'
-print >>C, 'const gpr_uint8 grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT*2] = {'
+print >>H, 'extern const uint8_t grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT*2];'
+print >>C, 'const uint8_t grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT*2] = {'
print >>C, ','.join('%d' % str_idx(x) for x in itertools.chain.from_iterable([a,b] for a, b in all_elems))
print >>C, '};'
print >>C
@@ -297,15 +298,15 @@ print >>C, '%s' % ',\n'.join(' "%s"' % s for s in all_strs)
print >>C, '};'
print >>C
-print >>H, 'extern const gpr_uint8 grpc_static_accept_encoding_metadata[%d];' % (1 << len(COMPRESSION_ALGORITHMS))
-print >>C, 'const gpr_uint8 grpc_static_accept_encoding_metadata[%d] = {' % (1 << len(COMPRESSION_ALGORITHMS))
+print >>H, 'extern const uint8_t grpc_static_accept_encoding_metadata[%d];' % (1 << len(COMPRESSION_ALGORITHMS))
+print >>C, 'const uint8_t grpc_static_accept_encoding_metadata[%d] = {' % (1 << len(COMPRESSION_ALGORITHMS))
print >>C, '0,%s' % ','.join('%d' % md_idx(elem) for elem in compression_elems)
print >>C, '};'
print >>C
print >>H, '#define GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs) (&grpc_static_mdelem_table[grpc_static_accept_encoding_metadata[(algs)]])'
-print >>H, '#endif /* GRPC_INTERNAL_CORE_TRANSPORT_STATIC_METADATA_H */'
+print >>H, '#endif /* GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H */'
H.close()
C.close()
diff --git a/tools/gce/create_interop_worker.sh b/tools/gce/create_interop_worker.sh
new file mode 100755
index 0000000000..3c49c6102a
--- /dev/null
+++ b/tools/gce/create_interop_worker.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+# Copyright 2015-2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Creates an interop worker on GCE.
+# IMPORTANT: After this script finishes, there are still some manual
+# steps needed that are hard to automate.
+# See go/grpc-jenkins-setup for followup instructions.
+
+set -ex
+
+cd $(dirname $0)
+
+CLOUD_PROJECT=grpc-testing
+ZONE=us-east1-a # canary gateway is reachable from this zone
+
+INSTANCE_NAME="${1:-grpc-canary-interop2}"
+
+gcloud compute instances create $INSTANCE_NAME \
+ --project="$CLOUD_PROJECT" \
+ --zone "$ZONE" \
+ --machine-type n1-standard-16 \
+ --image ubuntu-15-10 \
+ --boot-disk-size 1000 \
+ --scopes https://www.googleapis.com/auth/xapi.zoo
+
+echo 'Created GCE instance, waiting 60 seconds for it to come online.'
+sleep 60
+
+gcloud compute copy-files \
+ --project="$CLOUD_PROJECT" \
+ --zone "$ZONE" \
+ jenkins_master.pub linux_worker_init.sh ${INSTANCE_NAME}:~
+
+gcloud compute ssh \
+ --project="$CLOUD_PROJECT" \
+ --zone "$ZONE" \
+ $INSTANCE_NAME --command "./linux_worker_init.sh"
diff --git a/tools/run_tests/build_python.sh b/tools/run_tests/build_python.sh
index 23c6e01738..30d121007f 100755
--- a/tools/run_tests/build_python.sh
+++ b/tools/run_tests/build_python.sh
@@ -40,6 +40,7 @@ export PATH=$ROOT/bins/$CONFIG:$ROOT/bins/$CONFIG/protobuf:$PATH
export CFLAGS="-I$ROOT/include -std=gnu99"
export LDFLAGS="-L$ROOT/libs/$CONFIG"
export GRPC_PYTHON_BUILD_WITH_CYTHON=1
+export GRPC_PYTHON_USE_PRECOMPILED_BINARIES=0
if [ "$CONFIG" = "gcov" ]
then
diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py
index af4b5e09b5..e9675fb785 100755
--- a/tools/run_tests/jobset.py
+++ b/tools/run_tests/jobset.py
@@ -151,7 +151,8 @@ class JobSpec(object):
def __init__(self, cmdline, shortname=None, environ=None, hash_targets=None,
cwd=None, shell=False, timeout_seconds=5*60, flake_retries=0,
- timeout_retries=0, kill_handler=None, cpu_cost=1.0):
+ timeout_retries=0, kill_handler=None, cpu_cost=1.0,
+ verbose_success=False):
"""
Arguments:
cmdline: a list of arguments to pass as the command line
@@ -176,6 +177,7 @@ class JobSpec(object):
self.timeout_retries = timeout_retries
self.kill_handler = kill_handler
self.cpu_cost = cpu_cost
+ self.verbose_success = verbose_success
def identity(self):
return '%r %r %r' % (self.cmdline, self.environ, self.hash_targets)
@@ -287,7 +289,8 @@ class Job(object):
cores = (user + sys) / real
measurement = '; cpu_cost=%.01f; estimated=%.01f' % (cores, self._spec.cpu_cost)
message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % (
- self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
+ self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
+ stdout() if self._spec.verbose_success else None,
do_newline=self._newline_on_success or self._travis)
self.result.state = 'PASSED'
if self._bin_hash:
diff --git a/tools/run_tests/performance/build_performance.sh b/tools/run_tests/performance/build_performance.sh
new file mode 100755
index 0000000000..00cc41ec73
--- /dev/null
+++ b/tools/run_tests/performance/build_performance.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# Copyright 2015-2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+set -ex
+
+cd $(dirname $0)/../../..
+
+#TODO(jtattermusch): add support for more languages
+
+CONFIG=${CONFIG:-opt}
+
+# build C++ qps worker & driver
+make CONFIG=${CONFIG} qps_worker qps_driver -j8
diff --git a/tools/run_tests/performance/remote_host_build.sh b/tools/run_tests/performance/remote_host_build.sh
new file mode 100755
index 0000000000..f23ea921ce
--- /dev/null
+++ b/tools/run_tests/performance/remote_host_build.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# Copyright 2015-2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+set -ex
+
+cd $(dirname $0)/../../..
+
+# execute the build script remotely
+ssh "${USER_AT_HOST}" "CONFIG=${CONFIG} ~/performance_workspace/grpc/tools/run_tests/performance/build_performance.sh"
diff --git a/tools/run_tests/performance/remote_host_prepare.sh b/tools/run_tests/performance/remote_host_prepare.sh
new file mode 100755
index 0000000000..bad2424a6b
--- /dev/null
+++ b/tools/run_tests/performance/remote_host_prepare.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2015-2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+set -ex
+
+cd $(dirname $0)/../../..
+
+# cleanup after previous builds
+ssh "${USER_AT_HOST}" "rm -rf ~/performance_workspace && mkdir -p ~/performance_workspace"
+
+# TODO(jtattermusch): To be sure there are no running processes that would
+# mess with the results, be rough and reboot the slave here
+# and wait for it to come back online.
+
+# push the current sources to the slave and unpack them.
+scp ../grpc.tar "${USER_AT_HOST}:~/performance_workspace"
+ssh "${USER_AT_HOST}" "tar -xf ~/performance_workspace/grpc.tar -C ~/performance_workspace" \ No newline at end of file
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
new file mode 100755
index 0000000000..77c0addb42
--- /dev/null
+++ b/tools/run_tests/run_performance_tests.py
@@ -0,0 +1,353 @@
+#!/usr/bin/env python2.7
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Run performance tests locally or remotely."""
+
+import argparse
+import jobset
+import multiprocessing
+import os
+import subprocess
+import sys
+import tempfile
+import time
+import uuid
+
+
+_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
+os.chdir(_ROOT)
+
+
+_REMOTE_HOST_USERNAME = 'jenkins'
+
+
+class CXXLanguage:
+
+ def __init__(self):
+ self.safename = 'cxx'
+
+ def scenarios(self):
+ # TODO(jtattermusch): add more scenarios
+ return {
+ # Scenario 1: generic async streaming ping-pong (contentionless latency)
+ 'cpp_async_generic_streaming_ping_pong': [
+ '--rpc_type=STREAMING',
+ '--client_type=ASYNC_CLIENT',
+ '--server_type=ASYNC_GENERIC_SERVER',
+ '--outstanding_rpcs_per_channel=1',
+ '--client_channels=1',
+ '--bbuf_req_size=0',
+ '--bbuf_resp_size=0',
+ '--async_client_threads=1',
+ '--async_server_threads=1',
+ '--secure_test=true',
+ '--num_servers=1',
+ '--num_clients=1',
+ '--server_core_limit=0',
+ '--client_core_limit=0'],
+ # Scenario 5: Sync unary ping-pong with protobufs
+ 'cpp_sync_unary_ping_pong_protobuf': [
+ '--rpc_type=UNARY',
+ '--client_type=SYNC_CLIENT',
+ '--server_type=SYNC_SERVER',
+ '--outstanding_rpcs_per_channel=1',
+ '--client_channels=1',
+ '--simple_req_size=0',
+ '--simple_resp_size=0',
+ '--secure_test=true',
+ '--num_servers=1',
+ '--num_clients=1',
+ '--server_core_limit=0',
+ '--client_core_limit=0']}
+
+ def __str__(self):
+ return 'c++'
+
+
+class CSharpLanguage:
+
+ def __init__(self):
+ self.safename = str(self)
+
+ def __str__(self):
+ return 'csharp'
+
+
+class NodeLanguage:
+
+ def __init__(self):
+ pass
+ self.safename = str(self)
+
+ def __str__(self):
+ return 'node'
+
+
+_LANGUAGES = {
+ 'c++' : CXXLanguage(),
+ 'csharp' : CSharpLanguage(),
+ 'node' : NodeLanguage(),
+}
+
+
+class QpsWorkerJob:
+ """Encapsulates a qps worker server job."""
+
+ def __init__(self, spec, host_and_port):
+ self._spec = spec
+ self.host_and_port = host_and_port
+ self._job = jobset.Job(spec, bin_hash=None, newline_on_success=True, travis=True, add_env={})
+
+ def is_running(self):
+ """Polls a job and returns True if given job is still running."""
+ return self._job.state(jobset.NoCache()) == jobset._RUNNING
+
+ def kill(self):
+ return self._job.kill()
+
+
+def create_qpsworker_job(language, port=10000, remote_host=None):
+ # TODO: support more languages
+ cmd = 'bins/opt/qps_worker --driver_port=%s' % port
+ if remote_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+ cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && %s"' % (user_at_host, cmd)
+ host_and_port='%s:%s' % (remote_host, port)
+ else:
+ host_and_port='localhost:%s' % port
+
+ jobspec = jobset.JobSpec(
+ cmdline=[cmd],
+ shortname='qps_worker',
+ timeout_seconds=15*60,
+ shell=True)
+ return QpsWorkerJob(jobspec, host_and_port)
+
+
+def create_scenario_jobspec(scenario_name, driver_args, workers, remote_host=None):
+ """Runs one scenario using QPS driver."""
+ # setting QPS_WORKERS env variable here makes sure it works with SSH too.
+ cmd = 'QPS_WORKERS="%s" bins/opt/qps_driver ' % ','.join(workers)
+ cmd += ' '.join(driver_args)
+ if remote_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+ cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && %s"' % (user_at_host, cmd)
+
+ return jobset.JobSpec(
+ cmdline=[cmd],
+ shortname='qps_driver.%s' % scenario_name,
+ timeout_seconds=3*60,
+ shell=True,
+ verbose_success=True)
+
+
+def archive_repo():
+ """Archives local version of repo including submodules."""
+ # TODO: also archive grpc-go and grpc-java repos
+ archive_job = jobset.JobSpec(
+ cmdline=['tar', '-cf', '../grpc.tar', '../grpc/'],
+ shortname='archive_repo',
+ timeout_seconds=3*60)
+
+ jobset.message('START', 'Archiving local repository.', do_newline=True)
+ num_failures, _ = jobset.run(
+ [archive_job], newline_on_success=True, maxjobs=1)
+ if num_failures == 0:
+ jobset.message('SUCCESS',
+ 'Archive with local repository created successfully.',
+ do_newline=True)
+ else:
+ jobset.message('FAILED', 'Failed to archive local repository.',
+ do_newline=True)
+ sys.exit(1)
+
+
+def prepare_remote_hosts(hosts):
+ """Prepares remote hosts."""
+ prepare_jobs = []
+ for host in hosts:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
+ prepare_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
+ shortname='remote_host_prepare.%s' % host,
+ environ = {'USER_AT_HOST': user_at_host},
+ timeout_seconds=3*60))
+ jobset.message('START', 'Preparing remote hosts.', do_newline=True)
+ num_failures, _ = jobset.run(
+ prepare_jobs, newline_on_success=True, maxjobs=10)
+ if num_failures == 0:
+ jobset.message('SUCCESS',
+ 'Remote hosts ready to start build.',
+ do_newline=True)
+ else:
+ jobset.message('FAILED', 'Failed to prepare remote hosts.',
+ do_newline=True)
+ sys.exit(1)
+
+
+def build_on_remote_hosts(hosts, build_local=False):
+ """Builds performance worker on remote hosts."""
+ build_timeout = 15*60
+ build_jobs = []
+ for host in hosts:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
+ build_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/remote_host_build.sh'],
+ shortname='remote_host_build.%s' % host,
+ environ = {'USER_AT_HOST': user_at_host, 'CONFIG': 'opt'},
+ timeout_seconds=build_timeout))
+ if build_local:
+ # Build locally as well
+ build_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/build_performance.sh'],
+ shortname='local_build',
+ environ = {'CONFIG': 'opt'},
+ timeout_seconds=build_timeout))
+ jobset.message('START', 'Building on remote hosts.', do_newline=True)
+ num_failures, _ = jobset.run(
+ build_jobs, newline_on_success=True, maxjobs=10)
+ if num_failures == 0:
+ jobset.message('SUCCESS',
+ 'Build on remote hosts was successful.',
+ do_newline=True)
+ else:
+ jobset.message('FAILED', 'Failed to build on remote hosts.',
+ do_newline=True)
+ sys.exit(1)
+
+
+def start_qpsworkers(worker_hosts):
+ """Starts QPS workers as background jobs."""
+ if not worker_hosts:
+ # run two workers locally
+ workers=[(None, 10000), (None, 10010)]
+ elif len(worker_hosts) == 1:
+ # run two workers on the remote host
+ workers=[(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
+ else:
+ # run one worker per remote host
+ workers=[(worker_host, 10000) for worker_host in worker_hosts]
+
+ return [create_qpsworker_job(CXXLanguage(),
+ port=worker[1],
+ remote_host=worker[0])
+ for worker in workers]
+
+
+def create_scenarios(languages, workers, remote_host=None):
+ """Create jobspecs for scenarios to run."""
+ scenarios = []
+ for language in languages:
+ for scenario_name, driver_args in language.scenarios().iteritems():
+ scenario = create_scenario_jobspec(scenario_name,
+ driver_args,
+ workers,
+ remote_host=remote_host)
+ scenarios.append(scenario)
+
+ # the very last scenario requests shutting down the workers.
+ scenarios.append(create_scenario_jobspec('quit_workers',
+ ['--quit=true'],
+ workers,
+ remote_host=remote_host))
+ return scenarios
+
+
+def finish_qps_workers(jobs):
+ """Waits for given jobs to finish and eventually kills them."""
+ retries = 0
+ while any(job.is_running() for job in jobs):
+ for job in jobs:
+ if job.is_running():
+ print 'QPS worker "%s" is still running.' % job.host_and_port
+ if retries > 10:
+ print 'Killing all QPS workers.'
+ for job in jobs:
+ job.kill()
+ retries += 1
+ time.sleep(3)
+ print 'All QPS workers finished.'
+
+
+argp = argparse.ArgumentParser(description='Run performance tests.')
+argp.add_argument('--remote_driver_host',
+ default=None,
+ help='Run QPS driver on given host. By default, QPS driver is run locally.')
+argp.add_argument('--remote_worker_host',
+ nargs='+',
+ default=[],
+ help='Worker hosts on which to start QPS workers.')
+
+args = argp.parse_args()
+
+# Put together the set of remote hosts to prepare and build on
+remote_hosts = set()
+if args.remote_worker_host:
+ for host in args.remote_worker_host:
+ remote_hosts.add(host)
+if args.remote_driver_host:
+ remote_hosts.add(args.remote_driver_host)
+
+if remote_hosts:
+ archive_repo()
+ prepare_remote_hosts(remote_hosts)
+
+build_local = False
+if not args.remote_driver_host:
+ build_local = True
+build_on_remote_hosts(remote_hosts, build_local=build_local)
+
+qpsworker_jobs = start_qpsworkers(args.remote_worker_host)
+
+worker_addresses = [job.host_and_port for job in qpsworker_jobs]
+
+try:
+ scenarios = create_scenarios(languages=[CXXLanguage()],
+ workers=worker_addresses,
+ remote_host=args.remote_driver_host)
+ if not scenarios:
+ raise Exception('No scenarios to run')
+
+ jobset.message('START', 'Running scenarios.', do_newline=True)
+ num_failures, _ = jobset.run(
+ scenarios, newline_on_success=True, maxjobs=1)
+ if num_failures == 0:
+ jobset.message('SUCCESS',
+ 'All scenarios finished successfully.',
+ do_newline=True)
+ else:
+ jobset.message('FAILED', 'Some of the scenarios failed.',
+ do_newline=True)
+ sys.exit(1)
+finally:
+ finish_qps_workers(qpsworker_jobs)
diff --git a/tools/run_tests/run_python.sh b/tools/run_tests/run_python.sh
index a8c4a8c008..a93ef2576d 100755
--- a/tools/run_tests/run_python.sh
+++ b/tools/run_tests/run_python.sh
@@ -40,6 +40,7 @@ export PATH=$ROOT/bins/$CONFIG:$ROOT/bins/$CONFIG/protobuf:$PATH
export CFLAGS="-I$ROOT/include -std=c89"
export LDFLAGS="-L$ROOT/libs/$CONFIG"
export GRPC_PYTHON_BUILD_WITH_CYTHON=1
+export GRPC_PYTHON_USE_PRECOMPILED_BINARIES=0
if [ "$CONFIG" = "gcov" ]
then
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 14da220e26..a13f2a3a35 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -478,8 +478,9 @@ class CSharpLanguage(object):
_check_compiler(self.args.compiler, ['default'])
if self.platform == 'mac':
# On Mac, official distribution of mono is 32bit.
- self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true',
- 'CFLAGS=-arch i386', 'LDFLAGS=-arch i386']
+ # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
+ self._make_options = ['EMBED_OPENSSL=true',
+ 'CFLAGS=-m32', 'LDFLAGS=-m32']
else:
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json
index d7c9839d5a..0807c54773 100644
--- a/tools/run_tests/sources_and_headers.json
+++ b/tools/run_tests/sources_and_headers.json
@@ -1035,6 +1035,54 @@
],
"headers": [],
"language": "c",
+ "name": "internal_api_canary_iomgr_test",
+ "src": [
+ "test/core/internal_api_canaries/iomgr.c"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "gpr",
+ "gpr_test_util",
+ "grpc",
+ "grpc_test_util"
+ ],
+ "headers": [],
+ "language": "c",
+ "name": "internal_api_canary_support_test",
+ "src": [
+ "test/core/internal_api_canaries/iomgr.c"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "gpr",
+ "gpr_test_util",
+ "grpc",
+ "grpc_test_util"
+ ],
+ "headers": [],
+ "language": "c",
+ "name": "internal_api_canary_transport_test",
+ "src": [
+ "test/core/internal_api_canaries/iomgr.c"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "gpr",
+ "gpr_test_util",
+ "grpc",
+ "grpc_test_util"
+ ],
+ "headers": [],
+ "language": "c",
"name": "invalid_call_argument_test",
"src": [
"test/core/end2end/invalid_call_argument_test.c"
@@ -6579,6 +6627,7 @@
"test/core/end2end/tests/graceful_server_shutdown.c",
"test/core/end2end/tests/high_initial_seqno.c",
"test/core/end2end/tests/hpack_size.c",
+ "test/core/end2end/tests/idempotent_request.c",
"test/core/end2end/tests/invoke_large_request.c",
"test/core/end2end/tests/large_metadata.c",
"test/core/end2end/tests/max_concurrent_streams.c",
@@ -6635,6 +6684,7 @@
"test/core/end2end/tests/graceful_server_shutdown.c",
"test/core/end2end/tests/high_initial_seqno.c",
"test/core/end2end/tests/hpack_size.c",
+ "test/core/end2end/tests/idempotent_request.c",
"test/core/end2end/tests/invoke_large_request.c",
"test/core/end2end/tests/large_metadata.c",
"test/core/end2end/tests/max_concurrent_streams.c",
diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json
index 5f72b8c582..e516543dae 100644
--- a/tools/run_tests/tests.json
+++ b/tools/run_tests/tests.json
@@ -4651,6 +4651,28 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_census_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -5443,6 +5465,28 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_compress_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -6218,6 +6262,27 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_fakesec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -6991,6 +7056,28 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -7681,6 +7768,22 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+pipe_test",
+ "platforms": [
+ "linux"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -8257,6 +8360,22 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+poll_test",
+ "platforms": [
+ "linux"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -8833,6 +8952,22 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+poll+pipe_test",
+ "platforms": [
+ "linux"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -9489,6 +9624,28 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+trace_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -10264,6 +10421,27 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_oauth2_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -10957,6 +11135,27 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_proxy_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -11587,6 +11786,27 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -12217,6 +12437,27 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair+trace_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -12868,6 +13109,27 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair_1byte_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -13599,6 +13861,28 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_ssl_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -14289,6 +14573,22 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_ssl+poll_test",
+ "platforms": [
+ "linux"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -14887,6 +15187,27 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_ssl_proxy_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -15543,6 +15864,26 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_uds_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -16179,6 +16520,22 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_uds+poll_test",
+ "platforms": [
+ "linux"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -16835,6 +17192,28 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_census_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -17605,6 +17984,28 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_compress_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -18375,6 +18776,28 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -19049,6 +19472,22 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+pipe_nosec_test",
+ "platforms": [
+ "linux"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -19609,6 +20048,22 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+poll_nosec_test",
+ "platforms": [
+ "linux"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -20169,6 +20624,22 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+poll+pipe_nosec_test",
+ "platforms": [
+ "linux"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -20803,6 +21274,28 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+trace_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -21494,6 +21987,27 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_proxy_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -22103,6 +22617,27 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -22712,6 +23247,27 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair+trace_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -23342,6 +23898,27 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair_1byte_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -23999,6 +24576,26 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_uds_nosec_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [
@@ -24619,6 +25216,22 @@
},
{
"args": [
+ "idempotent_request"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_uds+poll_nosec_test",
+ "platforms": [
+ "linux"
+ ]
+ },
+ {
+ "args": [
"invoke_large_request"
],
"ci_platforms": [