Diffstat (limited to 'tools/run_tests/performance')
-rw-r--r--   tools/run_tests/performance/OWNERS                                 9
-rwxr-xr-x   tools/run_tests/performance/bq_upload_result.py                  219
-rwxr-xr-x   tools/run_tests/performance/build_performance.sh                  19
-rwxr-xr-x   tools/run_tests/performance/build_performance_go.sh                6
-rwxr-xr-x   tools/run_tests/performance/build_performance_php7.sh (renamed from tools/run_tests/performance/run_worker_node.sh)   19
-rwxr-xr-x   tools/run_tests/performance/kill_workers.sh                        9
-rw-r--r--   tools/run_tests/performance/massage_qps_stats.py                 629
-rw-r--r--   tools/run_tests/performance/massage_qps_stats_helpers.py          71
-rwxr-xr-x   tools/run_tests/performance/patch_scenario_results_schema.py      35
-rwxr-xr-x   tools/run_tests/performance/process_local_perf_flamegraphs.sh     10
-rwxr-xr-x   tools/run_tests/performance/process_remote_perf_flamegraphs.sh    15
-rwxr-xr-x   tools/run_tests/performance/remote_host_build.sh                   3
-rwxr-xr-x   tools/run_tests/performance/remote_host_prepare.sh                 5
-rwxr-xr-x   tools/run_tests/performance/run_netperf.sh                         2
-rwxr-xr-x   tools/run_tests/performance/run_qps_driver.sh                      2
-rwxr-xr-x   tools/run_tests/performance/run_worker_csharp.sh                   4
-rwxr-xr-x   tools/run_tests/performance/run_worker_go.sh                       4
-rwxr-xr-x   tools/run_tests/performance/run_worker_java.sh                     4
-rwxr-xr-x   tools/run_tests/performance/run_worker_php.sh                      9
-rwxr-xr-x   tools/run_tests/performance/run_worker_python.sh                   4
-rwxr-xr-x   tools/run_tests/performance/run_worker_ruby.sh                     4
-rw-r--r--   tools/run_tests/performance/scenario_config.py                  1934
-rw-r--r--   tools/run_tests/performance/scenario_result_schema.json          126
23 files changed, 1803 insertions(+), 1339 deletions(-)
diff --git a/tools/run_tests/performance/OWNERS b/tools/run_tests/performance/OWNERS
new file mode 100644
index 0000000000..98c81529f3
--- /dev/null
+++ b/tools/run_tests/performance/OWNERS
@@ -0,0 +1,9 @@
+set noparent
+
+# These owners are in place to ensure that scenario_result_schema.json is not
+# modified without also running tools/run_tests/performance/patch_scenario_results_schema.py
+# to update the BigQuery schema
+
+@ncteisen
+@matt-kwong
+@ctiller
diff --git a/tools/run_tests/performance/bq_upload_result.py b/tools/run_tests/performance/bq_upload_result.py
index 31819d6159..6702587557 100755
--- a/tools/run_tests/performance/bq_upload_result.py
+++ b/tools/run_tests/performance/bq_upload_result.py
@@ -26,146 +26,161 @@ import time
import uuid
import massage_qps_stats
-
-gcp_utils_dir = os.path.abspath(os.path.join(
- os.path.dirname(__file__), '../../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils
-
-_PROJECT_ID='grpc-testing'
+_PROJECT_ID = 'grpc-testing'
def _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id, result_file):
- with open(result_file, 'r') as f:
- (col1, col2, col3) = f.read().split(',')
- latency50 = float(col1.strip()) * 1000
- latency90 = float(col2.strip()) * 1000
- latency99 = float(col3.strip()) * 1000
-
- scenario_result = {
- 'scenario': {
- 'name': 'netperf_tcp_rr'
- },
- 'summary': {
- 'latency50': latency50,
- 'latency90': latency90,
- 'latency99': latency99
+ with open(result_file, 'r') as f:
+ (col1, col2, col3) = f.read().split(',')
+ latency50 = float(col1.strip()) * 1000
+ latency90 = float(col2.strip()) * 1000
+ latency99 = float(col3.strip()) * 1000
+
+ scenario_result = {
+ 'scenario': {
+ 'name': 'netperf_tcp_rr'
+ },
+ 'summary': {
+ 'latency50': latency50,
+ 'latency90': latency90,
+ 'latency99': latency99
+ }
}
- }
- bq = big_query_utils.create_big_query()
- _create_results_table(bq, dataset_id, table_id)
+ bq = big_query_utils.create_big_query()
+ _create_results_table(bq, dataset_id, table_id)
- if not _insert_result(bq, dataset_id, table_id, scenario_result, flatten=False):
- print('Error uploading result to bigquery.')
- sys.exit(1)
+ if not _insert_result(
+ bq, dataset_id, table_id, scenario_result, flatten=False):
+ print('Error uploading result to bigquery.')
+ sys.exit(1)
def _upload_scenario_result_to_bigquery(dataset_id, table_id, result_file):
- with open(result_file, 'r') as f:
- scenario_result = json.loads(f.read())
+ with open(result_file, 'r') as f:
+ scenario_result = json.loads(f.read())
- bq = big_query_utils.create_big_query()
- _create_results_table(bq, dataset_id, table_id)
+ bq = big_query_utils.create_big_query()
+ _create_results_table(bq, dataset_id, table_id)
- if not _insert_result(bq, dataset_id, table_id, scenario_result):
- print('Error uploading result to bigquery.')
- sys.exit(1)
+ if not _insert_result(bq, dataset_id, table_id, scenario_result):
+ print('Error uploading result to bigquery.')
+ sys.exit(1)
def _insert_result(bq, dataset_id, table_id, scenario_result, flatten=True):
- if flatten:
- _flatten_result_inplace(scenario_result)
- _populate_metadata_inplace(scenario_result)
- row = big_query_utils.make_row(str(uuid.uuid4()), scenario_result)
- return big_query_utils.insert_rows(bq,
- _PROJECT_ID,
- dataset_id,
- table_id,
- [row])
+ if flatten:
+ _flatten_result_inplace(scenario_result)
+ _populate_metadata_inplace(scenario_result)
+ row = big_query_utils.make_row(str(uuid.uuid4()), scenario_result)
+ return big_query_utils.insert_rows(bq, _PROJECT_ID, dataset_id, table_id,
+ [row])
def _create_results_table(bq, dataset_id, table_id):
- with open(os.path.dirname(__file__) + '/scenario_result_schema.json', 'r') as f:
- table_schema = json.loads(f.read())
- desc = 'Results of performance benchmarks.'
- return big_query_utils.create_table2(bq, _PROJECT_ID, dataset_id,
- table_id, table_schema, desc)
+ with open(os.path.dirname(__file__) + '/scenario_result_schema.json',
+ 'r') as f:
+ table_schema = json.loads(f.read())
+ desc = 'Results of performance benchmarks.'
+ return big_query_utils.create_table2(bq, _PROJECT_ID, dataset_id, table_id,
+ table_schema, desc)
def _flatten_result_inplace(scenario_result):
- """Bigquery is not really great for handling deeply nested data
+ """Bigquery is not really great for handling deeply nested data
and repeated fields. To maintain values of some fields while keeping
the schema relatively simple, we artificially leave some of the fields
as JSON strings.
"""
- scenario_result['scenario']['clientConfig'] = json.dumps(scenario_result['scenario']['clientConfig'])
- scenario_result['scenario']['serverConfig'] = json.dumps(scenario_result['scenario']['serverConfig'])
- scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
- scenario_result['serverCpuStats'] = []
- for stats in scenario_result['serverStats']:
- scenario_result['serverCpuStats'].append(dict())
- scenario_result['serverCpuStats'][-1]['totalCpuTime'] = stats.pop('totalCpuTime', None)
- scenario_result['serverCpuStats'][-1]['idleCpuTime'] = stats.pop('idleCpuTime', None)
- for stats in scenario_result['clientStats']:
- stats['latencies'] = json.dumps(stats['latencies'])
- stats.pop('requestResults', None)
- scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
- scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
- scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
- scenario_result['requestResults'] = json.dumps(scenario_result.get('requestResults', []))
- scenario_result['serverCpuUsage'] = scenario_result['summary'].pop('serverCpuUsage', None)
- scenario_result['summary'].pop('successfulRequestsPerSecond', None)
- scenario_result['summary'].pop('failedRequestsPerSecond', None)
- massage_qps_stats.massage_qps_stats(scenario_result)
+ scenario_result['scenario']['clientConfig'] = json.dumps(
+ scenario_result['scenario']['clientConfig'])
+ scenario_result['scenario']['serverConfig'] = json.dumps(
+ scenario_result['scenario']['serverConfig'])
+ scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
+ scenario_result['serverCpuStats'] = []
+ for stats in scenario_result['serverStats']:
+ scenario_result['serverCpuStats'].append(dict())
+ scenario_result['serverCpuStats'][-1]['totalCpuTime'] = stats.pop(
+ 'totalCpuTime', None)
+ scenario_result['serverCpuStats'][-1]['idleCpuTime'] = stats.pop(
+ 'idleCpuTime', None)
+ for stats in scenario_result['clientStats']:
+ stats['latencies'] = json.dumps(stats['latencies'])
+ stats.pop('requestResults', None)
+ scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
+ scenario_result['clientSuccess'] = json.dumps(
+ scenario_result['clientSuccess'])
+ scenario_result['serverSuccess'] = json.dumps(
+ scenario_result['serverSuccess'])
+ scenario_result['requestResults'] = json.dumps(
+ scenario_result.get('requestResults', []))
+ scenario_result['serverCpuUsage'] = scenario_result['summary'].pop(
+ 'serverCpuUsage', None)
+ scenario_result['summary'].pop('successfulRequestsPerSecond', None)
+ scenario_result['summary'].pop('failedRequestsPerSecond', None)
+ massage_qps_stats.massage_qps_stats(scenario_result)
def _populate_metadata_inplace(scenario_result):
- """Populates metadata based on environment variables set by Jenkins."""
- # NOTE: Grabbing the Jenkins environment variables will only work if the
- # driver is running locally on the same machine where Jenkins has started
- # the job. For our setup, this is currently the case, so just assume that.
- build_number = os.getenv('BUILD_NUMBER')
- build_url = os.getenv('BUILD_URL')
- job_name = os.getenv('JOB_NAME')
- git_commit = os.getenv('GIT_COMMIT')
- # actual commit is the actual head of PR that is getting tested
- git_actual_commit = os.getenv('ghprbActualCommit')
-
- utc_timestamp = str(calendar.timegm(time.gmtime()))
- metadata = {'created': utc_timestamp}
-
- if build_number:
- metadata['buildNumber'] = build_number
- if build_url:
- metadata['buildUrl'] = build_url
- if job_name:
- metadata['jobName'] = job_name
- if git_commit:
- metadata['gitCommit'] = git_commit
- if git_actual_commit:
- metadata['gitActualCommit'] = git_actual_commit
-
- scenario_result['metadata'] = metadata
+ """Populates metadata based on environment variables set by Jenkins."""
+ # NOTE: Grabbing the Jenkins environment variables will only work if the
+ # driver is running locally on the same machine where Jenkins has started
+ # the job. For our setup, this is currently the case, so just assume that.
+ build_number = os.getenv('BUILD_NUMBER')
+ build_url = os.getenv('BUILD_URL')
+ job_name = os.getenv('JOB_NAME')
+ git_commit = os.getenv('GIT_COMMIT')
+ # actual commit is the actual head of PR that is getting tested
+ git_actual_commit = os.getenv('ghprbActualCommit')
+
+ utc_timestamp = str(calendar.timegm(time.gmtime()))
+ metadata = {'created': utc_timestamp}
+
+ if build_number:
+ metadata['buildNumber'] = build_number
+ if build_url:
+ metadata['buildUrl'] = build_url
+ if job_name:
+ metadata['jobName'] = job_name
+ if git_commit:
+ metadata['gitCommit'] = git_commit
+ if git_actual_commit:
+ metadata['gitActualCommit'] = git_actual_commit
+
+ scenario_result['metadata'] = metadata
argp = argparse.ArgumentParser(description='Upload result to big query.')
-argp.add_argument('--bq_result_table', required=True, default=None, type=str,
- help='Bigquery "dataset.table" to upload results to.')
-argp.add_argument('--file_to_upload', default='scenario_result.json', type=str,
- help='Report file to upload.')
-argp.add_argument('--file_format',
- choices=['scenario_result','netperf_latency_csv'],
- default='scenario_result',
- help='Format of the file to upload.')
+argp.add_argument(
+ '--bq_result_table',
+ required=True,
+ default=None,
+ type=str,
+ help='Bigquery "dataset.table" to upload results to.')
+argp.add_argument(
+ '--file_to_upload',
+ default='scenario_result.json',
+ type=str,
+ help='Report file to upload.')
+argp.add_argument(
+ '--file_format',
+ choices=['scenario_result', 'netperf_latency_csv'],
+ default='scenario_result',
+ help='Format of the file to upload.')
args = argp.parse_args()
dataset_id, table_id = args.bq_result_table.split('.', 2)
if args.file_format == 'netperf_latency_csv':
- _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id, args.file_to_upload)
+ _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id,
+ args.file_to_upload)
else:
- _upload_scenario_result_to_bigquery(dataset_id, table_id, args.file_to_upload)
+ _upload_scenario_result_to_bigquery(dataset_id, table_id,
+ args.file_to_upload)
print('Successfully uploaded %s to BigQuery.\n' % args.file_to_upload)
diff --git a/tools/run_tests/performance/build_performance.sh b/tools/run_tests/performance/build_performance.sh
index e46d4e0040..22e0ca9fa0 100755
--- a/tools/run_tests/performance/build_performance.sh
+++ b/tools/run_tests/performance/build_performance.sh
@@ -16,7 +16,7 @@
source ~/.rvm/scripts/rvm
set -ex
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
CONFIG=${CONFIG:-opt}
@@ -28,10 +28,11 @@ then
# TODO(jtattermusch): not embedding OpenSSL breaks the C# build because
# grpc_csharp_ext needs OpenSSL embedded and some intermediate files from
# this build will be reused.
- make CONFIG=${CONFIG} EMBED_OPENSSL=true EMBED_ZLIB=true qps_worker qps_json_driver -j8
+ make CONFIG="${CONFIG}" EMBED_OPENSSL=true EMBED_ZLIB=true qps_worker qps_json_driver -j8
fi
-for language in $@
+PHP_ALREADY_BUILT=""
+for language in "$@"
do
case "$language" in
"c++")
@@ -43,11 +44,19 @@ do
"go")
tools/run_tests/performance/build_performance_go.sh
;;
+ "php7"|"php7_protobuf_c")
+ if [ -n "$PHP_ALREADY_BUILT" ]; then
+ echo "Skipping PHP build as already built by $PHP_ALREADY_BUILT"
+ else
+ PHP_ALREADY_BUILT=$language
+ tools/run_tests/performance/build_performance_php7.sh
+ fi
+ ;;
"csharp")
- python tools/run_tests/run_tests.py -l $language -c $CONFIG --build_only -j 8 --compiler coreclr
+ python tools/run_tests/run_tests.py -l "$language" -c "$CONFIG" --build_only -j 8 --compiler coreclr
;;
*)
- python tools/run_tests/run_tests.py -l $language -c $CONFIG --build_only -j 8
+ python tools/run_tests/run_tests.py -l "$language" -c "$CONFIG" --build_only -j 8
;;
esac
done
diff --git a/tools/run_tests/performance/build_performance_go.sh b/tools/run_tests/performance/build_performance_go.sh
index 5e97fb68d3..812728d4ce 100755
--- a/tools/run_tests/performance/build_performance_go.sh
+++ b/tools/run_tests/performance/build_performance_go.sh
@@ -15,7 +15,7 @@
set -ex
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
export GOPATH=$(pwd)/../gopath
@@ -24,6 +24,6 @@ go get google.golang.org/grpc
rm -rf "${GOPATH}/src/google.golang.org/grpc"
# Get the revision of grpc-go we want to test
-git clone --recursive ../grpc-go ${GOPATH}/src/google.golang.org/grpc
+git clone --recursive ../grpc-go "${GOPATH}/src/google.golang.org/grpc"
-(cd ${GOPATH}/src/google.golang.org/grpc/benchmark/worker && go install)
+(cd "${GOPATH}/src/google.golang.org/grpc/benchmark/worker" && go install)
diff --git a/tools/run_tests/performance/run_worker_node.sh b/tools/run_tests/performance/build_performance_php7.sh
index 1286c831fb..37ca9ee877 100755
--- a/tools/run_tests/performance/run_worker_node.sh
+++ b/tools/run_tests/performance/build_performance_php7.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2015 gRPC authors.
+# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,11 +13,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-source ~/.nvm/nvm.sh
-nvm use 8
-
set -ex
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
+CONFIG=${CONFIG:-opt}
+python tools/run_tests/run_tests.py -l php7 -c "$CONFIG" --build_only -j 8
+
+# Set up all dependences needed for PHP QPS test
+cd src/php/tests/qps
+composer install
+# Install protobuf C-extension for php
+cd vendor/google/protobuf/php/ext/google/protobuf
+phpize
+./configure
+make
-node src/node/performance/worker.js $@
diff --git a/tools/run_tests/performance/kill_workers.sh b/tools/run_tests/performance/kill_workers.sh
index dd17eea5f2..95a5bf5dcb 100755
--- a/tools/run_tests/performance/kill_workers.sh
+++ b/tools/run_tests/performance/kill_workers.sh
@@ -15,7 +15,7 @@
set -ex
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
# Make sure there are no pre-existing QPS workers around before starting
# the performance test suite
@@ -24,16 +24,17 @@ cd $(dirname $0)/../../..
killall -9 qps_worker || true
# C#
+# shellcheck disable=SC2009
ps -C mono -o pid=,cmd= | grep QpsWorker | awk '{print $1}' | xargs kill -9 || true
+# shellcheck disable=SC2009
ps -C dotnet -o pid=,cmd= | grep QpsWorker | awk '{print $1}' | xargs kill -9 || true
# Ruby
+# shellcheck disable=SC2009
ps -C ruby -o pid=,cmd= | grep 'qps/worker.rb' | awk '{print $1}' | xargs kill -9 || true
-# Node
-ps -C node -o pid=,cmd= | grep 'performance/worker.js' | awk '{print $1}' | xargs kill -9 || true
-
# Python
+# shellcheck disable=SC2009
ps -C python -o pid=,cmd= | grep 'qps_worker.py' | awk '{print $1}' | xargs kill -9 || true
# Java
diff --git a/tools/run_tests/performance/massage_qps_stats.py b/tools/run_tests/performance/massage_qps_stats.py
index e1620adad2..790202c43e 100644
--- a/tools/run_tests/performance/massage_qps_stats.py
+++ b/tools/run_tests/performance/massage_qps_stats.py
@@ -15,182 +15,455 @@
# Autogenerated by tools/codegen/core/gen_stats_data.py
import massage_qps_stats_helpers
+
+
def massage_qps_stats(scenario_result):
- for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
- if "coreStats" not in stats: return
- core_stats = stats["coreStats"]
- del stats["coreStats"]
- stats["core_client_calls_created"] = massage_qps_stats_helpers.counter(core_stats, "client_calls_created")
- stats["core_server_calls_created"] = massage_qps_stats_helpers.counter(core_stats, "server_calls_created")
- stats["core_cqs_created"] = massage_qps_stats_helpers.counter(core_stats, "cqs_created")
- stats["core_client_channels_created"] = massage_qps_stats_helpers.counter(core_stats, "client_channels_created")
- stats["core_client_subchannels_created"] = massage_qps_stats_helpers.counter(core_stats, "client_subchannels_created")
- stats["core_server_channels_created"] = massage_qps_stats_helpers.counter(core_stats, "server_channels_created")
- stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(core_stats, "syscall_poll")
- stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(core_stats, "syscall_wait")
- stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick")
- stats["core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kicked_without_poller")
- stats["core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kicked_again")
- stats["core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick_wakeup_fd")
- stats["core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick_wakeup_cv")
- stats["core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick_own_thread")
- stats["core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(core_stats, "histogram_slow_lookups")
- stats["core_syscall_write"] = massage_qps_stats_helpers.counter(core_stats, "syscall_write")
- stats["core_syscall_read"] = massage_qps_stats_helpers.counter(core_stats, "syscall_read")
- stats["core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(core_stats, "tcp_backup_pollers_created")
- stats["core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(core_stats, "tcp_backup_poller_polls")
- stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_batches")
- stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_cancel")
- stats["core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_send_initial_metadata")
- stats["core_http2_op_send_message"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_send_message")
- stats["core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_send_trailing_metadata")
- stats["core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_recv_initial_metadata")
- stats["core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_recv_message")
- stats["core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_recv_trailing_metadata")
- stats["core_http2_settings_writes"] = massage_qps_stats_helpers.counter(core_stats, "http2_settings_writes")
- stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(core_stats, "http2_pings_sent")
- stats["core_http2_writes_begun"] = massage_qps_stats_helpers.counter(core_stats, "http2_writes_begun")
- stats["core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(core_stats, "http2_writes_offloaded")
- stats["core_http2_writes_continued"] = massage_qps_stats_helpers.counter(core_stats, "http2_writes_continued")
- stats["core_http2_partial_writes"] = massage_qps_stats_helpers.counter(core_stats, "http2_partial_writes")
- stats["core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_initial_write")
- stats["core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_start_new_stream")
- stats["core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_message")
- stats["core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_initial_metadata")
- stats["core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_trailing_metadata")
- stats["core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_retry_send_ping")
- stats["core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_continue_pings")
- stats["core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_goaway_sent")
- stats["core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_rst_stream")
- stats["core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_close_from_api")
- stats["core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_stream_flow_control")
- stats["core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_transport_flow_control")
- stats["core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_settings")
- stats["core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_bdp_estimator_ping")
- stats["core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_flow_control_unstalled_by_setting")
- stats["core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_flow_control_unstalled_by_update")
- stats["core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_application_ping")
- stats["core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_keepalive_ping")
- stats["core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_transport_flow_control_unstalled")
- stats["core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_ping_response")
- stats["core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_force_rst_stream")
- stats["core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_indexed")
- stats["core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_incidx")
- stats["core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_incidx_v")
- stats["core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_notidx")
- stats["core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_notidx_v")
- stats["core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_nvridx")
- stats["core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_nvridx_v")
- stats["core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_uncompressed")
- stats["core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_huffman")
- stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_binary")
- stats["core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_binary_base64")
- stats["core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_indexed")
- stats["core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_incidx")
- stats["core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_incidx_v")
- stats["core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_notidx")
- stats["core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_notidx_v")
- stats["core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_nvridx")
- stats["core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_nvridx_v")
- stats["core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_uncompressed")
- stats["core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_huffman")
- stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_binary")
- stats["core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_binary_base64")
- stats["core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_initiated")
- stats["core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_scheduled_items")
- stats["core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_scheduled_final_items")
- stats["core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_offloaded")
- stats["core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(core_stats, "executor_scheduled_short_items")
- stats["core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(core_stats, "executor_scheduled_long_items")
- stats["core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(core_stats, "executor_scheduled_to_self")
- stats["core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(core_stats, "executor_wakeup_initiated")
- stats["core_executor_queue_drained"] = massage_qps_stats_helpers.counter(core_stats, "executor_queue_drained")
- stats["core_executor_push_retries"] = massage_qps_stats_helpers.counter(core_stats, "executor_push_retries")
- stats["core_executor_threads_created"] = massage_qps_stats_helpers.counter(core_stats, "executor_threads_created")
- stats["core_executor_threads_used"] = massage_qps_stats_helpers.counter(core_stats, "executor_threads_used")
- stats["core_server_requested_calls"] = massage_qps_stats_helpers.counter(core_stats, "server_requested_calls")
- stats["core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(core_stats, "server_slowpath_requests_queued")
- h = massage_qps_stats_helpers.histogram(core_stats, "call_initial_size")
- stats["core_call_initial_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_call_initial_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "poll_events_returned")
- stats["core_poll_events_returned"] = ",".join("%f" % x for x in h.buckets)
- stats["core_poll_events_returned_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_size")
- stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_write_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_iov_size")
- stats["core_tcp_write_iov_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_write_iov_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
- stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_read_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer")
- stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_read_offer_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer_iov_size")
- stats["core_tcp_read_offer_iov_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_read_offer_iov_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_message_size")
- stats["core_http2_send_message_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_http2_send_message_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_initial_metadata_per_write")
- stats["core_http2_send_initial_metadata_per_write"] = ",".join("%f" % x for x in h.buckets)
- stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_message_per_write")
- stats["core_http2_send_message_per_write"] = ",".join("%f" % x for x in h.buckets)
- stats["core_http2_send_message_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_trailing_metadata_per_write")
- stats["core_http2_send_trailing_metadata_per_write"] = ",".join("%f" % x for x in h.buckets)
- stats["core_http2_send_trailing_metadata_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_flowctl_per_write")
- stats["core_http2_send_flowctl_per_write"] = ",".join("%f" % x for x in h.buckets)
- stats["core_http2_send_flowctl_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "executor_closures_per_wakeup")
- stats["core_executor_closures_per_wakeup"] = ",".join("%f" % x for x in h.buckets)
- stats["core_executor_closures_per_wakeup_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_executor_closures_per_wakeup_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_executor_closures_per_wakeup_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_executor_closures_per_wakeup_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
- h = massage_qps_stats_helpers.histogram(core_stats, "server_cqs_checked")
- stats["core_server_cqs_checked"] = ",".join("%f" % x for x in h.buckets)
- stats["core_server_cqs_checked_bkts"] = ",".join("%f" % x for x in h.boundaries)
- stats["core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
- stats["core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
- stats["core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
+ for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
+ if "coreStats" not in stats: return
+ core_stats = stats["coreStats"]
+ del stats["coreStats"]
+ stats["core_client_calls_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "client_calls_created")
+ stats["core_server_calls_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "server_calls_created")
+ stats["core_cqs_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "cqs_created")
+ stats[
+ "core_client_channels_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "client_channels_created")
+ stats[
+ "core_client_subchannels_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "client_subchannels_created")
+ stats[
+ "core_server_channels_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "server_channels_created")
+ stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_poll")
+ stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_wait")
+ stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kick")
+ stats[
+ "core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kicked_without_poller")
+ stats["core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kicked_again")
+ stats[
+ "core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kick_wakeup_fd")
+ stats[
+ "core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kick_wakeup_cv")
+ stats[
+ "core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kick_own_thread")
+ stats[
+ "core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(
+ core_stats, "histogram_slow_lookups")
+ stats["core_syscall_write"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_write")
+ stats["core_syscall_read"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_read")
+ stats[
+ "core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "tcp_backup_pollers_created")
+ stats[
+ "core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(
+ core_stats, "tcp_backup_poller_polls")
+ stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_batches")
+ stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_cancel")
+ stats[
+ "core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_send_initial_metadata")
+ stats["core_http2_op_send_message"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_send_message")
+ stats[
+ "core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_send_trailing_metadata")
+ stats[
+ "core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_recv_initial_metadata")
+ stats["core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_recv_message")
+ stats[
+ "core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_recv_trailing_metadata")
+ stats["core_http2_settings_writes"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_settings_writes")
+ stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_pings_sent")
+ stats["core_http2_writes_begun"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_writes_begun")
+ stats[
+ "core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_writes_offloaded")
+ stats[
+ "core_http2_writes_continued"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_writes_continued")
+ stats["core_http2_partial_writes"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_partial_writes")
+ stats[
+ "core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_initial_write")
+ stats[
+ "core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_start_new_stream")
+ stats[
+ "core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_send_message")
+ stats[
+ "core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_send_initial_metadata")
+ stats[
+ "core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_send_trailing_metadata")
+ stats[
+ "core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_retry_send_ping")
+ stats[
+ "core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_continue_pings")
+ stats[
+ "core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_goaway_sent")
+ stats[
+ "core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_rst_stream")
+ stats[
+ "core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_close_from_api")
+ stats[
+ "core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_stream_flow_control")
+ stats[
+ "core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_transport_flow_control")
+ stats[
+ "core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_send_settings")
+ stats[
+ "core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_bdp_estimator_ping")
+ stats[
+ "core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_flow_control_unstalled_by_setting")
+ stats[
+ "core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_flow_control_unstalled_by_update")
+ stats[
+ "core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_application_ping")
+ stats[
+ "core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_keepalive_ping")
+ stats[
+ "core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_transport_flow_control_unstalled")
+ stats[
+ "core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_ping_response")
+ stats[
+ "core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_force_rst_stream")
+ stats[
+ "core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_spurious_writes_begun")
+ stats["core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_indexed")
+ stats[
+ "core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_incidx")
+ stats[
+ "core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_incidx_v")
+ stats[
+ "core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_notidx")
+ stats[
+ "core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_notidx_v")
+ stats[
+ "core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_nvridx")
+ stats[
+ "core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_nvridx_v")
+ stats[
+ "core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_uncompressed")
+ stats["core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_huffman")
+ stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_binary")
+ stats[
+ "core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_binary_base64")
+ stats["core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_indexed")
+ stats[
+ "core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_incidx")
+ stats[
+ "core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_incidx_v")
+ stats[
+ "core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_notidx")
+ stats[
+ "core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_notidx_v")
+ stats[
+ "core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_nvridx")
+ stats[
+ "core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_nvridx_v")
+ stats[
+ "core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_uncompressed")
+ stats["core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_huffman")
+ stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_binary")
+ stats[
+ "core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_binary_base64")
+ stats[
+ "core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
+ core_stats, "combiner_locks_initiated")
+ stats[
+ "core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "combiner_locks_scheduled_items")
+ stats[
+ "core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "combiner_locks_scheduled_final_items")
+ stats[
+ "core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(
+ core_stats, "combiner_locks_offloaded")
+ stats[
+ "core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
+ core_stats, "call_combiner_locks_initiated")
+ stats[
+ "core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "call_combiner_locks_scheduled_items")
+ stats[
+ "core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(
+ core_stats, "call_combiner_set_notify_on_cancel")
+ stats[
+ "core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(
+ core_stats, "call_combiner_cancelled")
+ stats[
+ "core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_scheduled_short_items")
+ stats[
+ "core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_scheduled_long_items")
+ stats[
+ "core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_scheduled_to_self")
+ stats[
+ "core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_wakeup_initiated")
+ stats[
+ "core_executor_queue_drained"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_queue_drained")
+ stats["core_executor_push_retries"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_push_retries")
+ stats[
+ "core_server_requested_calls"] = massage_qps_stats_helpers.counter(
+ core_stats, "server_requested_calls")
+ stats[
+ "core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(
+ core_stats, "server_slowpath_requests_queued")
+ stats[
+ "core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(
+ core_stats, "cq_ev_queue_trylock_failures")
+ stats[
+ "core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(
+ core_stats, "cq_ev_queue_trylock_successes")
+ stats[
+ "core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(
+ core_stats, "cq_ev_queue_transient_pop_failures")
+ h = massage_qps_stats_helpers.histogram(core_stats, "call_initial_size")
+ stats["core_call_initial_size"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_call_initial_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "poll_events_returned")
+ stats["core_poll_events_returned"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_poll_events_returned_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_size")
+ stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_tcp_write_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats["core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats["core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats["core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "tcp_write_iov_size")
+ stats["core_tcp_write_iov_size"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_tcp_write_iov_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
+ stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_tcp_read_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats["core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats["core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats["core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer")
+ stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_tcp_read_offer_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats["core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats["core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats["core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "tcp_read_offer_iov_size")
+ stats["core_tcp_read_offer_iov_size"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_tcp_read_offer_iov_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "http2_send_message_size")
+ stats["core_http2_send_message_size"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_message_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(
+ core_stats, "http2_send_initial_metadata_per_write")
+ stats["core_http2_send_initial_metadata_per_write"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "http2_send_message_per_write")
+ stats["core_http2_send_message_per_write"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_message_per_write_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(
+ core_stats, "http2_send_trailing_metadata_per_write")
+ stats["core_http2_send_trailing_metadata_per_write"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_trailing_metadata_per_write_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "http2_send_flowctl_per_write")
+ stats["core_http2_send_flowctl_per_write"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_flowctl_per_write_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "server_cqs_checked")
+ stats["core_server_cqs_checked"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_server_cqs_checked_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
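
Every core histogram above is flattened into the same four columns: the raw bucket counts and the bucket boundaries (both serialized as comma-separated strings), plus the 50th/95th/99th percentiles derived from them. The file spells each histogram out explicitly; the helper below is only an illustrative sketch of that repeated pattern, assuming a core_stats dict in the shape the helpers expect (the histogram name is just an example):

    import massage_qps_stats_helpers

    def _flatten_histogram(stats, core_stats, name):
        # Same four-column pattern used for every histogram above.
        h = massage_qps_stats_helpers.histogram(core_stats, name)
        stats["core_" + name] = ",".join("%f" % x for x in h.buckets)
        stats["core_" + name + "_bkts"] = ",".join("%f" % x for x in h.boundaries)
        for pctl in (50, 95, 99):
            stats["core_%s_%dp" % (name, pctl)] = massage_qps_stats_helpers.percentile(
                h.buckets, pctl, h.boundaries)

    # e.g. _flatten_histogram(stats, core_stats, "server_cqs_checked")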
diff --git a/tools/run_tests/performance/massage_qps_stats_helpers.py b/tools/run_tests/performance/massage_qps_stats_helpers.py
index a2fe4ae6c3..108451cd55 100644
--- a/tools/run_tests/performance/massage_qps_stats_helpers.py
+++ b/tools/run_tests/performance/massage_qps_stats_helpers.py
@@ -14,44 +14,49 @@
import collections
+
def _threshold_for_count_below(buckets, boundaries, count_below):
- count_so_far = 0
- for lower_idx in range(0, len(buckets)):
- count_so_far += buckets[lower_idx]
- if count_so_far >= count_below:
- break
- if count_so_far == count_below:
- # this bucket hits the threshold exactly... we should be midway through
- # any run of zero values following the bucket
- for upper_idx in range(lower_idx + 1, len(buckets)):
- if buckets[upper_idx] != 0:
- break
- return (boundaries[lower_idx] + boundaries[upper_idx]) / 2.0
- else:
- # treat values as uniform throughout the bucket, and find where this value
- # should lie
- lower_bound = boundaries[lower_idx]
- upper_bound = boundaries[lower_idx + 1]
- return (upper_bound -
- (upper_bound - lower_bound) * (count_so_far - count_below) /
- float(buckets[lower_idx]))
+ count_so_far = 0
+ for lower_idx in range(0, len(buckets)):
+ count_so_far += buckets[lower_idx]
+ if count_so_far >= count_below:
+ break
+ if count_so_far == count_below:
+ # this bucket hits the threshold exactly... we should be midway through
+ # any run of zero values following the bucket
+ for upper_idx in range(lower_idx + 1, len(buckets)):
+ if buckets[upper_idx] != 0:
+ break
+ return (boundaries[lower_idx] + boundaries[upper_idx]) / 2.0
+ else:
+ # treat values as uniform throughout the bucket, and find where this value
+ # should lie
+ lower_bound = boundaries[lower_idx]
+ upper_bound = boundaries[lower_idx + 1]
+ return (upper_bound - (upper_bound - lower_bound) *
+ (count_so_far - count_below) / float(buckets[lower_idx]))
+
def percentile(buckets, pctl, boundaries):
- return _threshold_for_count_below(
- buckets, boundaries, sum(buckets) * pctl / 100.0)
+ return _threshold_for_count_below(buckets, boundaries,
+ sum(buckets) * pctl / 100.0)
+
def counter(core_stats, name):
- for stat in core_stats['metrics']:
- if stat['name'] == name:
- return int(stat.get('count', 0))
+ for stat in core_stats['metrics']:
+ if stat['name'] == name:
+ return int(stat.get('count', 0))
+
Histogram = collections.namedtuple('Histogram', 'buckets boundaries')
+
+
def histogram(core_stats, name):
- for stat in core_stats['metrics']:
- if stat['name'] == name:
- buckets = []
- boundaries = []
- for b in stat['histogram']['buckets']:
- buckets.append(int(b.get('count', 0)))
- boundaries.append(int(b.get('start', 0)))
- return Histogram(buckets=buckets, boundaries=boundaries)
+ for stat in core_stats['metrics']:
+ if stat['name'] == name:
+ buckets = []
+ boundaries = []
+ for b in stat['histogram']['buckets']:
+ buckets.append(int(b.get('count', 0)))
+ boundaries.append(int(b.get('start', 0)))
+ return Histogram(buckets=buckets, boundaries=boundaries)
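
The interpolation above is easier to see with a tiny worked example (hypothetical data, not from a real run): with three buckets holding 2, 6 and 2 samples and bucket starts 0, 10 and 20, the 50th percentile asks for the value below which 5 of the 10 samples fall; the first bucket supplies 2, and the remaining 3 of the second bucket's 6 are assumed uniformly spread over [10, 20), giving 15.0.

    import massage_qps_stats_helpers as helpers

    buckets = [2, 6, 2]       # hypothetical per-bucket counts
    boundaries = [0, 10, 20]  # bucket start values, as histogram() produces them
    print(helpers.percentile(buckets, 50, boundaries))  # 15.0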
diff --git a/tools/run_tests/performance/patch_scenario_results_schema.py b/tools/run_tests/performance/patch_scenario_results_schema.py
index 81ba5381b3..2a2aadc242 100755
--- a/tools/run_tests/performance/patch_scenario_results_schema.py
+++ b/tools/run_tests/performance/patch_scenario_results_schema.py
@@ -25,27 +25,32 @@ import sys
import time
import uuid
-
-gcp_utils_dir = os.path.abspath(os.path.join(
- os.path.dirname(__file__), '../../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils
+_PROJECT_ID = 'grpc-testing'
-_PROJECT_ID='grpc-testing'
def _patch_results_table(dataset_id, table_id):
- bq = big_query_utils.create_big_query()
- with open(os.path.dirname(__file__) + '/scenario_result_schema.json', 'r') as f:
- table_schema = json.loads(f.read())
- desc = 'Results of performance benchmarks.'
- return big_query_utils.patch_table(bq, _PROJECT_ID, dataset_id,
- table_id, table_schema)
-
-
-argp = argparse.ArgumentParser(description='Patch schema of scenario results table.')
-argp.add_argument('--bq_result_table', required=True, default=None, type=str,
- help='Bigquery "dataset.table" to patch.')
+ bq = big_query_utils.create_big_query()
+ with open(os.path.dirname(__file__) + '/scenario_result_schema.json',
+ 'r') as f:
+ table_schema = json.loads(f.read())
+ desc = 'Results of performance benchmarks.'
+ return big_query_utils.patch_table(bq, _PROJECT_ID, dataset_id, table_id,
+ table_schema)
+
+
+argp = argparse.ArgumentParser(
+ description='Patch schema of scenario results table.')
+argp.add_argument(
+ '--bq_result_table',
+ required=True,
+ default=None,
+ type=str,
+ help='Bigquery "dataset.table" to patch.')
args = argp.parse_args()
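
For reference, a minimal sketch of how the entry point above would typically be driven once the flag is parsed; splitting the "dataset.table" value and the table name used here are illustrative assumptions, not part of this change:

    # Hypothetical driver: args.bq_result_table holds a "dataset.table" string.
    dataset_id, table_id = 'performance_results.scenario_results'.split('.', 1)
    _patch_results_table(dataset_id, table_id)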
diff --git a/tools/run_tests/performance/process_local_perf_flamegraphs.sh b/tools/run_tests/performance/process_local_perf_flamegraphs.sh
index d0c22f2070..ccb5b19f2a 100755
--- a/tools/run_tests/performance/process_local_perf_flamegraphs.sh
+++ b/tools/run_tests/performance/process_local_perf_flamegraphs.sh
@@ -13,13 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-mkdir -p $OUTPUT_DIR
+mkdir -p "$OUTPUT_DIR"
-PERF_DATA_FILE=${PERF_BASE_NAME}-perf.data
-PERF_SCRIPT_OUTPUT=${PERF_BASE_NAME}-out.perf
+PERF_DATA_FILE="${PERF_BASE_NAME}-perf.data"
+PERF_SCRIPT_OUTPUT="${PERF_BASE_NAME}-out.perf"
# Generate Flame graphs
echo "running perf script on $PERF_DATA_FILE"
-perf script -i $PERF_DATA_FILE > $PERF_SCRIPT_OUTPUT
+perf script -i "$PERF_DATA_FILE" > "$PERF_SCRIPT_OUTPUT"
-~/FlameGraph/stackcollapse-perf.pl $PERF_SCRIPT_OUTPUT | ~/FlameGraph/flamegraph.pl > ${OUTPUT_DIR}/${OUTPUT_FILENAME}.svg
+~/FlameGraph/stackcollapse-perf.pl "$PERF_SCRIPT_OUTPUT" | ~/FlameGraph/flamegraph.pl > "${OUTPUT_DIR}/${OUTPUT_FILENAME}.svg"
diff --git a/tools/run_tests/performance/process_remote_perf_flamegraphs.sh b/tools/run_tests/performance/process_remote_perf_flamegraphs.sh
index a0c4f6ff32..2ea6b4f2a6 100755
--- a/tools/run_tests/performance/process_remote_perf_flamegraphs.sh
+++ b/tools/run_tests/performance/process_remote_perf_flamegraphs.sh
@@ -13,17 +13,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-mkdir -p $OUTPUT_DIR
+mkdir -p "$OUTPUT_DIR"
-PERF_DATA_FILE=${PERF_BASE_NAME}-perf.data
-PERF_SCRIPT_OUTPUT=${PERF_BASE_NAME}-out.perf
+PERF_DATA_FILE="${PERF_BASE_NAME}-perf.data"
+PERF_SCRIPT_OUTPUT="${PERF_BASE_NAME}-out.perf"
# Generate Flame graphs
echo "running perf script on $USER_AT_HOST with perf.data"
-ssh $USER_AT_HOST "cd ~/performance_workspace/grpc && perf script -i $PERF_DATA_FILE | gzip > ${PERF_SCRIPT_OUTPUT}.gz"
+# shellcheck disable=SC2029
+ssh "$USER_AT_HOST" "cd ~/performance_workspace/grpc && perf script -i $PERF_DATA_FILE | gzip > ${PERF_SCRIPT_OUTPUT}.gz"
-scp $USER_AT_HOST:~/performance_workspace/grpc/$PERF_SCRIPT_OUTPUT.gz .
+scp "$USER_AT_HOST:~/performance_workspace/grpc/$PERF_SCRIPT_OUTPUT.gz" .
-gzip -d -f $PERF_SCRIPT_OUTPUT.gz
+gzip -d -f "$PERF_SCRIPT_OUTPUT.gz"
-~/FlameGraph/stackcollapse-perf.pl --kernel $PERF_SCRIPT_OUTPUT | ~/FlameGraph/flamegraph.pl --color=java --hash > ${OUTPUT_DIR}/${OUTPUT_FILENAME}.svg
+~/FlameGraph/stackcollapse-perf.pl --kernel "$PERF_SCRIPT_OUTPUT" | ~/FlameGraph/flamegraph.pl --color=java --hash > "${OUTPUT_DIR}/${OUTPUT_FILENAME}.svg"
diff --git a/tools/run_tests/performance/remote_host_build.sh b/tools/run_tests/performance/remote_host_build.sh
index 75352e9220..862bd6c05f 100755
--- a/tools/run_tests/performance/remote_host_build.sh
+++ b/tools/run_tests/performance/remote_host_build.sh
@@ -15,7 +15,8 @@
set -ex
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
# execute the build script remotely
+# shellcheck disable=SC2029
ssh "${USER_AT_HOST}" "CONFIG=${CONFIG} ~/performance_workspace/grpc/tools/run_tests/performance/build_performance.sh $*"
diff --git a/tools/run_tests/performance/remote_host_prepare.sh b/tools/run_tests/performance/remote_host_prepare.sh
index bf91acbddf..d69c85b40e 100755
--- a/tools/run_tests/performance/remote_host_prepare.sh
+++ b/tools/run_tests/performance/remote_host_prepare.sh
@@ -15,7 +15,7 @@
set -ex
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
# TODO(jtattermusch): To be sure there are no running processes that would
# mess with the results, be rough and reboot the slave here
@@ -24,7 +24,7 @@ ssh "${USER_AT_HOST}" "killall -9 qps_worker dotnet mono node ruby worker || tru
# On Windows, killall is not supported & we need to kill all pending workers
# before attempting to delete the workspace
-ssh "${USER_AT_HOST}" "ps -e | egrep 'qps_worker|dotnet' | awk '{print $1}' | xargs kill -9 || true"
+ssh "${USER_AT_HOST}" "ps -e | egrep 'qps_worker|dotnet' | awk '{print \$1}' | xargs kill -9 || true"
# cleanup after previous builds
ssh "${USER_AT_HOST}" "rm -rf ~/performance_workspace && mkdir -p ~/performance_workspace"
@@ -36,4 +36,5 @@ scp ../grpc.tar "${USER_AT_HOST}:~/performance_workspace"
ssh "${USER_AT_HOST}" "tar -xf ~/performance_workspace/grpc.tar -C ~/performance_workspace || tar -xf ~/performance_workspace/grpc.tar -C ~/performance_workspace"
# For consistency with local run, invoke the kill_workers script remotely.
+# shellcheck disable=SC2088
ssh "${USER_AT_HOST}" "~/performance_workspace/grpc/tools/run_tests/performance/kill_workers.sh"
diff --git a/tools/run_tests/performance/run_netperf.sh b/tools/run_tests/performance/run_netperf.sh
index b415ede529..2a32051dac 100755
--- a/tools/run_tests/performance/run_netperf.sh
+++ b/tools/run_tests/performance/run_netperf.sh
@@ -15,7 +15,7 @@
set -ex
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
netperf >netperf_latency.txt -P 0 -t TCP_RR -H "$NETPERF_SERVER_HOST" -- -r 1,1 -o P50_LATENCY,P90_LATENCY,P99_LATENCY
diff --git a/tools/run_tests/performance/run_qps_driver.sh b/tools/run_tests/performance/run_qps_driver.sh
index 1851632da1..2d9e310dec 100755
--- a/tools/run_tests/performance/run_qps_driver.sh
+++ b/tools/run_tests/performance/run_qps_driver.sh
@@ -15,7 +15,7 @@
set -ex
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
bins/opt/qps_json_driver "$@"
diff --git a/tools/run_tests/performance/run_worker_csharp.sh b/tools/run_tests/performance/run_worker_csharp.sh
index e2e941f34a..6546d6010b 100755
--- a/tools/run_tests/performance/run_worker_csharp.sh
+++ b/tools/run_tests/performance/run_worker_csharp.sh
@@ -15,9 +15,9 @@
set -ex
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
# needed to correctly locate testca
cd src/csharp/Grpc.IntegrationTesting.QpsWorker/bin/Release/netcoreapp1.0
-dotnet exec Grpc.IntegrationTesting.QpsWorker.dll $@
+dotnet exec Grpc.IntegrationTesting.QpsWorker.dll "$@"
diff --git a/tools/run_tests/performance/run_worker_go.sh b/tools/run_tests/performance/run_worker_go.sh
index 6d8abb4147..f8e821a265 100755
--- a/tools/run_tests/performance/run_worker_go.sh
+++ b/tools/run_tests/performance/run_worker_go.sh
@@ -15,8 +15,8 @@
set -ex
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
export GOPATH=$(pwd)/../gopath
-${GOPATH}/bin/worker $@
+"${GOPATH}/bin/worker" "$@"
diff --git a/tools/run_tests/performance/run_worker_java.sh b/tools/run_tests/performance/run_worker_java.sh
index 039a9618e1..cff6faf680 100755
--- a/tools/run_tests/performance/run_worker_java.sh
+++ b/tools/run_tests/performance/run_worker_java.sh
@@ -16,9 +16,9 @@
set -ex
# Enter repo root
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
# Enter the grpc-java repo root (expected to be next to grpc repo root)
cd ../grpc-java
-benchmarks/build/install/grpc-benchmarks/bin/benchmark_worker $@
+benchmarks/build/install/grpc-benchmarks/bin/benchmark_worker "$@"
diff --git a/tools/run_tests/performance/run_worker_php.sh b/tools/run_tests/performance/run_worker_php.sh
index 5d0c4fa4fd..2fe2493e60 100755
--- a/tools/run_tests/performance/run_worker_php.sh
+++ b/tools/run_tests/performance/run_worker_php.sh
@@ -16,13 +16,8 @@
source ~/.rvm/scripts/rvm
set -ex
-repo=$(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
-# First set up all dependences needed for PHP QPS test
-cd $repo
-cd src/php/tests/qps
-composer install
# The proxy worker for PHP is implemented in Ruby
-cd ../../../..
-ruby src/ruby/qps/proxy-worker.rb $@
+ruby src/ruby/qps/proxy-worker.rb "$@"
diff --git a/tools/run_tests/performance/run_worker_python.sh b/tools/run_tests/performance/run_worker_python.sh
index cd7d0ebbae..01241c8296 100755
--- a/tools/run_tests/performance/run_worker_python.sh
+++ b/tools/run_tests/performance/run_worker_python.sh
@@ -15,6 +15,6 @@
set -ex
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
-PYTHONPATH=src/python/grpcio_tests:src/python/gens py27/bin/python src/python/grpcio_tests/tests/qps/qps_worker.py $@
+PYTHONPATH=src/python/grpcio_tests:src/python/gens py27/bin/python src/python/grpcio_tests/tests/qps/qps_worker.py "$@"
diff --git a/tools/run_tests/performance/run_worker_ruby.sh b/tools/run_tests/performance/run_worker_ruby.sh
index db8a7d8cd6..729c5cec97 100755
--- a/tools/run_tests/performance/run_worker_ruby.sh
+++ b/tools/run_tests/performance/run_worker_ruby.sh
@@ -16,6 +16,6 @@
source ~/.rvm/scripts/rvm
set -ex
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
-ruby src/ruby/qps/worker.rb $@
+ruby src/ruby/qps/worker.rb "$@"
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index 5efc9f5648..f05753154e 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -16,65 +16,64 @@
import math
-WARMUP_SECONDS=5
-JAVA_WARMUP_SECONDS=15 # Java needs more warmup time for JIT to kick in.
-BENCHMARK_SECONDS=30
-
-SMOKETEST='smoketest'
-SCALABLE='scalable'
-SWEEP='sweep'
-DEFAULT_CATEGORIES=[SCALABLE, SMOKETEST]
-
-SECURE_SECARGS = {'use_test_ca': True,
- 'server_host_override': 'foo.test.google.fr'}
+WARMUP_SECONDS = 5
+JAVA_WARMUP_SECONDS = 15 # Java needs more warmup time for JIT to kick in.
+BENCHMARK_SECONDS = 30
+
+SMOKETEST = 'smoketest'
+SCALABLE = 'scalable'
+INPROC = 'inproc'
+SWEEP = 'sweep'
+DEFAULT_CATEGORIES = [SCALABLE, SMOKETEST]
+
+SECURE_SECARGS = {
+ 'use_test_ca': True,
+ 'server_host_override': 'foo.test.google.fr'
+}
HISTOGRAM_PARAMS = {
- 'resolution': 0.01,
- 'max_possible': 60e9,
+ 'resolution': 0.01,
+ 'max_possible': 60e9,
}
 # target number of RPCs outstanding across all client channels in
# non-ping-pong tests (since we can only specify per-channel numbers, the
# actual target will be slightly higher)
-OUTSTANDING_REQUESTS={
- 'async': 6400,
- 'async-limited': 800,
- 'sync': 1000
-}
+OUTSTANDING_REQUESTS = {'async': 6400, 'async-limited': 800, 'sync': 1000}
# wide is the number of client channels in multi-channel tests (1 otherwise)
-WIDE=64
+WIDE = 64
def _get_secargs(is_secure):
- if is_secure:
- return SECURE_SECARGS
- else:
- return None
+ if is_secure:
+ return SECURE_SECARGS
+ else:
+ return None
def remove_nonproto_fields(scenario):
- """Remove special-purpose that contains some extra info about the scenario
+    """Remove special-purpose fields that contain some extra info about the scenario
but don't belong to the ScenarioConfig protobuf message"""
- scenario.pop('CATEGORIES', None)
- scenario.pop('CLIENT_LANGUAGE', None)
- scenario.pop('SERVER_LANGUAGE', None)
- scenario.pop('EXCLUDED_POLL_ENGINES', None)
- return scenario
+ scenario.pop('CATEGORIES', None)
+ scenario.pop('CLIENT_LANGUAGE', None)
+ scenario.pop('SERVER_LANGUAGE', None)
+ scenario.pop('EXCLUDED_POLL_ENGINES', None)
+ return scenario
def geometric_progression(start, stop, step):
- n = start
- while n < stop:
- yield int(round(n))
- n *= step
+ n = start
+ while n < stop:
+ yield int(round(n))
+ n *= step
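
As a quick illustration of the sweep helper just above (outputs computed from the function as written, not measured data), the two step values used for the messages-per-stream and channel sweeps later in this file expand to:

    >>> list(geometric_progression(1, 20, 10))
    [1, 10]
    >>> list(geometric_progression(1, 200, math.sqrt(10)))
    [1, 3, 10, 32, 100]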
def _payload_type(use_generic_payload, req_size, resp_size):
r = {}
sizes = {
- 'req_size': req_size,
- 'resp_size': resp_size,
+ 'req_size': req_size,
+ 'resp_size': resp_size,
}
if use_generic_payload:
r['bytebuf_params'] = sizes
@@ -82,21 +81,36 @@ def _payload_type(use_generic_payload, req_size, resp_size):
r['simple_params'] = sizes
return r
+
+def _load_params(offered_load):
+ r = {}
+ if offered_load is None:
+ r['closed_loop'] = {}
+ else:
+ load = {}
+ load['offered_load'] = offered_load
+ r['poisson'] = load
+ return r
+
+
def _add_channel_arg(config, key, value):
- if 'channel_args' in config:
- channel_args = config['channel_args']
- else:
- channel_args = []
- config['channel_args'] = channel_args
- arg = {'name': key}
- if isinstance(value, int):
- arg['int_value'] = value
- else:
- arg['str_value'] = value
- channel_args.append(arg)
-
-def _ping_pong_scenario(name, rpc_type,
- client_type, server_type,
+ if 'channel_args' in config:
+ channel_args = config['channel_args']
+ else:
+ channel_args = []
+ config['channel_args'] = channel_args
+ arg = {'name': key}
+ if isinstance(value, int):
+ arg['int_value'] = value
+ else:
+ arg['str_value'] = value
+ channel_args.append(arg)
+
+
+def _ping_pong_scenario(name,
+ rpc_type,
+ client_type,
+ server_type,
secure=True,
use_generic_payload=False,
req_size=0,
@@ -115,917 +129,1035 @@ def _ping_pong_scenario(name, rpc_type,
resource_quota_size=None,
messages_per_stream=None,
excluded_poll_engines=[],
- minimal_stack=False):
- """Creates a basic ping pong scenario."""
- scenario = {
- 'name': name,
- 'num_servers': 1,
- 'num_clients': 1,
- 'client_config': {
- 'client_type': client_type,
- 'security_params': _get_secargs(secure),
- 'outstanding_rpcs_per_channel': 1,
- 'client_channels': 1,
- 'async_client_threads': 1,
- 'threads_per_cq': client_threads_per_cq,
- 'rpc_type': rpc_type,
- 'load_params': {
- 'closed_loop': {}
- },
- 'histogram_params': HISTOGRAM_PARAMS,
- 'channel_args': [],
- },
- 'server_config': {
- 'server_type': server_type,
- 'security_params': _get_secargs(secure),
- 'async_server_threads': async_server_threads,
- 'threads_per_cq': server_threads_per_cq,
- 'channel_args': [],
- },
- 'warmup_seconds': warmup_seconds,
- 'benchmark_seconds': BENCHMARK_SECONDS
- }
- if resource_quota_size:
- scenario['server_config']['resource_quota_size'] = resource_quota_size
- if use_generic_payload:
- if server_type != 'ASYNC_GENERIC_SERVER':
- raise Exception('Use ASYNC_GENERIC_SERVER for generic payload.')
- scenario['server_config']['payload_config'] = _payload_type(use_generic_payload, req_size, resp_size)
-
- scenario['client_config']['payload_config'] = _payload_type(use_generic_payload, req_size, resp_size)
-
- # Optimization target of 'throughput' does not work well with epoll1 polling
- # engine. Use the default value of 'blend'
- optimization_target = 'throughput'
-
- if unconstrained_client:
- outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[unconstrained_client]
- # clamp buffer usage to something reasonable (16 gig for now)
- MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
- if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
- outstanding_calls = max(1, MAX_MEMORY_USE / max(req_size, resp_size))
- wide = channels if channels is not None else WIDE
- deep = int(math.ceil(1.0 * outstanding_calls / wide))
-
- scenario['num_clients'] = num_clients if num_clients is not None else 0 # use as many clients as available.
- scenario['client_config']['outstanding_rpcs_per_channel'] = deep
- scenario['client_config']['client_channels'] = wide
- scenario['client_config']['async_client_threads'] = 0
- else:
- scenario['client_config']['outstanding_rpcs_per_channel'] = 1
- scenario['client_config']['client_channels'] = 1
- scenario['client_config']['async_client_threads'] = 1
- optimization_target = 'latency'
-
- optimization_channel_arg = {
- 'name': 'grpc.optimization_target',
- 'str_value': optimization_target
- }
- scenario['client_config']['channel_args'].append(optimization_channel_arg)
- scenario['server_config']['channel_args'].append(optimization_channel_arg)
-
- if minimal_stack:
- _add_channel_arg(scenario['client_config'], 'grpc.minimal_stack', 1)
- _add_channel_arg(scenario['server_config'], 'grpc.minimal_stack', 1)
-
- if messages_per_stream:
- scenario['client_config']['messages_per_stream'] = messages_per_stream
- if client_language:
- # the CLIENT_LANGUAGE field is recognized by run_performance_tests.py
- scenario['CLIENT_LANGUAGE'] = client_language
- if server_language:
- # the SERVER_LANGUAGE field is recognized by run_performance_tests.py
- scenario['SERVER_LANGUAGE'] = server_language
- if categories:
- scenario['CATEGORIES'] = categories
- if len(excluded_poll_engines):
- # The polling engines for which this scenario is excluded
- scenario['EXCLUDED_POLL_ENGINES'] = excluded_poll_engines
- return scenario
+ minimal_stack=False,
+ offered_load=None):
+ """Creates a basic ping pong scenario."""
+ scenario = {
+ 'name': name,
+ 'num_servers': 1,
+ 'num_clients': 1,
+ 'client_config': {
+ 'client_type': client_type,
+ 'security_params': _get_secargs(secure),
+ 'outstanding_rpcs_per_channel': 1,
+ 'client_channels': 1,
+ 'async_client_threads': 1,
+ 'threads_per_cq': client_threads_per_cq,
+ 'rpc_type': rpc_type,
+ 'histogram_params': HISTOGRAM_PARAMS,
+ 'channel_args': [],
+ },
+ 'server_config': {
+ 'server_type': server_type,
+ 'security_params': _get_secargs(secure),
+ 'async_server_threads': async_server_threads,
+ 'threads_per_cq': server_threads_per_cq,
+ 'channel_args': [],
+ },
+ 'warmup_seconds': warmup_seconds,
+ 'benchmark_seconds': BENCHMARK_SECONDS
+ }
+ if resource_quota_size:
+ scenario['server_config']['resource_quota_size'] = resource_quota_size
+ if use_generic_payload:
+ if server_type != 'ASYNC_GENERIC_SERVER':
+ raise Exception('Use ASYNC_GENERIC_SERVER for generic payload.')
+ scenario['server_config']['payload_config'] = _payload_type(
+ use_generic_payload, req_size, resp_size)
+
+ scenario['client_config']['payload_config'] = _payload_type(
+ use_generic_payload, req_size, resp_size)
+
+ # Optimization target of 'throughput' does not work well with epoll1 polling
+ # engine. Use the default value of 'blend'
+ optimization_target = 'throughput'
+
+ if unconstrained_client:
+ outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[
+ unconstrained_client]
+ # clamp buffer usage to something reasonable (16 gig for now)
+ MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
+ if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
+ outstanding_calls = max(1,
+ MAX_MEMORY_USE / max(req_size, resp_size))
+ wide = channels if channels is not None else WIDE
+ deep = int(math.ceil(1.0 * outstanding_calls / wide))
+
+ scenario[
+ 'num_clients'] = num_clients if num_clients is not None else 0 # use as many clients as available.
+ scenario['client_config']['outstanding_rpcs_per_channel'] = deep
+ scenario['client_config']['client_channels'] = wide
+ scenario['client_config']['async_client_threads'] = 0
+ if offered_load is not None:
+ optimization_target = 'latency'
+ else:
+ scenario['client_config']['outstanding_rpcs_per_channel'] = 1
+ scenario['client_config']['client_channels'] = 1
+ scenario['client_config']['async_client_threads'] = 1
+ optimization_target = 'latency'
+
+ scenario['client_config']['load_params'] = _load_params(offered_load)
+
+ optimization_channel_arg = {
+ 'name': 'grpc.optimization_target',
+ 'str_value': optimization_target
+ }
+ scenario['client_config']['channel_args'].append(optimization_channel_arg)
+ scenario['server_config']['channel_args'].append(optimization_channel_arg)
+
+ if minimal_stack:
+ _add_channel_arg(scenario['client_config'], 'grpc.minimal_stack', 1)
+ _add_channel_arg(scenario['server_config'], 'grpc.minimal_stack', 1)
+
+ if messages_per_stream:
+ scenario['client_config']['messages_per_stream'] = messages_per_stream
+ if client_language:
+ # the CLIENT_LANGUAGE field is recognized by run_performance_tests.py
+ scenario['CLIENT_LANGUAGE'] = client_language
+ if server_language:
+ # the SERVER_LANGUAGE field is recognized by run_performance_tests.py
+ scenario['SERVER_LANGUAGE'] = server_language
+ if categories:
+ scenario['CATEGORIES'] = categories
+ if len(excluded_poll_engines):
+ # The polling engines for which this scenario is excluded
+ scenario['EXCLUDED_POLL_ENGINES'] = excluded_poll_engines
+ return scenario
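
The new offered_load parameter is what the 75Kqps scenario below relies on: it switches the client from closed-loop to Poisson arrivals and keeps the optimization target at 'latency' even for an unconstrained client. A minimal sketch of the two load_params shapes (the 37500 figure is only an example):

    # Closed loop: a fixed number of outstanding RPCs, no pacing.
    assert _load_params(None) == {'closed_loop': {}}

    # Open loop: RPCs arrive as a Poisson process at the offered rate.
    assert _load_params(37500) == {'poisson': {'offered_load': 37500}}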
class CXXLanguage:
- def __init__(self):
- self.safename = 'cxx'
-
- def worker_cmdline(self):
- return ['bins/opt/qps_worker']
-
- def worker_port_offset(self):
- return 0
-
- def scenarios(self):
- # TODO(ctiller): add 70% load latency test
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_unary_1channel_100rpcs_1MB', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- unconstrained_client='async', outstanding=100, channels=1,
- num_clients=1,
- secure=False,
- categories=[SMOKETEST] + [SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_streaming_from_client_1channel_1MB', rpc_type='STREAMING_FROM_CLIENT',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- unconstrained_client='async', outstanding=1, channels=1,
- num_clients=1,
- secure=False,
- categories=[SMOKETEST] + [SCALABLE])
-
- for secure in [True, False]:
- secstr = 'secure' if secure else 'insecure'
- smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
-
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_ping_pong_%s' % secstr,
- rpc_type='STREAMING',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- use_generic_payload=True, async_server_threads=1,
- secure=secure,
- categories=smoketest_categories)
-
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_unconstrained_%s' % secstr,
- rpc_type='STREAMING',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories+[SCALABLE])
-
- for mps in geometric_progression(1, 20, 10):
+ def __init__(self):
+ self.safename = 'cxx'
+
+ def worker_cmdline(self):
+ return ['bins/opt/qps_worker']
+
+ def worker_port_offset(self):
+ return 0
+
+ def scenarios(self):
+ # TODO(ctiller): add 70% load latency test
yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' % (mps, secstr),
- rpc_type='STREAMING',
+ 'cpp_protobuf_async_unary_1channel_100rpcs_1MB',
+ rpc_type='UNARY',
client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure, messages_per_stream=mps,
- minimal_stack=not secure,
- categories=smoketest_categories+[SCALABLE])
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ unconstrained_client='async',
+ outstanding=100,
+ channels=1,
+ num_clients=1,
+ secure=False,
+ categories=[SMOKETEST] + [INPROC] + [SCALABLE])
- for mps in geometric_progression(1, 200, math.sqrt(10)):
yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' % (mps, secstr),
- rpc_type='STREAMING',
+ 'cpp_protobuf_async_streaming_from_client_1channel_1MB',
+ rpc_type='STREAMING_FROM_CLIENT',
client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure, messages_per_stream=mps,
- minimal_stack=not secure,
- categories=[SWEEP])
-
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_1channel_1MBmsg_%s' % secstr,
- rpc_type='STREAMING',
- req_size=1024*1024,
- resp_size=1024*1024,
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories+[SCALABLE],
- channels=1, outstanding=100)
-
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_unconstrained_64KBmsg_%s' % secstr,
- rpc_type='STREAMING',
- req_size=64*1024,
- resp_size=64*1024,
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories+[SCALABLE])
-
- # TODO(https://github.com/grpc/grpc/issues/11500) Re-enable this test
- #yield _ping_pong_scenario(
- # 'cpp_generic_async_streaming_qps_unconstrained_1cq_%s' % secstr,
- # rpc_type='STREAMING',
- # client_type='ASYNC_CLIENT',
- # server_type='ASYNC_GENERIC_SERVER',
- # unconstrained_client='async-limited', use_generic_payload=True,
- # secure=secure,
- # client_threads_per_cq=1000000, server_threads_per_cq=1000000,
- # categories=smoketest_categories+[SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_%s' % secstr,
- rpc_type='STREAMING',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure,
- client_threads_per_cq=2, server_threads_per_cq=2,
- categories=smoketest_categories+[SCALABLE])
-
- #yield _ping_pong_scenario(
- # 'cpp_protobuf_async_streaming_qps_unconstrained_1cq_%s' % secstr,
- # rpc_type='STREAMING',
- # client_type='ASYNC_CLIENT',
- # server_type='ASYNC_SERVER',
- # unconstrained_client='async-limited',
- # secure=secure,
- # client_threads_per_cq=1000000, server_threads_per_cq=1000000,
- # categories=smoketest_categories+[SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_%s' % secstr,
- rpc_type='STREAMING',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_SERVER',
- unconstrained_client='async',
- secure=secure,
- client_threads_per_cq=2, server_threads_per_cq=2,
- categories=smoketest_categories+[SCALABLE])
-
- #yield _ping_pong_scenario(
- # 'cpp_protobuf_async_unary_qps_unconstrained_1cq_%s' % secstr,
- # rpc_type='UNARY',
- # client_type='ASYNC_CLIENT',
- # server_type='ASYNC_SERVER',
- # unconstrained_client='async-limited',
- # secure=secure,
- # client_threads_per_cq=1000000, server_threads_per_cq=1000000,
- # categories=smoketest_categories+[SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_%s' % secstr,
- rpc_type='UNARY',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_SERVER',
- unconstrained_client='async',
- secure=secure,
- client_threads_per_cq=2, server_threads_per_cq=2,
- categories=smoketest_categories+[SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
- rpc_type='STREAMING',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async-limited', use_generic_payload=True,
- async_server_threads=1,
- minimal_stack=not secure,
- secure=secure)
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s' %
- (secstr),
- rpc_type='UNARY',
- client_type='ASYNC_CLIENT',
- server_type='SYNC_SERVER',
- unconstrained_client='async',
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories + [SCALABLE],
- excluded_poll_engines = ['poll-cv'])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_%s' %
- (secstr),
- rpc_type='UNARY',
- client_type='ASYNC_CLIENT',
- server_type='ASYNC_SERVER',
- channels=1,
- outstanding=64,
- req_size=128,
- resp_size=8*1024*1024,
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories + [SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s' % secstr,
- rpc_type='STREAMING',
- client_type='ASYNC_CLIENT',
- server_type='SYNC_SERVER',
- unconstrained_client='async',
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories+[SCALABLE],
- excluded_poll_engines = ['poll-cv'])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_async_unary_ping_pong_%s_1MB' % secstr, rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- secure=secure,
- minimal_stack=not secure,
- categories=smoketest_categories + [SCALABLE])
-
- for rpc_type in ['unary', 'streaming', 'streaming_from_client', 'streaming_from_server']:
- for synchronicity in ['sync', 'async']:
- yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_ping_pong_%s' % (synchronicity, rpc_type, secstr),
- rpc_type=rpc_type.upper(),
- client_type='%s_CLIENT' % synchronicity.upper(),
- server_type='%s_SERVER' % synchronicity.upper(),
- async_server_threads=1,
- minimal_stack=not secure,
- secure=secure)
-
- for size in geometric_progression(1, 1024*1024*1024+1, 8):
- yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_qps_unconstrained_%s_%db' % (synchronicity, rpc_type, secstr, size),
- rpc_type=rpc_type.upper(),
- req_size=size,
- resp_size=size,
- client_type='%s_CLIENT' % synchronicity.upper(),
- server_type='%s_SERVER' % synchronicity.upper(),
- unconstrained_client=synchronicity,
- secure=secure,
- minimal_stack=not secure,
- categories=[SWEEP])
-
- yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_qps_unconstrained_%s' % (synchronicity, rpc_type, secstr),
- rpc_type=rpc_type.upper(),
- client_type='%s_CLIENT' % synchronicity.upper(),
- server_type='%s_SERVER' % synchronicity.upper(),
- unconstrained_client=synchronicity,
- secure=secure,
- minimal_stack=not secure,
- server_threads_per_cq=3,
- client_threads_per_cq=3,
- categories=smoketest_categories+[SCALABLE])
-
- # TODO(vjpai): Re-enable this test. It has a lot of timeouts
- # and hasn't yet been conclusively identified as a test failure
- # or race in the library
- # yield _ping_pong_scenario(
- # 'cpp_protobuf_%s_%s_qps_unconstrained_%s_500kib_resource_quota' % (synchronicity, rpc_type, secstr),
- # rpc_type=rpc_type.upper(),
- # client_type='%s_CLIENT' % synchronicity.upper(),
- # server_type='%s_SERVER' % synchronicity.upper(),
- # unconstrained_client=synchronicity,
- # secure=secure,
- # categories=smoketest_categories+[SCALABLE],
- # resource_quota_size=500*1024)
-
- if rpc_type == 'streaming':
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ unconstrained_client='async',
+ outstanding=1,
+ channels=1,
+ num_clients=1,
+ secure=False,
+ categories=[SMOKETEST] + [INPROC] + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_75Kqps_600channel_60Krpcs_300Breq_50Bresp',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=300,
+ resp_size=50,
+ unconstrained_client='async',
+ outstanding=30000,
+ channels=300,
+ offered_load=37500,
+ secure=False,
+ async_server_threads=16,
+ server_threads_per_cq=1,
+ categories=[SMOKETEST] + [SCALABLE])
+
+ for secure in [True, False]:
+ secstr = 'secure' if secure else 'insecure'
+ smoketest_categories = ([SMOKETEST]
+ if secure else [INPROC]) + [SCALABLE]
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE])
+
for mps in geometric_progression(1, 20, 10):
- yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s' % (synchronicity, rpc_type, mps, secstr),
- rpc_type=rpc_type.upper(),
- client_type='%s_CLIENT' % synchronicity.upper(),
- server_type='%s_SERVER' % synchronicity.upper(),
- unconstrained_client=synchronicity,
- secure=secure, messages_per_stream=mps,
- minimal_stack=not secure,
- categories=smoketest_categories+[SCALABLE])
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' %
+ (mps, secstr),
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ messages_per_stream=mps,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE])
for mps in geometric_progression(1, 200, math.sqrt(10)):
- yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s' % (synchronicity, rpc_type, mps, secstr),
- rpc_type=rpc_type.upper(),
- client_type='%s_CLIENT' % synchronicity.upper(),
- server_type='%s_SERVER' % synchronicity.upper(),
- unconstrained_client=synchronicity,
- secure=secure, messages_per_stream=mps,
- minimal_stack=not secure,
- categories=[SWEEP])
-
- for channels in geometric_progression(1, 20000, math.sqrt(10)):
- for outstanding in geometric_progression(1, 200000, math.sqrt(10)):
- if synchronicity == 'sync' and outstanding > 1200: continue
- if outstanding < channels: continue
yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding' % (synchronicity, rpc_type, secstr, channels, outstanding),
- rpc_type=rpc_type.upper(),
- client_type='%s_CLIENT' % synchronicity.upper(),
- server_type='%s_SERVER' % synchronicity.upper(),
- unconstrained_client=synchronicity, secure=secure,
+ 'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' %
+ (mps, secstr),
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ messages_per_stream=mps,
minimal_stack=not secure,
- categories=[SWEEP], channels=channels, outstanding=outstanding)
-
- def __str__(self):
- return 'c++'
+ categories=[SWEEP])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_1channel_1MBmsg_%s' % secstr,
+ rpc_type='STREAMING',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE],
+ channels=1,
+ outstanding=100)
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_64KBmsg_%s' %
+ secstr,
+ rpc_type='STREAMING',
+ req_size=64 * 1024,
+ resp_size=64 * 1024,
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_1cq_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async-limited',
+ use_generic_payload=True,
+ secure=secure,
+ client_threads_per_cq=1000000,
+ server_threads_per_cq=1000000,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_%s'
+ % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ client_threads_per_cq=2,
+ server_threads_per_cq=2,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_streaming_qps_unconstrained_1cq_%s' %
+ secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async-limited',
+ secure=secure,
+ client_threads_per_cq=1000000,
+ server_threads_per_cq=1000000,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_%s'
+ % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ client_threads_per_cq=2,
+ server_threads_per_cq=2,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_qps_unconstrained_1cq_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async-limited',
+ secure=secure,
+ client_threads_per_cq=1000000,
+ server_threads_per_cq=1000000,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_%s' %
+ secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ client_threads_per_cq=2,
+ server_threads_per_cq=2,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async-limited',
+ use_generic_payload=True,
+ async_server_threads=1,
+ minimal_stack=not secure,
+ secure=secure)
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s'
+ % (secstr),
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE],
+ excluded_poll_engines=['poll-cv'])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_%s'
+ % (secstr),
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ channels=1,
+ outstanding=64,
+ req_size=128,
+ resp_size=8 * 1024 * 1024,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s'
+ % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE],
+ excluded_poll_engines=['poll-cv'])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_ping_pong_%s_1MB' % secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ for rpc_type in [
+ 'unary', 'streaming', 'streaming_from_client',
+ 'streaming_from_server'
+ ]:
+ for synchronicity in ['sync', 'async']:
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_ping_pong_%s' % (synchronicity,
+ rpc_type, secstr),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ async_server_threads=1,
+ minimal_stack=not secure,
+ secure=secure)
+
+ for size in geometric_progression(1, 1024 * 1024 * 1024 + 1,
+ 8):
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%s_%db' %
+ (synchronicity, rpc_type, secstr, size),
+ rpc_type=rpc_type.upper(),
+ req_size=size,
+ resp_size=size,
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=[SWEEP])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%s' %
+ (synchronicity, rpc_type, secstr),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ minimal_stack=not secure,
+ server_threads_per_cq=3,
+ client_threads_per_cq=3,
+ categories=smoketest_categories + [SCALABLE])
+
+ # TODO(vjpai): Re-enable this test. It has a lot of timeouts
+ # and hasn't yet been conclusively identified as a test failure
+ # or race in the library
+ # yield _ping_pong_scenario(
+ # 'cpp_protobuf_%s_%s_qps_unconstrained_%s_500kib_resource_quota' % (synchronicity, rpc_type, secstr),
+ # rpc_type=rpc_type.upper(),
+ # client_type='%s_CLIENT' % synchronicity.upper(),
+ # server_type='%s_SERVER' % synchronicity.upper(),
+ # unconstrained_client=synchronicity,
+ # secure=secure,
+ # categories=smoketest_categories+[SCALABLE],
+ # resource_quota_size=500*1024)
+
+ if rpc_type == 'streaming':
+ for mps in geometric_progression(1, 20, 10):
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s'
+ % (synchronicity, rpc_type, mps, secstr),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ messages_per_stream=mps,
+ minimal_stack=not secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ for mps in geometric_progression(1, 200, math.sqrt(10)):
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s'
+ % (synchronicity, rpc_type, mps, secstr),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ messages_per_stream=mps,
+ minimal_stack=not secure,
+ categories=[SWEEP])
+
+ for channels in geometric_progression(
+ 1, 20000, math.sqrt(10)):
+ for outstanding in geometric_progression(
+ 1, 200000, math.sqrt(10)):
+ if synchronicity == 'sync' and outstanding > 1200:
+ continue
+ if outstanding < channels: continue
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding'
+ % (synchronicity, rpc_type, secstr, channels,
+ outstanding),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=[SWEEP],
+ channels=channels,
+ outstanding=outstanding)
+
+ def __str__(self):
+ return 'c++'
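
Each language class exposes the same small interface (worker_cmdline, worker_port_offset, scenarios), so a runner can treat them uniformly. A minimal sketch of pulling the C++ smoketest scenarios and stripping the driver-only fields; the filtering shown here is illustrative, not the exact logic in run_performance_tests.py:

    cxx = CXXLanguage()
    smoketest = [
        remove_nonproto_fields(s) for s in cxx.scenarios()
        if SMOKETEST in s.get('CATEGORIES', DEFAULT_CATEGORIES)
    ]
    print(cxx.worker_cmdline(), len(smoketest))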
class CSharpLanguage:
- def __init__(self):
- self.safename = str(self)
-
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_csharp.sh']
-
- def worker_port_offset(self):
- return 100
-
- def scenarios(self):
- yield _ping_pong_scenario(
- 'csharp_generic_async_streaming_ping_pong', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- use_generic_payload=True,
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_generic_async_streaming_ping_pong_insecure_1MB', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- use_generic_payload=True,
- secure=False,
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_generic_async_streaming_qps_unconstrained_insecure', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=False,
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
-
- yield _ping_pong_scenario(
- 'csharp_protobuf_async_unary_ping_pong', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_protobuf_sync_to_async_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')
-
- yield _ping_pong_scenario(
- 'csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async',
- categories=[SMOKETEST,SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_protobuf_async_streaming_qps_unconstrained', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async',
- categories=[SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- server_language='c++', async_server_threads=1,
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_to_cpp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- server_language='c++', async_server_threads=1)
-
- yield _ping_pong_scenario(
- 'csharp_to_cpp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async', server_language='c++',
- categories=[SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='sync', server_language='c++',
- categories=[SCALABLE])
-
- yield _ping_pong_scenario(
- 'cpp_to_csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async', client_language='c++',
- categories=[SCALABLE])
-
- yield _ping_pong_scenario(
- 'csharp_protobuf_async_unary_ping_pong_1MB', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- categories=[SMOKETEST, SCALABLE])
-
- def __str__(self):
- return 'csharp'
-
-
-class NodeLanguage:
-
- def __init__(self):
- pass
- self.safename = str(self)
-
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_node.sh',
- '--benchmark_impl=grpc']
-
- def worker_port_offset(self):
- return 200
-
- def scenarios(self):
- # TODO(jtattermusch): make this scenario work
- yield _ping_pong_scenario(
- 'node_generic_streaming_ping_pong', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- use_generic_payload=True)
-
- yield _ping_pong_scenario(
- 'node_protobuf_streaming_ping_pong', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
-
- yield _ping_pong_scenario(
- 'node_protobuf_unary_ping_pong', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- categories=[SCALABLE, SMOKETEST])
-
- yield _ping_pong_scenario(
- 'cpp_to_node_unary_ping_pong', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- client_language='c++')
-
- yield _ping_pong_scenario(
- 'node_protobuf_unary_ping_pong_1MB', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- categories=[SCALABLE])
-
- sizes = [('1B', 1), ('1KB', 1024), ('10KB', 10 * 1024),
- ('1MB', 1024 * 1024), ('10MB', 10 * 1024 * 1024),
- ('100MB', 100 * 1024 * 1024)]
-
- for size_name, size in sizes:
- for secure in (True, False):
+ def __init__(self):
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_csharp.sh']
+
+ def worker_port_offset(self):
+ return 100
+
+ def scenarios(self):
+ yield _ping_pong_scenario(
+ 'csharp_generic_async_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_generic_async_streaming_ping_pong_insecure_1MB',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ use_generic_payload=True,
+ secure=False,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_generic_async_streaming_qps_unconstrained_insecure',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=False,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_async_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER')
+
yield _ping_pong_scenario(
- 'node_protobuf_unary_ping_pong_%s_resp_%s' %
- (size_name, 'secure' if secure else 'insecure'),
+ 'csharp_protobuf_async_unary_ping_pong',
rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=0, resp_size=size,
- secure=secure,
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_sync_to_async_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER')
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_async_streaming_qps_unconstrained',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
categories=[SCALABLE])
- yield _ping_pong_scenario(
- 'node_protobuf_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async',
- categories=[SCALABLE, SMOKETEST])
+ yield _ping_pong_scenario(
+ 'csharp_to_cpp_protobuf_sync_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1,
+ categories=[SMOKETEST, SCALABLE])
- yield _ping_pong_scenario(
- 'node_protobuf_streaming_qps_unconstrained', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async')
+ yield _ping_pong_scenario(
+ 'csharp_to_cpp_protobuf_async_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
- yield _ping_pong_scenario(
- 'node_to_cpp_protobuf_async_unary_ping_pong', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- server_language='c++', async_server_threads=1)
+ yield _ping_pong_scenario(
+ 'csharp_to_cpp_protobuf_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ server_language='c++',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='sync',
+ server_language='c++',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_to_csharp_protobuf_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ client_language='c++',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_async_unary_ping_pong_1MB',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ categories=[SMOKETEST, SCALABLE])
- yield _ping_pong_scenario(
- 'node_to_cpp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- server_language='c++', async_server_threads=1)
+ def __str__(self):
+ return 'csharp'
- def __str__(self):
- return 'node'
class PythonLanguage:
- def __init__(self):
- self.safename = 'python'
-
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_python.sh']
-
- def worker_port_offset(self):
- return 500
-
- def scenarios(self):
- yield _ping_pong_scenario(
- 'python_generic_sync_streaming_ping_pong', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- use_generic_payload=True,
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'python_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')
-
- yield _ping_pong_scenario(
- 'python_protobuf_async_unary_ping_pong', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
-
- yield _ping_pong_scenario(
- 'python_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'python_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='sync')
-
- yield _ping_pong_scenario(
- 'python_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='sync')
-
- yield _ping_pong_scenario(
- 'python_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- server_language='c++', async_server_threads=1,
- categories=[SMOKETEST, SCALABLE])
-
- yield _ping_pong_scenario(
- 'python_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- server_language='c++', async_server_threads=1)
-
- yield _ping_pong_scenario(
- 'python_protobuf_sync_unary_ping_pong_1MB', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- categories=[SMOKETEST, SCALABLE])
-
- def __str__(self):
- return 'python'
+ def __init__(self):
+ self.safename = 'python'
-class RubyLanguage:
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_python.sh']
- def __init__(self):
- pass
- self.safename = str(self)
+ def worker_port_offset(self):
+ return 500
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_ruby.sh']
+ def scenarios(self):
+ yield _ping_pong_scenario(
+ 'python_generic_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ categories=[SMOKETEST, SCALABLE])
- def worker_port_offset(self):
- return 300
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER')
- def scenarios(self):
- yield _ping_pong_scenario(
- 'ruby_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- categories=[SMOKETEST, SCALABLE])
+ yield _ping_pong_scenario(
+ 'python_protobuf_async_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER')
- yield _ping_pong_scenario(
- 'ruby_protobuf_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- categories=[SMOKETEST, SCALABLE])
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
- yield _ping_pong_scenario(
- 'ruby_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- unconstrained_client='sync')
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='sync')
- yield _ping_pong_scenario(
- 'ruby_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- unconstrained_client='sync')
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_streaming_qps_unconstrained',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='sync')
- yield _ping_pong_scenario(
- 'ruby_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- server_language='c++', async_server_threads=1)
+ yield _ping_pong_scenario(
+ 'python_to_cpp_protobuf_sync_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1,
+ categories=[SMOKETEST, SCALABLE])
- yield _ping_pong_scenario(
- 'ruby_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- server_language='c++', async_server_threads=1)
+ yield _ping_pong_scenario(
+ 'python_to_cpp_protobuf_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
- yield _ping_pong_scenario(
- 'ruby_protobuf_unary_ping_pong_1MB', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- req_size=1024*1024, resp_size=1024*1024,
- categories=[SMOKETEST, SCALABLE])
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_unary_ping_pong_1MB',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ categories=[SMOKETEST, SCALABLE])
- def __str__(self):
- return 'ruby'
+ def __str__(self):
+ return 'python'
-class PhpLanguage:
+class RubyLanguage:
- def __init__(self):
- pass
- self.safename = str(self)
+ def __init__(self):
+ self.safename = str(self)
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_php.sh']
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_ruby.sh']
- def worker_port_offset(self):
- return 800
+ def worker_port_offset(self):
+ return 300
- def scenarios(self):
- yield _ping_pong_scenario(
- 'php_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- server_language='c++', async_server_threads=1)
+ def scenarios(self):
+ yield _ping_pong_scenario(
+ 'ruby_protobuf_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
- yield _ping_pong_scenario(
- 'php_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- server_language='c++', async_server_threads=1)
+ yield _ping_pong_scenario(
+ 'ruby_protobuf_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
- def __str__(self):
- return 'php'
+ yield _ping_pong_scenario(
+ 'ruby_protobuf_sync_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='sync')
+ yield _ping_pong_scenario(
+ 'ruby_protobuf_sync_streaming_qps_unconstrained',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='sync')
-class JavaLanguage:
+ yield _ping_pong_scenario(
+ 'ruby_to_cpp_protobuf_sync_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
- def __init__(self):
- pass
- self.safename = str(self)
-
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_java.sh']
-
- def worker_port_offset(self):
- return 400
-
- def scenarios(self):
- for secure in [True, False]:
- secstr = 'secure' if secure else 'insecure'
- smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
-
- yield _ping_pong_scenario(
- 'java_generic_async_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- use_generic_payload=True, async_server_threads=1,
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
- categories=smoketest_categories)
-
- yield _ping_pong_scenario(
- 'java_protobuf_async_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- async_server_threads=1,
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
-
- yield _ping_pong_scenario(
- 'java_protobuf_async_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- async_server_threads=1,
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
- categories=smoketest_categories)
-
- yield _ping_pong_scenario(
- 'java_protobuf_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- async_server_threads=1,
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
-
- yield _ping_pong_scenario(
- 'java_protobuf_async_unary_qps_unconstrained_%s' % secstr, rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async',
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
- categories=smoketest_categories+[SCALABLE])
-
- yield _ping_pong_scenario(
- 'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async',
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
- categories=[SCALABLE])
-
- yield _ping_pong_scenario(
- 'java_generic_async_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
- categories=[SCALABLE])
-
- yield _ping_pong_scenario(
- 'java_generic_async_streaming_qps_one_server_core_%s' % secstr, rpc_type='STREAMING',
- client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async-limited', use_generic_payload=True,
- async_server_threads=1,
- secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
-
- # TODO(jtattermusch): add scenarios java vs C++
-
- def __str__(self):
- return 'java'
+ yield _ping_pong_scenario(
+ 'ruby_to_cpp_protobuf_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+ yield _ping_pong_scenario(
+ 'ruby_protobuf_unary_ping_pong_1MB',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ categories=[SMOKETEST, SCALABLE])
-class GoLanguage:
+ def __str__(self):
+ return 'ruby'
+
+
+class Php7Language:
+
+ def __init__(self, php7_protobuf_c=False):
+ self.php7_protobuf_c = php7_protobuf_c
+ self.safename = str(self)
+
+ def worker_cmdline(self):
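+ # When php7_protobuf_c is set, pass a flag that (presumably) makes the
+ # PHP worker load the protobuf C extension instead of the pure-PHP one.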
+ if self.php7_protobuf_c:
+ return [
+ 'tools/run_tests/performance/run_worker_php.sh',
+ '--use_protobuf_c_extension'
+ ]
+ return ['tools/run_tests/performance/run_worker_php.sh']
+
+ def worker_port_offset(self):
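+ # Each language (and PHP variant) gets its own offset, presumably so
+ # several workers can run on one host without port collisions.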
+ if self.php7_protobuf_c:
+ return 900
+ return 800
+
+ def scenarios(self):
+ php7_extension_mode = 'php7_protobuf_php_extension'
+ if self.php7_protobuf_c:
+ php7_extension_mode = 'php7_protobuf_c_extension'
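+ # The mode string prefixes every scenario name below, keeping results
+ # for the C-extension and pure-PHP runs separate.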
- def __init__(self):
- pass
- self.safename = str(self)
-
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_go.sh']
-
- def worker_port_offset(self):
- return 600
-
- def scenarios(self):
- for secure in [True, False]:
- secstr = 'secure' if secure else 'insecure'
- smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
-
- # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
- # but that's mostly because of lack of better name of the enum value.
- yield _ping_pong_scenario(
- 'go_generic_sync_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- use_generic_payload=True, async_server_threads=1,
- secure=secure,
- categories=smoketest_categories)
-
- yield _ping_pong_scenario(
- 'go_protobuf_sync_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- async_server_threads=1,
- secure=secure)
-
- yield _ping_pong_scenario(
- 'go_protobuf_sync_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- async_server_threads=1,
- secure=secure,
- categories=smoketest_categories)
-
- # unconstrained_client='async' is intended (client uses goroutines)
- yield _ping_pong_scenario(
- 'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr, rpc_type='UNARY',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- unconstrained_client='async',
- secure=secure,
- categories=smoketest_categories+[SCALABLE])
-
- # unconstrained_client='async' is intended (client uses goroutines)
- yield _ping_pong_scenario(
- 'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
- unconstrained_client='async',
- secure=secure,
- categories=[SCALABLE])
-
- # unconstrained_client='async' is intended (client uses goroutines)
- # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
- # but that's mostly because of lack of better name of the enum value.
- yield _ping_pong_scenario(
- 'go_generic_sync_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
- client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
- secure=secure,
- categories=[SCALABLE])
-
- # TODO(jtattermusch): add scenarios go vs C++
-
- def __str__(self):
- return 'go'
-
-class NodeExpressLanguage:
-
- def __init__(self):
- pass
- self.safename = str(self)
-
- def worker_cmdline(self):
- return ['tools/run_tests/performance/run_worker_node.sh',
- '--benchmark_impl=express']
-
- def worker_port_offset(self):
- return 700
-
- def scenarios(self):
- yield _ping_pong_scenario(
- 'node_express_json_unary_ping_pong', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- categories=[SCALABLE, SMOKETEST])
-
- yield _ping_pong_scenario(
- 'node_express_json_async_unary_qps_unconstrained', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- unconstrained_client='async',
- categories=[SCALABLE, SMOKETEST])
-
- sizes = [('1B', 1), ('1KB', 1024), ('10KB', 10 * 1024),
- ('1MB', 1024 * 1024), ('10MB', 10 * 1024 * 1024),
- ('100MB', 100 * 1024 * 1024)]
-
- for size_name, size in sizes:
- for secure in (True, False):
yield _ping_pong_scenario(
- 'node_express_json_unary_ping_pong_%s_resp_%s' %
- (size_name, 'secure' if secure else 'insecure'),
+ '%s_to_cpp_protobuf_sync_unary_ping_pong' % php7_extension_mode,
rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
- req_size=0, resp_size=size,
- secure=secure,
- categories=[SCALABLE])
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+
+ yield _ping_pong_scenario(
+ '%s_to_cpp_protobuf_sync_streaming_ping_pong' % php7_extension_mode,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+
+ # TODO(ddyihai): Investigate why QPS is better with async_server_threads=1
+ # (CPU usage 340%) than with async_server_threads=0 (CPU usage 490%).
+ yield _ping_pong_scenario(
+ '%s_to_cpp_protobuf_sync_unary_qps_unconstrained' %
+ php7_extension_mode,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ outstanding=1,
+ async_server_threads=1,
+ unconstrained_client='sync')
+
+ yield _ping_pong_scenario(
+ '%s_to_cpp_protobuf_sync_streaming_qps_unconstrained' %
+ php7_extension_mode,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ outstanding=1,
+ async_server_threads=1,
+ unconstrained_client='sync')
+
+ def __str__(self):
+ if self.php7_protobuf_c:
+ return 'php7_protobuf_c'
+ return 'php7'
+
+
+class JavaLanguage:
+
+ def __init__(self):
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_java.sh']
+
+ def worker_port_offset(self):
+ return 400
+
+ def scenarios(self):
+ for secure in [True, False]:
+ secstr = 'secure' if secure else 'insecure'
+ smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
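+ # Only the secure variants are tagged SMOKETEST; both secure and
+ # insecure variants are SCALABLE.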
+
+ yield _ping_pong_scenario(
+ 'java_generic_async_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_async_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS)
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_async_unary_ping_pong_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_unary_ping_pong_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS)
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_async_unary_qps_unconstrained_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'java_generic_async_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'java_generic_async_streaming_qps_one_server_core_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async-limited',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS)
+
+ # TODO(jtattermusch): add scenarios java vs C++
+
+ def __str__(self):
+ return 'java'
+
+
+class GoLanguage:
- def __str__(self):
- return 'node_express'
+ def __init__(self):
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_go.sh']
+
+ def worker_port_offset(self):
+ return 600
+
+ def scenarios(self):
+ for secure in [True, False]:
+ secstr = 'secure' if secure else 'insecure'
+ smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
+
+ # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
+ # but that's mostly for lack of a better name for the enum value.
+ yield _ping_pong_scenario(
+ 'go_generic_sync_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario(
+ 'go_protobuf_sync_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ async_server_threads=1,
+ secure=secure)
+
+ yield _ping_pong_scenario(
+ 'go_protobuf_sync_unary_ping_pong_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ async_server_threads=1,
+ secure=secure,
+ categories=smoketest_categories)
+
+ # unconstrained_client='async' is intended (client uses goroutines)
+ yield _ping_pong_scenario(
+ 'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ # unconstrained_client='async' is intended (client uses goroutines)
+ yield _ping_pong_scenario(
+ 'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ categories=[SCALABLE])
+
+ # unconstrained_client='async' is intended (client uses goroutines)
+ # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
+ # but that's mostly for lack of a better name for the enum value.
+ yield _ping_pong_scenario(
+ 'go_generic_sync_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ categories=[SCALABLE])
+
+ # TODO(jtattermusch): add scenarios go vs C++
+
+ def __str__(self):
+ return 'go'
LANGUAGES = {
- 'c++' : CXXLanguage(),
- 'csharp' : CSharpLanguage(),
- 'node' : NodeLanguage(),
- 'node_express': NodeExpressLanguage(),
- 'ruby' : RubyLanguage(),
- 'php' : PhpLanguage(),
- 'java' : JavaLanguage(),
- 'python' : PythonLanguage(),
- 'go' : GoLanguage(),
+ 'c++': CXXLanguage(),
+ 'csharp': CSharpLanguage(),
+ 'ruby': RubyLanguage(),
+ 'php7': Php7Language(),
+ 'php7_protobuf_c': Php7Language(php7_protobuf_c=True),
+ 'java': JavaLanguage(),
+ 'python': PythonLanguage(),
+ 'go': GoLanguage(),
}
diff --git a/tools/run_tests/performance/scenario_result_schema.json b/tools/run_tests/performance/scenario_result_schema.json
index c7b1904bd1..b00c2eed16 100644
--- a/tools/run_tests/performance/scenario_result_schema.json
+++ b/tools/run_tests/performance/scenario_result_schema.json
@@ -382,6 +382,11 @@
},
{
"mode": "NULLABLE",
+ "name": "core_http2_spurious_writes_begun",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
"name": "core_hpack_recv_indexed",
"type": "INTEGER"
},
@@ -512,6 +517,26 @@
},
{
"mode": "NULLABLE",
+ "name": "core_call_combiner_locks_initiated",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_locks_scheduled_items",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_set_notify_on_cancel",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_cancelled",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
"name": "core_executor_scheduled_short_items",
"type": "INTEGER"
},
@@ -542,22 +567,27 @@
},
{
"mode": "NULLABLE",
- "name": "core_executor_threads_created",
+ "name": "core_server_requested_calls",
"type": "INTEGER"
},
{
"mode": "NULLABLE",
- "name": "core_executor_threads_used",
+ "name": "core_server_slowpath_requests_queued",
"type": "INTEGER"
},
{
"mode": "NULLABLE",
- "name": "core_server_requested_calls",
+ "name": "core_cq_ev_queue_trylock_failures",
"type": "INTEGER"
},
{
"mode": "NULLABLE",
- "name": "core_server_slowpath_requests_queued",
+ "name": "core_cq_ev_queue_trylock_successes",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_cq_ev_queue_transient_pop_failures",
"type": "INTEGER"
},
{
@@ -862,31 +892,6 @@
},
{
"mode": "NULLABLE",
- "name": "core_executor_closures_per_wakeup",
- "type": "STRING"
- },
- {
- "mode": "NULLABLE",
- "name": "core_executor_closures_per_wakeup_bkts",
- "type": "STRING"
- },
- {
- "mode": "NULLABLE",
- "name": "core_executor_closures_per_wakeup_50p",
- "type": "FLOAT"
- },
- {
- "mode": "NULLABLE",
- "name": "core_executor_closures_per_wakeup_95p",
- "type": "FLOAT"
- },
- {
- "mode": "NULLABLE",
- "name": "core_executor_closures_per_wakeup_99p",
- "type": "FLOAT"
- },
- {
- "mode": "NULLABLE",
"name": "core_server_cqs_checked",
"type": "STRING"
},
@@ -1209,6 +1214,11 @@
},
{
"mode": "NULLABLE",
+ "name": "core_http2_spurious_writes_begun",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
"name": "core_hpack_recv_indexed",
"type": "INTEGER"
},
@@ -1339,6 +1349,26 @@
},
{
"mode": "NULLABLE",
+ "name": "core_call_combiner_locks_initiated",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_locks_scheduled_items",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_set_notify_on_cancel",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_cancelled",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
"name": "core_executor_scheduled_short_items",
"type": "INTEGER"
},
@@ -1369,22 +1399,27 @@
},
{
"mode": "NULLABLE",
- "name": "core_executor_threads_created",
+ "name": "core_server_requested_calls",
"type": "INTEGER"
},
{
"mode": "NULLABLE",
- "name": "core_executor_threads_used",
+ "name": "core_server_slowpath_requests_queued",
"type": "INTEGER"
},
{
"mode": "NULLABLE",
- "name": "core_server_requested_calls",
+ "name": "core_cq_ev_queue_trylock_failures",
"type": "INTEGER"
},
{
"mode": "NULLABLE",
- "name": "core_server_slowpath_requests_queued",
+ "name": "core_cq_ev_queue_trylock_successes",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_cq_ev_queue_transient_pop_failures",
"type": "INTEGER"
},
{
@@ -1689,31 +1724,6 @@
},
{
"mode": "NULLABLE",
- "name": "core_executor_closures_per_wakeup",
- "type": "STRING"
- },
- {
- "mode": "NULLABLE",
- "name": "core_executor_closures_per_wakeup_bkts",
- "type": "STRING"
- },
- {
- "mode": "NULLABLE",
- "name": "core_executor_closures_per_wakeup_50p",
- "type": "FLOAT"
- },
- {
- "mode": "NULLABLE",
- "name": "core_executor_closures_per_wakeup_95p",
- "type": "FLOAT"
- },
- {
- "mode": "NULLABLE",
- "name": "core_executor_closures_per_wakeup_99p",
- "type": "FLOAT"
- },
- {
- "mode": "NULLABLE",
"name": "core_server_cqs_checked",
"type": "STRING"
},