author    Craig Tiller <ctiller@google.com>  2016-11-16 14:56:34 -0800
committer Craig Tiller <ctiller@google.com>  2016-11-16 14:56:34 -0800
commit    1016d9feca27f295fb8fe1c0e76a9fbd0ef14051 (patch)
tree      5f9ce273109af25c2e8585118bd2b77fd2bd1227 /tools/run_tests/performance
parent    13fda37dee1b1de6dda079b3a019e138352ee176 (diff)
parent    740665a6f65b3d827e0755de8bb1bcd57745b9f1 (diff)
Merge github.com:grpc/grpc into bm_fullstack
Diffstat (limited to 'tools/run_tests/performance')
-rwxr-xr-x  tools/run_tests/performance/bq_upload_result.py |  4
-rw-r--r--  tools/run_tests/performance/scenario_config.py  | 33
2 files changed, 6 insertions(+), 31 deletions(-)
diff --git a/tools/run_tests/performance/bq_upload_result.py b/tools/run_tests/performance/bq_upload_result.py
index 0ea23d2212..ddcf053ae5 100755
--- a/tools/run_tests/performance/bq_upload_result.py
+++ b/tools/run_tests/performance/bq_upload_result.py
@@ -115,6 +115,9 @@ def _flatten_result_inplace(scenario_result):
   scenario_result['scenario']['clientConfig'] = json.dumps(scenario_result['scenario']['clientConfig'])
   scenario_result['scenario']['serverConfig'] = json.dumps(scenario_result['scenario']['serverConfig'])
   scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
+  for stats in scenario_result['serverStats']:
+    stats.pop('totalCpuTime', None)
+    stats.pop('idleCpuTime', None)
   for stats in scenario_result['clientStats']:
     stats['latencies'] = json.dumps(stats['latencies'])
     stats.pop('requestResults', None)
@@ -122,6 +125,7 @@ def _flatten_result_inplace(scenario_result):
   scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
   scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
   scenario_result['requestResults'] = json.dumps(scenario_result.get('requestResults', []))
+  scenario_result['summary'].pop('serverCpuUsage', None)
   scenario_result['summary'].pop('successfulRequestsPerSecond', None)
   scenario_result['summary'].pop('failedRequestsPerSecond', None)
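
Note: the added lines above strip the CPU-related fields (totalCpuTime, idleCpuTime, serverCpuUsage) from the result before it is flattened and uploaded, presumably because the target BigQuery table does not carry columns for them. A minimal, self-contained sketch of the same idea; the sample dict below is invented for illustration and is not real benchmark output:

import json

# Invented example result; real data comes from the benchmark driver.
scenario_result = {
    'serverStats': [
        {'timeElapsed': 10.0, 'totalCpuTime': 5.1, 'idleCpuTime': 2.3},
    ],
    'summary': {'qps': 1000.0, 'serverCpuUsage': 55.0},
}

# Drop fields the upload schema does not carry, mirroring the diff above.
for stats in scenario_result['serverStats']:
    stats.pop('totalCpuTime', None)
    stats.pop('idleCpuTime', None)
scenario_result['summary'].pop('serverCpuUsage', None)

print(json.dumps(scenario_result, indent=2))
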
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index 4e4c16642b..c3c5ece362 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -688,46 +688,17 @@ class NodeExpressLanguage:
     return 700
   def scenarios(self):
-    # TODO(jtattermusch): make this scenario work
-    #yield _ping_pong_scenario(
-    #    'node_generic_async_streaming_ping_pong', rpc_type='STREAMING',
-    #    client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-    #    use_generic_payload=True)
-
-    # TODO(jtattermusch): make this scenario work
-    #yield _ping_pong_scenario(
-    #    'node_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
-    #    client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
-
     yield _ping_pong_scenario(
-        'node_protobuf_unary_ping_pong', rpc_type='UNARY',
+        'node_express_json_unary_ping_pong', rpc_type='UNARY',
         client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
         categories=[SCALABLE, SMOKETEST])
     yield _ping_pong_scenario(
-        'node_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
+        'node_express_json_async_unary_qps_unconstrained', rpc_type='UNARY',
         client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
         unconstrained_client='async',
         categories=[SCALABLE, SMOKETEST])
-    # TODO(jtattermusch): make this scenario work
-    #yield _ping_pong_scenario(
-    #    'node_protobuf_async_streaming_qps_unconstrained', rpc_type='STREAMING',
-    #    client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-    #    unconstrained_client='async')
-
-    # TODO(jtattermusch): make this scenario work
-    #yield _ping_pong_scenario(
-    #    'node_to_cpp_protobuf_async_unary_ping_pong', rpc_type='UNARY',
-    #    client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-    #    server_language='c++', server_core_limit=1, async_server_threads=1)
-
-    # TODO(jtattermusch): make this scenario work
-    #yield _ping_pong_scenario(
-    #    'node_to_cpp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
-    #    client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-    #    server_language='c++', server_core_limit=1, async_server_threads=1)
-
   def __str__(self):
     return 'node_express'
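
Note: scenarios() is a generator that yields one configuration dict per benchmark scenario; the hunk above renames the two working Node scenarios to node_express_json_* and deletes the commented-out TODO ones. A rough, self-contained sketch of how such a generator is shaped and consumed; the _ping_pong_scenario stub and the string category values here are stand-ins for the real helpers and constants in scenario_config.py:

# Hypothetical stub for illustration only; the real _ping_pong_scenario
# builds a complete client/server configuration dict.
def _ping_pong_scenario(name, **kwargs):
    scenario = {'name': name}
    scenario.update(kwargs)
    return scenario

def scenarios():
    # Mirrors the two scenarios kept by the diff above.
    yield _ping_pong_scenario(
        'node_express_json_unary_ping_pong', rpc_type='UNARY',
        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
        categories=['SCALABLE', 'SMOKETEST'])
    yield _ping_pong_scenario(
        'node_express_json_async_unary_qps_unconstrained', rpc_type='UNARY',
        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
        unconstrained_client='async',
        categories=['SCALABLE', 'SMOKETEST'])

# Downstream tooling can iterate the generator, e.g. to list scenario names.
for s in scenarios():
    print(s['name'])
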