Diffstat (limited to 'tools/run_tests/performance'):
-rw-r--r--  tools/run_tests/performance/README.md          | 106
-rw-r--r--  tools/run_tests/performance/scenario_config.py |  69
2 files changed, 174 insertions(+), 1 deletion(-)
diff --git a/tools/run_tests/performance/README.md b/tools/run_tests/performance/README.md
new file mode 100644
index 0000000000..5fd64f6ee9
--- /dev/null
+++ b/tools/run_tests/performance/README.md
@@ -0,0 +1,106 @@
+# Overview of performance test suite, with steps for manual runs:
+
+For design of the tests, see
+http://www.grpc.io/docs/guides/benchmarking.html.
+
+## Pre-reqs for running these manually:
+In general, the benchmark worker and driver build scripts expect
+[linux_performance_worker_init.sh](../../gce/linux_performance_worker_init.sh) to have already been run.
+
+### To run benchmarks locally:
+* From the grpc repo root, start the
+[run_performance_tests.py](../run_performance_tests.py) runner script.
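+
+For example, a minimal local run might look like the following (the `-l` language
+flag is based on the runner's usual interface; check
+`tools/run_tests/run_performance_tests.py --help` for the authoritative options):
+```
+$ cd <grpc_repo_root>
+$ tools/run_tests/run_performance_tests.py -l c++
+```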
+
+### On remote machines, to start the driver and workers manually:
+The [run_performance_tests.py](../run_performance_tests.py) top-level runner script can also
+be used with remote machines, but for tasks such as profiling the server,
+it can be useful to run the workers manually.
+
+1. You'll need a "driver" machine and separate "worker" machines.
+For example, you might use one GCE "driver" machine and three other
+GCE "worker" machines in the same zone.
+
+2. Connect to each worker machine and start up a benchmark worker with a "driver_port".
+ * For example, to start the grpc-go benchmark worker:
+ [grpc-go worker main.go](https://github.com/grpc/grpc-go/blob/master/benchmark/worker/main.go) --driver_port <driver_port>
+
+#### Commands to start workers in different languages:
+ * Note that these commands are what the top-level
+ [run_performance_tests.py](../run_performance_tests.py) script uses to
+ build and run the different workers, through the
+ [build_performance.sh](./build_performance.sh) script and the per-language "run worker"
+ scripts (such as [run_worker_java.sh](./run_worker_java.sh)).
+
+##### Running benchmark workers for C-core wrapped languages (C++, Python, C#, Node, Ruby):
+ * These are simpler to run since they all live in the main grpc repo.
+
+```
+$ cd <grpc_repo_root>
+$ tools/run_tests/performance/build_performance.sh
+$ tools/run_tests/performance/run_worker_<language>.sh
+```
+
+ * Note that there is one "run_worker" script per language, e.g.,
+ [run_worker_csharp.sh](./run_worker_csharp.sh) for C#.
+
+##### Running benchmark workers for gRPC-Java:
+ * You'll need the [grpc-java](https://github.com/grpc/grpc-java) repo.
+
+```
+$ cd <grpc-java-repo>
+$ ./gradlew -PskipCodegen=true :grpc-benchmarks:installDist
+$ benchmarks/build/install/grpc-benchmarks/bin/benchmark_worker --driver_port <driver_port>
+```
+
+##### Running benchmark workers for gRPC-Go:
+ * You'll need the [grpc-go repo](https://github.com/grpc/grpc-go)
+
+```
+$ cd <grpc-go-repo>/benchmark/worker && go install
+$ # if profiling, it might be helpful to turn off inlining by building with "-gcflags=-l"
+$ $GOPATH/bin/worker --driver_port <driver_port>
+```
+
+#### Build the driver:
+* Connect to the driver machine (if using a remote driver) and from the grpc repo root:
+```
+$ tools/run_tests/performance/build_performance.sh
+```
+
+#### Run the driver:
+1. Get the scenario JSON for the scenario you want to run. Note that these scenario
+   JSON configs are generated from [scenario_config.py](./scenario_config.py).
+   The [driver](../../../test/cpp/qps/qps_json_driver.cc) takes a list of these configs as a JSON string of the form `{scenarios: <json_list_of_scenarios>}`
+   in its `--scenarios_json` command-line argument.
+   One quick way to get a valid JSON string to pass to the driver is to run
+   [run_performance_tests.py](../run_performance_tests.py) locally and copy the logged scenario JSON argument (see also the sketch after the driver example below).
+
+2. From the grpc repo root:
+
+* Set the `QPS_WORKERS` environment variable to a comma-separated list of worker
+machines. Note that the driver will start the "benchmark server" on the first
+entry in the list, and the rest will run as clients against that
+benchmark server.
+
+Example of running the driver (the profiling examples below assume a grpc-go benchmark server):
+```
+$ export QPS_WORKERS=<host1>:10000,<host2>:10000,<host3>:10000
+$ bins/opt/qps_json_driver --scenario_json='<scenario_json_scenario_config_string>'
+```
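+
+Another way to get scenario JSON is to generate it directly from
+[scenario_config.py](./scenario_config.py), sketched below (the `CXXLanguage` class
+comes from that script; the top-level `scenarios` key is assumed to match what
+`qps_json_driver` expects, so verify it against the driver before relying on it):
+```
+$ cd <grpc_repo_root>/tools/run_tests/performance
+$ python -c "import json, scenario_config; print(json.dumps({'scenarios': list(scenario_config.CXXLanguage().scenarios())}))"
+```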
+
+### Example profiling commands
+
+While running the benchmark, a profiler can be attached to the server.
+
+Example: count write syscalls in the grpc-go server during a benchmark:
+* Connect to the server machine and run:
+```
+$ netstat -tulpn | grep <driver_port> # to get the pid of the worker process
+$ perf stat -p <worker_pid> -e syscalls:sys_enter_write # stop (Ctrl-C) after the test completes
+```
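+
+A CPU profile of the same worker process can be collected with standard
+`perf record`/`perf report` (a generic sketch; the 30 second sampling window is arbitrary):
+```
+$ perf record -g -p <worker_pid> -- sleep 30  # sample call stacks while the benchmark runs
+$ perf report  # browse the hottest functions after the run
+```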
+
+Example memory profile of the grpc-go server, with `go tool pprof`:
+* After a run has finished, view the server's allocation profile with:
+```
+$ go tool pprof --text --alloc_space http://localhost:<pprof_port>/debug/heap
+```
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index 1d91b61ba4..af510fe049 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -215,6 +215,29 @@ class CXXLanguage:
categories=smoketest_categories+[SCALABLE])
yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_1channel_1MBmsg_%s' % secstr,
+ rpc_type='STREAMING',
+ req_size=1024*1024,
+ resp_size=1024*1024,
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async', use_generic_payload=True,
+ secure=secure,
+ categories=smoketest_categories+[SCALABLE],
+ channels=1, outstanding=100)
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_64KBmsg_%s' % secstr,
+ rpc_type='STREAMING',
+ req_size=64*1024,
+ resp_size=64*1024,
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async', use_generic_payload=True,
+ secure=secure,
+ categories=smoketest_categories+[SCALABLE])
+
+ yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
@@ -235,6 +258,19 @@ class CXXLanguage:
excluded_poll_engines = ['poll-cv'])
yield _ping_pong_scenario(
+ 'cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_%s' %
+ (secstr),
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ channels=1,
+ outstanding=64,
+ req_size=128,
+ resp_size=8*1024*1024,
+ secure=secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
'cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
@@ -244,6 +280,13 @@ class CXXLanguage:
categories=smoketest_categories+[SCALABLE],
excluded_poll_engines = ['poll-cv'])
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_ping_pong_%s_1mb' % secstr, rpc_type='UNARY',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+ req_size=1024*1024, resp_size=1024*1024,
+ secure=secure,
+ categories=smoketest_categories)
+
for rpc_type in ['unary', 'streaming']:
for synchronicity in ['sync', 'async']:
yield _ping_pong_scenario(
@@ -376,6 +419,12 @@ class CSharpLanguage:
unconstrained_client='async', client_language='c++',
categories=[SCALABLE])
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_async_unary_ping_pong_1mb', rpc_type='UNARY',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+ req_size=1024*1024, resp_size=1024*1024,
+ categories=[SMOKETEST])
+
def __str__(self):
return 'csharp'
@@ -413,9 +462,15 @@ class NodeLanguage:
yield _ping_pong_scenario(
'cpp_to_node_unary_ping_pong', rpc_type='UNARY',
- client_type='ASYNC_CLIENT', server_type='async_server',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
client_language='c++')
+ yield _ping_pong_scenario(
+ 'node_protobuf_async_unary_ping_pong_1mb', rpc_type='UNARY',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+ req_size=1024*1024, resp_size=1024*1024,
+ categories=[SMOKETEST])
+
# TODO(murgatroid99): fix bugs with this scenario and re-enable it
# yield _ping_pong_scenario(
# 'node_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
@@ -496,6 +551,12 @@ class PythonLanguage:
client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
server_language='c++', async_server_threads=1)
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_unary_ping_pong_1mb', rpc_type='UNARY',
+ client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
+ req_size=1024*1024, resp_size=1024*1024,
+ categories=[SMOKETEST])
+
def __str__(self):
return 'python'
@@ -542,6 +603,12 @@ class RubyLanguage:
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
server_language='c++', async_server_threads=1)
+ yield _ping_pong_scenario(
+ 'ruby_protobuf_async_unary_ping_pong_1mb', rpc_type='UNARY',
+ client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
+ req_size=1024*1024, resp_size=1024*1024,
+ categories=[SMOKETEST])
+
def __str__(self):
return 'ruby'