// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

import "src/proto/grpc/testing/payloads.proto";
import "src/proto/grpc/testing/stats.proto";

package grpc.testing;

enum ClientType {
  // Many languages support a basic distinction between synchronous and
  // asynchronous clients; this enum lets the scenario specify which to use.
  SYNC_CLIENT = 0;
  ASYNC_CLIENT = 1;
  OTHER_CLIENT = 2; // used for some language-specific variants
}

enum ServerType {
  SYNC_SERVER = 0;
  ASYNC_SERVER = 1;
  // Async server that serves generic (byte-buffer) methods, bypassing
  // proto serialization.
  ASYNC_GENERIC_SERVER = 2;
  OTHER_SERVER = 3; // used for some language-specific variants
}

enum RpcType {
  UNARY = 0;
  STREAMING = 1;
  STREAMING_FROM_CLIENT = 2;
  STREAMING_FROM_SERVER = 3;
  STREAMING_BOTH_WAYS = 4;
}

// Parameters of a Poisson arrival process, which is a good representation
// of activity coming in from many independent, identical, stationary sources.
message PoissonParams {
  // The rate of arrivals (a.k.a. the lambda parameter of the exponential
  // inter-arrival time distribution).
  double offered_load = 1;
}

// Once an RPC finishes, immediately start a new one.
// No configuration parameters needed.
message ClosedLoopParams {}

message LoadParams {
  oneof load {
    ClosedLoopParams closed_loop = 1;
    PoissonParams poisson = 2;
  }
}
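
// Illustrative (non-normative) example of a LoadParams value in text format
// (the number is made up):
//
//   poisson { offered_load: 100 }
//
// asks the client to issue RPCs as a Poisson process at an average rate of
// 100 RPCs/sec (mean inter-arrival time of 10 ms), while
//
//   closed_loop {}
//
// starts each new RPC as soon as the previous one finishes.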

// Presence of SecurityParams implies use of TLS.
message SecurityParams {
  // If true, use the test CA certificate bundled with gRPC.
  bool use_test_ca = 1;
  // Value used to override the target name during TLS host name checking.
  string server_host_override = 2;
  // Name of the credential type to use.
  string cred_type = 3;
}

// A single channel argument (name/value pair) to apply to created channels.
message ChannelArg {
  string name = 1;
  oneof value {
    string str_value = 2;
    int32 int_value = 3;
  }
}
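
// Illustrative (non-normative) example of a ChannelArg value in text format;
// the argument name shown is an assumption and not defined by this file:
//
//   name: "grpc.optimization_target" str_value: "throughput"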

message ClientConfig {
  // List of targets to connect to. At least one target needs to be specified.
  repeated string server_targets = 1;
  ClientType client_type = 2;
  SecurityParams security_params = 3;
  // How many concurrent RPCs to start for each channel.
  // For a synchronous client, a separate thread is used for each outstanding RPC.
  int32 outstanding_rpcs_per_channel = 4;
  // Number of independent client channels to create.
  // The i-th channel connects to server_targets[i % server_targets.size()].
  int32 client_channels = 5;
  // Only for async client. Number of threads to use to start/manage RPCs.
  int32 async_client_threads = 7;
  RpcType rpc_type = 8;
  // The requested load for the entire client (aggregated over all the threads).
  LoadParams load_params = 10;
  PayloadConfig payload_config = 11;
  HistogramParams histogram_params = 12;

  // Specify the cores we should run the client on, if desired
  repeated int32 core_list = 13;
  // Limit on the number of cores the client may use, if desired.
  int32 core_limit = 14;

  // If we use an OTHER_CLIENT client_type, this string gives more detail
  string other_client_api = 15;

  repeated ChannelArg channel_args = 16;

  // Number of threads that share each completion queue
  int32 threads_per_cq = 17;

  // Number of messages on a stream before it gets finished/restarted
  int32 messages_per_stream = 18;

  // Use coalescing API when possible.
  bool use_coalesce_api = 19;

  // If 0, this collection is disabled. Otherwise, specifies the interval, in
  // milliseconds, between collections of the median latency.
  int32 median_latency_collection_interval_millis = 20;
}
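
// Illustrative (non-normative) example of how the channel/target mapping
// works: with
//
//   server_targets: "10.0.0.1:8080"
//   server_targets: "10.0.0.2:8080"
//   client_channels: 4
//   outstanding_rpcs_per_channel: 10
//
// channels 0 and 2 connect to the first target, channels 1 and 3 connect to
// the second, and the client keeps 4 * 10 = 40 RPCs in flight overall.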

message ClientStatus { ClientStats stats = 1; }

// Request current stats
message Mark {
  // If true, the stats will be reset after taking their snapshot.
  bool reset = 1;
}

message ClientArgs {
  oneof argtype {
    ClientConfig setup = 1;
    Mark mark = 2;
  }
}

message ServerConfig {
  ServerType server_type = 1;
  SecurityParams security_params = 2;
  // Port on which to listen. Zero means pick an unused port.
  int32 port = 4;
  // Only for async server. Number of threads used to serve the requests.
  int32 async_server_threads = 7;
  // Specify the number of cores to limit the server to, if desired
  int32 core_limit = 8;
  // Payload config, used by the generic server.
  // Note that this must NOT be used with proto (non-generic) servers. For proto
  // servers, response sizes are configured via the 'response_size' field of the
  // 'SimpleRequest' objects carried in RPC requests.
  PayloadConfig payload_config = 9;

  // Specify the cores we should run the server on, if desired
  repeated int32 core_list = 10;

  // If we use an OTHER_SERVER server_type, this string gives more detail
  string other_server_api = 11;

  // Number of threads that share each completion queue
  int32 threads_per_cq = 12;

  // C++-only options (for now) --------------------------------

  // Buffer pool size (no buffer pool specified if unset)
  int32 resource_quota_size = 1001;
  repeated ChannelArg channel_args = 1002;
}
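
// Illustrative (non-normative) sketch of a minimal ServerConfig in text
// format (port 0 lets the worker pick an unused port; the thread count is
// made up):
//
//   server_type: ASYNC_SERVER
//   port: 0
//   async_server_threads: 4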

message ServerArgs {
  oneof argtype {
    ServerConfig setup = 1;
    Mark mark = 2;
  }
}

message ServerStatus {
  ServerStats stats = 1;
  // The port bound by the server
  int32 port = 2;
  // Number of cores available to the server
  int32 cores = 3;
}

// Request for the number of cores available on a worker.
message CoreRequest {
}

message CoreResponse {
  // Number of cores available on the server
  int32 cores = 1;
}

// An empty message, used where no request or response payload is needed.
message Void {
}

// A single performance scenario: input to qps_json_driver
message Scenario {
  // Human readable name for this scenario
  string name = 1;
  // Client configuration
  ClientConfig client_config = 2;
  // Number of clients to start for the test
  int32 num_clients = 3;
  // Server configuration
  ServerConfig server_config = 4;
  // Number of servers to start for the test
  int32 num_servers = 5;
  // Warmup period, in seconds
  int32 warmup_seconds = 6;
  // Benchmark time, in seconds
  int32 benchmark_seconds = 7;
  // Number of workers to spawn locally (usually zero)
  int32 spawn_local_worker_count = 8;
}

// A set of scenarios to be run with qps_json_driver
message Scenarios {
  repeated Scenario scenarios = 1;
}
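
// Illustrative (non-normative) sketch of a Scenarios value in text format,
// using only fields defined above (the name and all numbers are made up):
//
//   scenarios {
//     name: "async_unary_example"
//     num_clients: 1
//     num_servers: 1
//     warmup_seconds: 5
//     benchmark_seconds: 30
//     client_config {
//       client_type: ASYNC_CLIENT
//       rpc_type: UNARY
//       client_channels: 64
//       outstanding_rpcs_per_channel: 100
//       load_params { closed_loop {} }
//     }
//     server_config { server_type: ASYNC_SERVER }
//   }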

// Basic summary that can be computed from ClientStats and ServerStats
// once the scenario has finished.
message ScenarioResultSummary {
  // Total number of operations per second over all clients.
  double qps = 1;
  // QPS per one server core.
  double qps_per_server_core = 2;
  // server load based on system_time (0.85 => 85%)
  double server_system_time = 3;
  // server load based on user_time (0.85 => 85%)
  double server_user_time = 4;
  // client load based on system_time (0.85 => 85%)
  double client_system_time = 5;
  // client load based on user_time (0.85 => 85%)
  double client_user_time = 6;

  // Latency percentiles, in nanoseconds: 50th, 90th, 95th, 99th, and 99.9th.
  double latency_50 = 7;
  double latency_90 = 8;
  double latency_95 = 9;
  double latency_99 = 10;
  double latency_999 = 11;

  // server cpu usage percentage
  double server_cpu_usage = 12;

  // Number of requests per second that succeeded/failed
  double successful_requests_per_second = 13;
  double failed_requests_per_second = 14;

  // Number of completion-queue polls per request
  double client_polls_per_request = 15;
  double server_polls_per_request = 16;

  // Queries per CPU-sec over all servers or clients
  double server_queries_per_cpu_sec = 17;
  double client_queries_per_cpu_sec = 18;
}
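
// Illustrative (non-normative) arithmetic for the summary fields above: if a
// run measured qps = 100000 against servers with 16 cores in total, then
// qps_per_server_core would be 100000 / 16 = 6250.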

// Results of a single benchmark scenario.
message ScenarioResult {
  // Inputs used to run the scenario.
  Scenario scenario = 1;
  // Histograms from all clients merged into one histogram.
  HistogramData latencies = 2;
  // Client stats for each client
  repeated ClientStats client_stats = 3;
  // Server stats for each server
  repeated ServerStats server_stats = 4;
  // Number of cores available to each server
  repeated int32 server_cores = 5;
  // An after-the-fact computed summary
  ScenarioResultSummary summary = 6;
  // Information on success or failure of each worker
  repeated bool client_success = 7;
  repeated bool server_success = 8;
  // Number of failed requests (one row per status code seen)
  repeated RequestResultCount request_results = 9;
}