// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

import "src/proto/grpc/testing/payloads.proto";
import "src/proto/grpc/testing/stats.proto";
import "google/protobuf/timestamp.proto";

package grpc.testing;

enum ClientType {
  // Many languages support a basic distinction between sync and async
  // clients; this enum lets the caller specify which one to use.
  SYNC_CLIENT = 0;
  ASYNC_CLIENT = 1;
  OTHER_CLIENT = 2; // used for some language-specific variants
  CALLBACK_CLIENT = 3;
}

enum ServerType {
  SYNC_SERVER = 0;
  ASYNC_SERVER = 1;
  ASYNC_GENERIC_SERVER = 2;
  OTHER_SERVER = 3; // used for some language-specific variants
  CALLBACK_SERVER = 4;
}

enum RpcType {
  UNARY = 0;
  STREAMING = 1;
  STREAMING_FROM_CLIENT = 2;
  STREAMING_FROM_SERVER = 3;
  STREAMING_BOTH_WAYS = 4;
}

// Parameters of a Poisson process, which is a good representation
// of activity coming in from independent identical stationary sources.
message PoissonParams {
  // The rate of arrivals (a.k.a. the lambda parameter of the exponential
  // inter-arrival distribution).
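  // Worked example (illustrative numbers only): offered_load = 1000 means
  // RPCs start as a Poisson process at an average rate of 1000 RPCs/sec,
  // i.e. inter-arrival times are exponentially distributed with mean
  // 1/lambda = 1 ms.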
  double offered_load = 1;
}

// Once an RPC finishes, immediately start a new one.
// No configuration parameters needed.
message ClosedLoopParams {}

message LoadParams {
  oneof load {
    ClosedLoopParams closed_loop = 1;
    PoissonParams poisson = 2;
  }
}
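
// A sketch in proto text format (hypothetical values): a Poisson load of
// 500 RPCs/sec is written as
//   load_params { poisson { offered_load: 500 } }
// while closed-loop load is simply
//   load_params { closed_loop {} }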

// presence of SecurityParams implies use of TLS
message SecurityParams {
  bool use_test_ca = 1;
  string server_host_override = 2;
  string cred_type = 3;
}
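
// A minimal illustrative value (proto text format). The override host shown
// is the one conventionally paired with gRPC's test CA; treat the exact
// strings as an assumption rather than something this schema mandates:
//   security_params { use_test_ca: true server_host_override: "foo.test.google.fr" }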

message ChannelArg {
  string name = 1;
  oneof value {
    string str_value = 2;
    int32 int_value = 3;
  }
}
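
// For example (proto text format; the arg names are illustrative, not values
// mandated by this schema):
//   channel_args { name: "grpc.optimization_target" str_value: "latency" }
//   channel_args { name: "grpc.minimal_stack" int_value: 1 }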

message ClientConfig {
  // List of targets to connect to. At least one target needs to be specified.
  repeated string server_targets = 1;
  ClientType client_type = 2;
  SecurityParams security_params = 3;
  // How many concurrent RPCs to start for each channel.
  // For a synchronous client, a separate thread is used for each outstanding RPC.
  int32 outstanding_rpcs_per_channel = 4;
  // Number of independent client channels to create.
  // The i-th channel will connect to server_targets[i % server_targets.size()].
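  // Worked example (illustrative): with 2 entries in server_targets and
  // client_channels = 5, channels 0..4 connect to targets 0, 1, 0, 1, 0.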
  int32 client_channels = 5;
  // Only for async client. Number of threads to use to start/manage RPCs.
  int32 async_client_threads = 7;
  RpcType rpc_type = 8;
  // The requested load for the entire client (aggregated over all the threads).
  LoadParams load_params = 10;
  PayloadConfig payload_config = 11;
  HistogramParams histogram_params = 12;

  // Specify the cores we should run the client on, if desired
  repeated int32 core_list = 13;
  int32 core_limit = 14;

  // If we use an OTHER_CLIENT client_type, this string gives more detail
  string other_client_api = 15;

  repeated ChannelArg channel_args = 16;

  // Number of threads that share each completion queue
  int32 threads_per_cq = 17;

  // Number of messages on a stream before it gets finished/restarted
  int32 messages_per_stream = 18;

  // Use coalescing API when possible.
  bool use_coalesce_api = 19;

  // If 0, disabled. Else, specifies the period between gathering latency
  // medians in milliseconds.
  int32 median_latency_collection_interval_millis = 20;

  // Number of client processes. 0 indicates no restriction.
  int32 client_processes = 21;
}

message ClientStatus { ClientStats stats = 1; }

// Request current stats
message Mark {
  // If true, the stats will be reset after taking their snapshot.
  bool reset = 1;
}

message ClientArgs {
  oneof argtype {
    ClientConfig setup = 1;
    Mark mark = 2;
  }
}
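
// Sketch of a typical driver exchange (an assumption based on common use of
// the streaming worker API; the sequencing is not defined in this file):
//   first message on the stream:  setup { ... }          // configure and start the client
//   subsequent messages:          mark { reset: true }   // snapshot and reset stats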

message ServerConfig {
  ServerType server_type = 1;
  SecurityParams security_params = 2;
  // Port on which to listen. Zero means pick unused port.
  int32 port = 4;
  // Only for async server. Number of threads used to serve the requests.
  int32 async_server_threads = 7;
  // Specify the number of cores to limit server to, if desired
  int32 core_limit = 8;
  // Payload config, used in the generic server.
  // Note this must NOT be used with proto (non-generic) servers. For proto servers,
  // 'response sizes' must be configured from the 'response_size' field of the
  // 'SimpleRequest' objects in RPC requests.
  PayloadConfig payload_config = 9;

  // Specify the cores we should run the server on, if desired
  repeated int32 core_list = 10;

  // If we use an OTHER_SERVER server_type, this string gives more detail
  string other_server_api = 11;

  // Number of threads that share each completion queue
  int32 threads_per_cq = 12;

  // c++-only options (for now) --------------------------------

  // Buffer pool size (no buffer pool specified if unset)
  int32 resource_quota_size = 1001;
  repeated ChannelArg channel_args = 1002;

  // Number of server processes. 0 indicates no restriction.
  int32 server_processes = 21;
}

message ServerArgs {
  oneof argtype {
    ServerConfig setup = 1;
    Mark mark = 2;
  }
}

message ServerStatus {
  ServerStats stats = 1;
  // The port bound by the server
  int32 port = 2;
  // Number of cores available to the server
  int32 cores = 3;
}

message CoreRequest {
}

message CoreResponse {
  // Number of cores available on the server
  int32 cores = 1;
}

message Void {
}

// A single performance scenario: input to qps_json_driver
message Scenario {
  // Human-readable name for this scenario
  string name = 1;
  // Client configuration
  ClientConfig client_config = 2;
  // Number of clients to start for the test
  int32 num_clients = 3;
  // Server configuration
  ServerConfig server_config = 4;
  // Number of servers to start for the test
  int32 num_servers = 5;
  // Warmup period, in seconds
  int32 warmup_seconds = 6;
  // Benchmark time, in seconds
  int32 benchmark_seconds = 7;
  // Number of workers to spawn locally (usually zero)
  int32 spawn_local_worker_count = 8;
}

// A set of scenarios to be run with qps_json_driver
message Scenarios {
  repeated Scenario scenarios = 1;
}
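
// A minimal illustrative Scenarios value (proto text format; every field value
// below is hypothetical and chosen only to show the shape of the message, not
// a recommended benchmark configuration):
//
//   scenarios {
//     name: "example_async_unary_closed_loop"
//     num_clients: 1
//     num_servers: 1
//     warmup_seconds: 5
//     benchmark_seconds: 30
//     client_config {
//       client_type: ASYNC_CLIENT
//       rpc_type: UNARY
//       client_channels: 1
//       outstanding_rpcs_per_channel: 1
//       async_client_threads: 1
//       load_params { closed_loop {} }
//     }
//     server_config {
//       server_type: ASYNC_SERVER
//       async_server_threads: 1
//     }
//   }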

// Basic summary that can be computed from ClientStats and ServerStats
// once the scenario has finished.
message ScenarioResultSummary {
  // Total number of operations per second over all clients. What is counted
  // as one 'operation' depends on the benchmark scenario:
  // for unary benchmarks, an operation is the processing of a single unary RPC;
  // for streaming benchmarks, an operation is a single request/response ping-pong.
  double qps = 1;
  // QPS per server core.
  double qps_per_server_core = 2;
  // The total server cpu load based on system time across all server
  // processes, expressed as a percentage of a single cpu core. For example,
  // 85 implies 85% of a cpu core, 125 implies 125% of a cpu core. Since we
  // are accumulating the cpu load across all the server processes, the value
  // can be > 100 when there are multiple servers or a single server using
  // multiple threads and cores. The same explanation applies to the total
  // client cpu load below.
  double server_system_time = 3;
  // The total server cpu load based on user time across all server processes,
  // expressed as a percentage of a single cpu core. (85 => 85%, 125 => 125%)
  double server_user_time = 4;
  // The total client cpu load based on system time across all client processes,
  // expressed as a percentage of a single cpu core. (85 => 85%, 125 => 125%)
  double client_system_time = 5;
  // The total client cpu load based on user time across all client processes,
  // expressed as a percentage of a single cpu core. (85 => 85%, 125 => 125%)
  double client_user_time = 6;

  // X% latency percentiles (in nanoseconds)
  double latency_50 = 7;
  double latency_90 = 8;
  double latency_95 = 9;
  double latency_99 = 10;
  double latency_999 = 11;

  // Server cpu usage percentage
  double server_cpu_usage = 12;

  // Requests per second that succeeded/failed
  double successful_requests_per_second = 13;
  double failed_requests_per_second = 14;

  // Number of polls called inside completion queue per request
  double client_polls_per_request = 15;
  double server_polls_per_request = 16;

  // Queries per CPU-sec over all servers or clients
  double server_queries_per_cpu_sec = 17;
  double client_queries_per_cpu_sec = 18;

  // Start and end time for the test scenario
  google.protobuf.Timestamp start_time = 19;
  google.protobuf.Timestamp end_time = 20;
}

// Results of a single benchmark scenario.
message ScenarioResult {
  // Inputs used to run the scenario.
  Scenario scenario = 1;
  // Histograms from all clients merged into one histogram.
  HistogramData latencies = 2;
  // Client stats for each client
  repeated ClientStats client_stats = 3;
  // Server stats for each server
  repeated ServerStats server_stats = 4;
  // Number of cores available to each server
  repeated int32 server_cores = 5;
  // An after-the-fact computed summary
  ScenarioResultSummary summary = 6;
  // Information on success or failure of each worker
  repeated bool client_success = 7;
  repeated bool server_success = 8;
  // Number of failed requests (one row per status code seen)
  repeated RequestResultCount request_results = 9;
}