1 //
2 //
3 // Copyright 2015 gRPC authors.
4 //
5 // Licensed under the Apache License, Version 2.0 (the "License");
6 // you may not use this file except in compliance with the License.
7 // You may obtain a copy of the License at
8 //
9 // http://www.apache.org/licenses/LICENSE-2.0
10 //
11 // Unless required by applicable law or agreed to in writing, software
12 // distributed under the License is distributed on an "AS IS" BASIS,
13 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 // See the License for the specific language governing permissions and
15 // limitations under the License.
16 //
17 //
18
19 #include <cinttypes>
20 #include <mutex>
21 #include <thread>
22
23 #include <gtest/gtest.h>
24
25 #include <grpc/grpc.h>
26 #include <grpc/support/time.h>
27 #include <grpcpp/channel.h>
28 #include <grpcpp/client_context.h>
29 #include <grpcpp/create_channel.h>
30 #include <grpcpp/impl/sync.h>
31 #include <grpcpp/resource_quota.h>
32 #include <grpcpp/server.h>
33 #include <grpcpp/server_builder.h>
34 #include <grpcpp/server_context.h>
35
36 #include "src/core/lib/gprpp/env.h"
37 #include "src/core/lib/surface/api_trace.h"
38 #include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
39 #include "src/proto/grpc/testing/echo.grpc.pb.h"
40 #include "test/core/util/port.h"
41 #include "test/core/util/test_config.h"
42
// Stress-test dimensions. Total sync-path RPC volume is
// kNumThreads * kNumRpcs; the async client sends kNumAsyncSendThreads *
// kNumRpcs RPCs drained by kNumAsyncReceiveThreads completion threads.
constexpr int kNumThreads = 10;  // Number of threads
constexpr int kNumAsyncSendThreads = 2;
constexpr int kNumAsyncReceiveThreads = 5;
constexpr int kNumAsyncServerThreads = 5;
constexpr int kNumRpcs = 1000;  // Number of RPCs per thread
48
49 namespace grpc {
50 namespace testing {
51
52 class TestServiceImpl : public grpc::testing::EchoTestService::Service {
53 public:
TestServiceImpl()54 TestServiceImpl() {}
55
Echo(ServerContext *,const EchoRequest * request,EchoResponse * response)56 Status Echo(ServerContext* /*context*/, const EchoRequest* request,
57 EchoResponse* response) override {
58 response->set_message(request->message());
59 return Status::OK;
60 }
61 };
62
63 template <class Service>
64 class CommonStressTest {
65 public:
CommonStressTest()66 CommonStressTest() : kMaxMessageSize_(8192) {
67 #if TARGET_OS_IPHONE
68 // Workaround Apple CFStream bug
69 grpc_core::SetEnv("grpc_cfstream", "0");
70 #endif
71 }
~CommonStressTest()72 virtual ~CommonStressTest() {}
73 virtual void SetUp() = 0;
74 virtual void TearDown() = 0;
75 virtual void ResetStub() = 0;
76 virtual bool AllowExhaustion() = 0;
GetStub()77 grpc::testing::EchoTestService::Stub* GetStub() { return stub_.get(); }
78
79 protected:
80 std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
81 std::unique_ptr<Server> server_;
82
83 virtual void SetUpStart(ServerBuilder* builder, Service* service) = 0;
SetUpStartCommon(ServerBuilder * builder,Service * service)84 void SetUpStartCommon(ServerBuilder* builder, Service* service) {
85 builder->RegisterService(service);
86 builder->SetMaxMessageSize(
87 kMaxMessageSize_); // For testing max message size.
88 }
SetUpEnd(ServerBuilder * builder)89 void SetUpEnd(ServerBuilder* builder) { server_ = builder->BuildAndStart(); }
TearDownStart()90 void TearDownStart() { server_->Shutdown(); }
TearDownEnd()91 void TearDownEnd() {}
92
93 private:
94 const int kMaxMessageSize_;
95 };
96
97 template <class Service>
98 class CommonStressTestInsecure : public CommonStressTest<Service> {
99 public:
ResetStub()100 void ResetStub() override {
101 std::shared_ptr<Channel> channel = grpc::CreateChannel(
102 server_address_.str(), InsecureChannelCredentials());
103 this->stub_ = grpc::testing::EchoTestService::NewStub(channel);
104 }
AllowExhaustion()105 bool AllowExhaustion() override { return false; }
106
107 protected:
SetUpStart(ServerBuilder * builder,Service * service)108 void SetUpStart(ServerBuilder* builder, Service* service) override {
109 int port = grpc_pick_unused_port_or_die();
110 this->server_address_ << "localhost:" << port;
111 // Setup server
112 builder->AddListeningPort(server_address_.str(),
113 InsecureServerCredentials());
114 this->SetUpStartCommon(builder, service);
115 }
116
117 private:
118 std::ostringstream server_address_;
119 };
120
121 template <class Service, bool allow_resource_exhaustion>
122 class CommonStressTestInproc : public CommonStressTest<Service> {
123 public:
ResetStub()124 void ResetStub() override {
125 ChannelArguments args;
126 std::shared_ptr<Channel> channel = this->server_->InProcessChannel(args);
127 this->stub_ = grpc::testing::EchoTestService::NewStub(channel);
128 }
AllowExhaustion()129 bool AllowExhaustion() override { return allow_resource_exhaustion; }
130
131 protected:
SetUpStart(ServerBuilder * builder,Service * service)132 void SetUpStart(ServerBuilder* builder, Service* service) override {
133 this->SetUpStartCommon(builder, service);
134 }
135 };
136
137 template <class BaseClass>
138 class CommonStressTestSyncServer : public BaseClass {
139 public:
SetUp()140 void SetUp() override {
141 ServerBuilder builder;
142 this->SetUpStart(&builder, &service_);
143 this->SetUpEnd(&builder);
144 }
TearDown()145 void TearDown() override {
146 this->TearDownStart();
147 this->TearDownEnd();
148 }
149
150 private:
151 TestServiceImpl service_;
152 };
153
154 template <class BaseClass>
155 class CommonStressTestSyncServerLowThreadCount : public BaseClass {
156 public:
SetUp()157 void SetUp() override {
158 ServerBuilder builder;
159 ResourceQuota quota;
160 this->SetUpStart(&builder, &service_);
161 quota.SetMaxThreads(4);
162 builder.SetResourceQuota(quota);
163 this->SetUpEnd(&builder);
164 }
TearDown()165 void TearDown() override {
166 this->TearDownStart();
167 this->TearDownEnd();
168 }
169
170 private:
171 TestServiceImpl service_;
172 };
173
// Server policy: completion-queue-based async server. A pool of contexts is
// pre-armed with pending Echo requests; kNumAsyncServerThreads workers pull
// completions off the shared queue, answer READY calls, and re-arm DONE ones.
template <class BaseClass>
class CommonStressTestAsyncServer : public BaseClass {
 public:
  // One context slot per expected in-flight RPC: 100 per server thread.
  CommonStressTestAsyncServer() : contexts_(kNumAsyncServerThreads * 100) {}
  void SetUp() override {
    shutting_down_ = false;
    ServerBuilder builder;
    this->SetUpStart(&builder, &service_);
    cq_ = builder.AddCompletionQueue();
    this->SetUpEnd(&builder);
    // Pre-post an Echo request on every slot before the workers start.
    for (int i = 0; i < kNumAsyncServerThreads * 100; i++) {
      RefreshContext(i);
    }
    for (int i = 0; i < kNumAsyncServerThreads; i++) {
      server_threads_.emplace_back(&CommonStressTestAsyncServer::ProcessRpcs,
                                   this);
    }
  }
  void TearDown() override {
    {
      // Hold mu_ so a concurrent RefreshContext cannot post a new request
      // between the server shutdown and the queue shutdown.
      grpc::internal::MutexLock l(&mu_);
      this->TearDownStart();
      shutting_down_ = true;
      cq_->Shutdown();
    }

    for (int i = 0; i < kNumAsyncServerThreads; i++) {
      server_threads_[i].join();
    }

    // Drain whatever completions remain after shutdown; Next() returns
    // false once the queue is fully drained.
    void* ignored_tag;
    bool ignored_ok;
    while (cq_->Next(&ignored_tag, &ignored_ok)) {
    }
    this->TearDownEnd();
  }

 private:
  // Worker loop: each completion's tag is the context index (see
  // RefreshContext); the slot's state says which event just finished.
  void ProcessRpcs() {
    void* tag;
    bool ok;
    while (cq_->Next(&tag, &ok)) {
      if (ok) {
        int i = static_cast<int>(reinterpret_cast<intptr_t>(tag));
        switch (contexts_[i].state) {
          case Context::READY: {
            // A new RPC arrived: echo the payload and finish the call.
            contexts_[i].state = Context::DONE;
            EchoResponse send_response;
            send_response.set_message(contexts_[i].recv_request.message());
            contexts_[i].response_writer->Finish(send_response, Status::OK,
                                                 tag);
            break;
          }
          case Context::DONE:
            // Finish completed: recycle the slot for the next incoming RPC.
            RefreshContext(i);
            break;
        }
      }
    }
  }
  // Reset slot i and request a new Echo on it, unless teardown has begun.
  // The slot index doubles as the completion-queue tag.
  void RefreshContext(int i) {
    grpc::internal::MutexLock l(&mu_);
    if (!shutting_down_) {
      contexts_[i].state = Context::READY;
      contexts_[i].srv_ctx.reset(new ServerContext);
      contexts_[i].response_writer.reset(
          new grpc::ServerAsyncResponseWriter<EchoResponse>(
              contexts_[i].srv_ctx.get()));
      service_.RequestEcho(contexts_[i].srv_ctx.get(),
                           &contexts_[i].recv_request,
                           contexts_[i].response_writer.get(), cq_.get(),
                           cq_.get(), reinterpret_cast<void*>(i));
    }
  }
  // Per-slot server-side call state; `state` tracks which completion event
  // is pending on the queue for this slot.
  struct Context {
    std::unique_ptr<ServerContext> srv_ctx;
    std::unique_ptr<grpc::ServerAsyncResponseWriter<EchoResponse>>
        response_writer;
    EchoRequest recv_request;
    enum { READY, DONE } state;
  };
  std::vector<Context> contexts_;
  grpc::testing::EchoTestService::AsyncService service_;
  std::unique_ptr<ServerCompletionQueue> cq_;
  bool shutting_down_;  // guarded by mu_
  grpc::internal::Mutex mu_;
  std::vector<std::thread> server_threads_;
};
262
263 template <class Common>
264 class End2endTest : public ::testing::Test {
265 protected:
End2endTest()266 End2endTest() {}
SetUp()267 void SetUp() override { common_.SetUp(); }
TearDown()268 void TearDown() override { common_.TearDown(); }
ResetStub()269 void ResetStub() { common_.ResetStub(); }
270
271 Common common_;
272 };
273
SendRpc(grpc::testing::EchoTestService::Stub * stub,int num_rpcs,bool allow_exhaustion,gpr_atm * errors)274 static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs,
275 bool allow_exhaustion, gpr_atm* errors) {
276 EchoRequest request;
277 EchoResponse response;
278 request.set_message("Hello");
279
280 for (int i = 0; i < num_rpcs; ++i) {
281 ClientContext context;
282 Status s = stub->Echo(&context, request, &response);
283 EXPECT_TRUE(s.ok() || (allow_exhaustion &&
284 s.error_code() == StatusCode::RESOURCE_EXHAUSTED));
285 if (!s.ok()) {
286 if (!(allow_exhaustion &&
287 s.error_code() == StatusCode::RESOURCE_EXHAUSTED)) {
288 gpr_log(GPR_ERROR, "RPC error: %d: %s", s.error_code(),
289 s.error_message().c_str());
290 }
291 gpr_atm_no_barrier_fetch_add(errors, gpr_atm{1});
292 } else {
293 EXPECT_EQ(response.message(), request.message());
294 }
295 }
296 }
297
// Server flavors exercised by both typed suites: a synchronous server and a
// completion-queue async server, each over an insecure localhost channel.
typedef ::testing::Types<
    CommonStressTestSyncServer<CommonStressTestInsecure<TestServiceImpl>>,
    CommonStressTestAsyncServer<
        CommonStressTestInsecure<grpc::testing::EchoTestService::AsyncService>>>
    CommonTypes;
TYPED_TEST_SUITE(End2endTest, CommonTypes);
// Hammers the server with kNumThreads concurrent synchronous clients, each
// issuing kNumRpcs Echo calls, and tallies failures in a shared atomic.
TYPED_TEST(End2endTest, ThreadStress) {
  this->common_.ResetStub();
  gpr_atm errors;
  gpr_atm_rel_store(&errors, gpr_atm{0});
  std::vector<std::thread> threads;
  threads.reserve(kNumThreads);
  for (int t = 0; t < kNumThreads; ++t) {
    threads.emplace_back(SendRpc, this->common_.GetStub(), kNumRpcs,
                         this->common_.AllowExhaustion(), &errors);
  }
  for (auto& thread : threads) {
    thread.join();
  }
  uint64_t error_cnt = static_cast<uint64_t>(gpr_atm_no_barrier_load(&errors));
  if (error_cnt != 0) {
    gpr_log(GPR_INFO, "RPC error count: %" PRIu64, error_cnt);
  }
  // If this test allows resource exhaustion, expect that it actually sees some
  if (this->common_.AllowExhaustion()) {
    EXPECT_GT(error_cnt, 0);
  }
}
326
// Async-client fixture: sender threads fire Echo RPCs into a shared
// CompletionQueue; receiver threads drain completions and free call state.
// rpcs_outstanding_ (guarded by mu_) tracks in-flight calls.
template <class Common>
class AsyncClientEnd2endTest : public ::testing::Test {
 protected:
  AsyncClientEnd2endTest() : rpcs_outstanding_(0) {}

  void SetUp() override { common_.SetUp(); }
  void TearDown() override {
    // Drain anything still on the queue (Wait() has already shut it down)
    // before tearing the server down.
    void* ignored_tag;
    bool ignored_ok;
    while (cq_.Next(&ignored_tag, &ignored_ok)) {
    }
    common_.TearDown();
  }

  // Blocks until every outstanding RPC has completed, then shuts the queue
  // down so AsyncCompleteRpc() threads fall out of their Next() loops.
  void Wait() {
    grpc::internal::MutexLock l(&mu_);
    while (rpcs_outstanding_ != 0) {
      cv_.Wait(&mu_);
    }

    cq_.Shutdown();
  }

  // Per-call client state; heap-allocated by AsyncSendRpc, used as the CQ
  // tag, and deleted by AsyncCompleteRpc once the call finishes.
  struct AsyncClientCall {
    EchoResponse response;
    ClientContext context;
    Status status;
    std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader;
  };

  // Fires num_rpcs async Echo calls, incrementing rpcs_outstanding_ under
  // mu_ for each one.
  void AsyncSendRpc(int num_rpcs) {
    for (int i = 0; i < num_rpcs; ++i) {
      AsyncClientCall* call = new AsyncClientCall;
      EchoRequest request;
      request.set_message("Hello: " + std::to_string(i));
      call->response_reader =
          common_.GetStub()->AsyncEcho(&call->context, request, &cq_);
      call->response_reader->Finish(&call->response, &call->status, call);

      grpc::internal::MutexLock l(&mu_);
      rpcs_outstanding_++;
    }
  }

  // Completion loop: pops finished calls until the queue is shut down,
  // frees each call, and signals Wait() when the outstanding count reaches
  // zero. The signal is sent after releasing mu_.
  void AsyncCompleteRpc() {
    while (true) {
      void* got_tag;
      bool ok = false;
      if (!cq_.Next(&got_tag, &ok)) break;
      AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);
      if (!ok) {
        gpr_log(GPR_DEBUG, "Error: %d", call->status.error_code());
      }
      delete call;

      bool notify;
      {
        grpc::internal::MutexLock l(&mu_);
        rpcs_outstanding_--;
        notify = (rpcs_outstanding_ == 0);
      }
      if (notify) {
        cv_.Signal();
      }
    }
  }

  Common common_;
  CompletionQueue cq_;
  grpc::internal::Mutex mu_;
  grpc::internal::CondVar cv_;
  int rpcs_outstanding_;  // guarded by mu_
};
400
TYPED_TEST_SUITE(AsyncClientEnd2endTest, CommonTypes);
// Concurrent async client: sender threads enqueue RPCs while completion
// threads drain results from the shared queue.
TYPED_TEST(AsyncClientEnd2endTest, ThreadStress) {
  this->common_.ResetStub();
  std::vector<std::thread> send_threads, completion_threads;
  // Start the drain threads first so completions are consumed while the
  // senders are still issuing RPCs.
  for (int i = 0; i < kNumAsyncReceiveThreads; ++i) {
    completion_threads.emplace_back([this] { this->AsyncCompleteRpc(); });
  }
  for (int i = 0; i < kNumAsyncSendThreads; ++i) {
    send_threads.emplace_back([this] { this->AsyncSendRpc(kNumRpcs); });
  }
  for (auto& sender : send_threads) {
    sender.join();
  }

  // Wait() blocks until all RPCs are done, then shuts the queue down so the
  // completion threads exit and can be joined.
  this->Wait();
  for (auto& drainer : completion_threads) {
    drainer.join();
  }
}
424
425 } // namespace testing
426 } // namespace grpc
427
// Test entry point: set up the gRPC test environment (ports, tracing, env
// overrides), initialize gtest, and run all typed suites.
int main(int argc, char** argv) {
  grpc::testing::TestEnvironment env(&argc, argv);
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
433