//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//

#include <limits.h>
#include <string.h>

#include <algorithm>
#include <atomic>
#include <cstdlib>
#include <memory>
#include <new>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

#include "absl/status/status.h"

#include <grpc/byte_buffer.h>
#include <grpc/grpc.h>
#include <grpc/impl/channel_arg_names.h>
#include <grpc/slice.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include <grpcpp/channel.h>
#include <grpcpp/completion_queue.h>
#include <grpcpp/generic/async_generic_service.h>
#include <grpcpp/health_check_service_interface.h>
#include <grpcpp/impl/call.h>
#include <grpcpp/impl/call_op_set.h>
#include <grpcpp/impl/call_op_set_interface.h>
#include <grpcpp/impl/completion_queue_tag.h>
#include <grpcpp/impl/interceptor_common.h>
#include <grpcpp/impl/metadata_map.h>
#include <grpcpp/impl/rpc_method.h>
#include <grpcpp/impl/rpc_service_method.h>
#include <grpcpp/impl/server_callback_handlers.h>
#include <grpcpp/impl/server_initializer.h>
#include <grpcpp/impl/service_type.h>
#include <grpcpp/impl/sync.h>
#include <grpcpp/security/server_credentials.h>
#include <grpcpp/server.h>
#include <grpcpp/server_context.h>
#include <grpcpp/server_interface.h>
#include <grpcpp/support/byte_buffer.h>
#include <grpcpp/support/channel_arguments.h>
#include <grpcpp/support/client_interceptor.h>
#include <grpcpp/support/interceptor.h>
#include <grpcpp/support/method_handler.h>
#include <grpcpp/support/server_interceptor.h>
#include <grpcpp/support/slice.h>
#include <grpcpp/support/status.h>

#include "src/core/ext/transport/inproc/inproc_transport.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/resource_quota/api.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/server.h"
#include "src/cpp/client/create_channel_internal.h"
#include "src/cpp/server/external_connection_acceptor_impl.h"
#include "src/cpp/server/health/default_health_check_service.h"
#include "src/cpp/thread_manager/thread_manager.h"

namespace grpc {
namespace {

// The default value for the maximum number of threads that can be created in
// the sync server. This value of INT_MAX is chosen to match the default
// behavior if no ResourceQuota is set. To modify the max number of threads in
// a sync server, pass a custom ResourceQuota object (with the desired
// max-threads value set) to the server builder.
#define DEFAULT_MAX_SYNC_SERVER_THREADS INT_MAX

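// A minimal sketch of that (application code, not part of this file; names
// are illustrative):
//
//   grpc::ResourceQuota quota("sync-server-quota");
//   quota.SetMaxThreads(64);  // cap sync-server threads at 64
//   grpc::ServerBuilder builder;
//   builder.SetResourceQuota(quota);
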
// Give a useful status error message if the resource is exhausted specifically
// because the server threadpool is full.
const char* kServerThreadpoolExhausted = "Server Threadpool Exhausted";

// Although we might like to give a useful status error message on
// unimplemented RPCs, it's not always possible since that also would need to
// be added across languages and isn't actually required by the spec.
const char* kUnknownRpcMethod = "";

class DefaultGlobalCallbacks final : public Server::GlobalCallbacks {
 public:
  ~DefaultGlobalCallbacks() override {}
  void PreSynchronousRequest(ServerContext* /*context*/) override {}
  void PostSynchronousRequest(ServerContext* /*context*/) override {}
};

std::shared_ptr<Server::GlobalCallbacks> g_callbacks = nullptr;
gpr_once g_once_init_callbacks = GPR_ONCE_INIT;

void InitGlobalCallbacks() {
  if (!g_callbacks) {
    g_callbacks.reset(new DefaultGlobalCallbacks());
  }
}

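// For a CompletionQueueTag, FinalizeResult() returning false tells the C++
// CompletionQueue to drop the event instead of handing it to the caller of
// Next(); returning true surfaces it. ShutdownTag events are therefore
// swallowed, while PhonyTag events are delivered.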
class ShutdownTag : public internal::CompletionQueueTag {
 public:
  bool FinalizeResult(void** /*tag*/, bool* /*status*/) override {
    return false;
  }
};

class PhonyTag : public internal::CompletionQueueTag {
 public:
  bool FinalizeResult(void** /*tag*/, bool* /*status*/) override {
    return true;
  }
};

class UnimplementedAsyncRequestContext {
 protected:
  UnimplementedAsyncRequestContext() : generic_stream_(&server_context_) {}

  GenericServerContext server_context_;
  GenericServerAsyncReaderWriter generic_stream_;
};

}  // namespace

ServerInterface::BaseAsyncRequest::BaseAsyncRequest(
    ServerInterface* server, ServerContext* context,
    internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
    ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
    : server_(server),
      context_(context),
      stream_(stream),
      call_cq_(call_cq),
      notification_cq_(notification_cq),
      tag_(tag),
      delete_on_finalize_(delete_on_finalize),
      call_(nullptr),
      done_intercepting_(false) {
  // Set up interception state partially for the receive ops. call_wrapper_ is
  // not filled at this point, but it will be filled before the interceptors
  // are run.
  interceptor_methods_.SetCall(&call_wrapper_);
  interceptor_methods_.SetReverse();
  call_cq_->RegisterAvalanching();  // This op will trigger more ops
  call_metric_recording_enabled_ = server_->call_metric_recording_enabled();
  server_metric_recorder_ = server_->server_metric_recorder();
}

ServerInterface::BaseAsyncRequest::~BaseAsyncRequest() {
  call_cq_->CompleteAvalanching();
}

bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
                                                       bool* status) {
  if (done_intercepting_) {
    *tag = tag_;
    if (delete_on_finalize_) {
      delete this;
    }
    return true;
  }
  context_->set_call(call_, call_metric_recording_enabled_,
                     server_metric_recorder_);
  context_->cq_ = call_cq_;
  if (call_wrapper_.call() == nullptr) {
    // Fill it since it is empty.
    call_wrapper_ = internal::Call(
        call_, server_, call_cq_, server_->max_receive_message_size(), nullptr);
  }

  // Just the pointers inside call are copied here.
  stream_->BindCall(&call_wrapper_);

  if (*status && call_ && call_wrapper_.server_rpc_info()) {
    done_intercepting_ = true;
    // Set interception point for RECV INITIAL METADATA
    interceptor_methods_.AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA);
    interceptor_methods_.SetRecvInitialMetadata(&context_->client_metadata_);
    if (interceptor_methods_.RunInterceptors(
            [this]() { ContinueFinalizeResultAfterInterception(); })) {
      // There are no interceptors to run. Continue.
    } else {
      // There were interceptors to be run, so
      // ContinueFinalizeResultAfterInterception will be run when interceptors
      // are done.
      return false;
    }
  }
  if (*status && call_) {
    context_->BeginCompletionOp(&call_wrapper_, nullptr, nullptr);
  }
  *tag = tag_;
  if (delete_on_finalize_) {
    delete this;
  }
  return true;
}

void ServerInterface::BaseAsyncRequest::
    ContinueFinalizeResultAfterInterception() {
  context_->BeginCompletionOp(&call_wrapper_, nullptr, nullptr);
  // Queue a tag which will be returned immediately
  grpc_core::ExecCtx exec_ctx;
  grpc_cq_begin_op(notification_cq_->cq(), this);
  grpc_cq_end_op(
      notification_cq_->cq(), this, absl::OkStatus(),
      [](void* /*arg*/, grpc_cq_completion* completion) { delete completion; },
      nullptr, new grpc_cq_completion());
}

ServerInterface::RegisteredAsyncRequest::RegisteredAsyncRequest(
    ServerInterface* server, ServerContext* context,
    internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
    ServerCompletionQueue* notification_cq, void* tag, const char* name,
    internal::RpcMethod::RpcType type)
    : BaseAsyncRequest(server, context, stream, call_cq, notification_cq, tag,
                       true),
      name_(name),
      type_(type) {}

void ServerInterface::RegisteredAsyncRequest::IssueRequest(
    void* registered_method, grpc_byte_buffer** payload,
    ServerCompletionQueue* notification_cq) {
  // The following call_start_batch is internally-generated so no need for an
  // explanatory log on failure.
  GPR_ASSERT(grpc_server_request_registered_call(
                 server_->server(), registered_method, &call_,
                 &context_->deadline_, context_->client_metadata_.arr(),
                 payload, call_cq_->cq(), notification_cq->cq(),
                 this) == GRPC_CALL_OK);
}

ServerInterface::GenericAsyncRequest::GenericAsyncRequest(
    ServerInterface* server, GenericServerContext* context,
    internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
    ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize,
    bool issue_request)
    : BaseAsyncRequest(server, context, stream, call_cq, notification_cq, tag,
                       delete_on_finalize) {
  grpc_call_details_init(&call_details_);
  GPR_ASSERT(notification_cq);
  GPR_ASSERT(call_cq);
  if (issue_request) {
    IssueRequest();
  }
}

bool ServerInterface::GenericAsyncRequest::FinalizeResult(void** tag,
                                                          bool* status) {
  // If we are done intercepting, there is nothing more for us to do
  if (done_intercepting_) {
    return BaseAsyncRequest::FinalizeResult(tag, status);
  }
  // TODO(yangg) remove the copy here.
  if (*status) {
    static_cast<GenericServerContext*>(context_)->method_ =
        StringFromCopiedSlice(call_details_.method);
    static_cast<GenericServerContext*>(context_)->host_ =
        StringFromCopiedSlice(call_details_.host);
    context_->deadline_ = call_details_.deadline;
  }
  grpc_slice_unref(call_details_.method);
  grpc_slice_unref(call_details_.host);
  call_wrapper_ = internal::Call(
      call_, server_, call_cq_, server_->max_receive_message_size(),
      context_->set_server_rpc_info(
          static_cast<GenericServerContext*>(context_)->method_.c_str(),
          internal::RpcMethod::BIDI_STREAMING,
          *server_->interceptor_creators()));
  return BaseAsyncRequest::FinalizeResult(tag, status);
}

void ServerInterface::GenericAsyncRequest::IssueRequest() {
  // The following call_start_batch is internally-generated so no need for an
  // explanatory log on failure.
  GPR_ASSERT(grpc_server_request_call(server_->server(), &call_, &call_details_,
                                      context_->client_metadata_.arr(),
                                      call_cq_->cq(), notification_cq_->cq(),
                                      this) == GRPC_CALL_OK);
}

namespace {
class ShutdownCallback : public grpc_completion_queue_functor {
 public:
  ShutdownCallback() {
    functor_run = &ShutdownCallback::Run;
    // Set inlineable to true since this callback is trivial and thus does not
    // need to be run from the executor (triggering a thread hop). This should
    // only be used by internal callbacks like this and not by user application
    // code.
    inlineable = true;
  }
  // TakeCQ takes ownership of the cq into the shutdown callback
  // so that the shutdown callback will be responsible for destroying it.
  void TakeCQ(CompletionQueue* cq) { cq_ = cq; }

  // The Run function will get invoked by the completion queue library
  // when the shutdown is actually complete.
  static void Run(grpc_completion_queue_functor* cb, int) {
    auto* callback = static_cast<ShutdownCallback*>(cb);
    delete callback->cq_;
    delete callback;
  }

 private:
  CompletionQueue* cq_ = nullptr;
};
}  // namespace

/// Use private inheritance rather than composition only to establish order
/// of construction, since the public base class should be constructed after
/// the elements belonging to the private base class are constructed. This is
/// not possible using true composition.
class Server::UnimplementedAsyncRequest final
    : private grpc::UnimplementedAsyncRequestContext,
      public GenericAsyncRequest {
 public:
  UnimplementedAsyncRequest(ServerInterface* server,
                            grpc::ServerCompletionQueue* cq)
      : GenericAsyncRequest(server, &server_context_, &generic_stream_, cq, cq,
                            /*tag=*/nullptr, /*delete_on_finalize=*/false,
                            /*issue_request=*/false) {
    // Issue the request here instead of in the base class to prevent a race
    // on the vptr.
    IssueRequest();
  }

  bool FinalizeResult(void** tag, bool* status) override;

  grpc::ServerContext* context() { return &server_context_; }
  grpc::GenericServerAsyncReaderWriter* stream() { return &generic_stream_; }
};

/// UnimplementedAsyncResponse should not post user-visible completions to the
/// C++ completion queue, but is generated as a CQ event by the core.
class Server::UnimplementedAsyncResponse final
    : public grpc::internal::CallOpSet<
          grpc::internal::CallOpSendInitialMetadata,
          grpc::internal::CallOpServerSendStatus> {
 public:
  explicit UnimplementedAsyncResponse(UnimplementedAsyncRequest* request);
  ~UnimplementedAsyncResponse() override { delete request_; }

  bool FinalizeResult(void** tag, bool* status) override {
    if (grpc::internal::CallOpSet<
            grpc::internal::CallOpSendInitialMetadata,
            grpc::internal::CallOpServerSendStatus>::FinalizeResult(tag,
                                                                    status)) {
      delete this;
    } else {
      // The tag was swallowed due to interception. We will see it again.
    }
    return false;
  }

 private:
  UnimplementedAsyncRequest* const request_;
};

class Server::SyncRequest final : public grpc::internal::CompletionQueueTag {
 public:
  SyncRequest(Server* server, grpc::internal::RpcServiceMethod* method,
              grpc_core::Server::RegisteredCallAllocation* data)
      : SyncRequest(server, method) {
    CommonSetup(data);
    data->deadline = &deadline_;
    data->optional_payload = has_request_payload_ ? &request_payload_ : nullptr;
  }

  SyncRequest(Server* server, grpc::internal::RpcServiceMethod* method,
              grpc_core::Server::BatchCallAllocation* data)
      : SyncRequest(server, method) {
    CommonSetup(data);
    call_details_ = new grpc_call_details;
    grpc_call_details_init(call_details_);
    data->details = call_details_;
  }

  ~SyncRequest() override {
    // The destructor should only clean up those objects created in the
    // constructor, since some paths may or may not actually go through the
    // Run stage where other objects are allocated.
    if (has_request_payload_ && request_payload_) {
      grpc_byte_buffer_destroy(request_payload_);
    }
    if (call_details_ != nullptr) {
      grpc_call_details_destroy(call_details_);
      delete call_details_;
    }
    grpc_metadata_array_destroy(&request_metadata_);
    server_->UnrefWithPossibleNotify();
  }

  bool FinalizeResult(void** /*tag*/, bool* status) override {
    if (!*status) {
      delete this;
      return false;
    }
    if (call_details_) {
      deadline_ = call_details_->deadline;
    }
    return true;
  }

  void Run(const std::shared_ptr<GlobalCallbacks>& global_callbacks,
           bool resources) {
    ctx_.Init(deadline_, &request_metadata_);
    wrapped_call_.Init(
        call_, server_, &cq_, server_->max_receive_message_size(),
        ctx_->ctx.set_server_rpc_info(method_->name(), method_->method_type(),
                                      server_->interceptor_creators_));
    ctx_->ctx.set_call(call_, server_->call_metric_recording_enabled(),
                       server_->server_metric_recorder());
    ctx_->ctx.cq_ = &cq_;
    request_metadata_.count = 0;

    global_callbacks_ = global_callbacks;
    resources_ = resources;

    interceptor_methods_.SetCall(&*wrapped_call_);
    interceptor_methods_.SetReverse();
    // Set interception point for RECV INITIAL METADATA
    interceptor_methods_.AddInterceptionHookPoint(
        grpc::experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA);
    interceptor_methods_.SetRecvInitialMetadata(&ctx_->ctx.client_metadata_);

    if (has_request_payload_) {
      // Set interception point for RECV MESSAGE
      auto* handler = resources_ ? method_->handler()
                                 : server_->resource_exhausted_handler_.get();
      deserialized_request_ = handler->Deserialize(call_, request_payload_,
                                                   &request_status_, nullptr);
      if (!request_status_.ok()) {
        gpr_log(GPR_DEBUG, "Failed to deserialize message.");
      }
      request_payload_ = nullptr;
      interceptor_methods_.AddInterceptionHookPoint(
          grpc::experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
      interceptor_methods_.SetRecvMessage(deserialized_request_, nullptr);
    }

    if (interceptor_methods_.RunInterceptors(
            [this]() { ContinueRunAfterInterception(); })) {
      ContinueRunAfterInterception();
    } else {
      // There were interceptors to be run, so ContinueRunAfterInterception
      // will be run when interceptors are done.
    }
  }

  void ContinueRunAfterInterception() {
    ctx_->ctx.BeginCompletionOp(&*wrapped_call_, nullptr, nullptr);
    global_callbacks_->PreSynchronousRequest(&ctx_->ctx);
    auto* handler = resources_ ? method_->handler()
                               : server_->resource_exhausted_handler_.get();
    handler->RunHandler(grpc::internal::MethodHandler::HandlerParameter(
        &*wrapped_call_, &ctx_->ctx, deserialized_request_, request_status_,
        nullptr, nullptr));
    global_callbacks_->PostSynchronousRequest(&ctx_->ctx);

    cq_.Shutdown();

    grpc::internal::CompletionQueueTag* op_tag = ctx_->ctx.GetCompletionOpTag();
    cq_.TryPluck(op_tag, gpr_inf_future(GPR_CLOCK_REALTIME));

    // Ensure the cq_ is shutdown
    grpc::PhonyTag ignored_tag;
    GPR_ASSERT(cq_.Pluck(&ignored_tag) == false);

    // Clean up structures allocated during Run/ContinueRunAfterInterception
    wrapped_call_.Destroy();
    ctx_.Destroy();

    delete this;
  }

  // For requests that must be only cleaned up but not actually Run
  void Cleanup() {
    cq_.Shutdown();
    grpc_call_unref(call_);
    delete this;
  }

 private:
  SyncRequest(Server* server, grpc::internal::RpcServiceMethod* method)
      : server_(server),
        method_(method),
        has_request_payload_(method->method_type() ==
                                 grpc::internal::RpcMethod::NORMAL_RPC ||
                             method->method_type() ==
                                 grpc::internal::RpcMethod::SERVER_STREAMING),
        cq_(grpc_completion_queue_create_for_pluck(nullptr)) {}

  template <class CallAllocation>
  void CommonSetup(CallAllocation* data) {
    server_->Ref();
    grpc_metadata_array_init(&request_metadata_);
    data->tag = static_cast<void*>(this);
    data->call = &call_;
    data->initial_metadata = &request_metadata_;
    data->cq = cq_.cq();
  }

  Server* const server_;
  grpc::internal::RpcServiceMethod* const method_;
  const bool has_request_payload_;
  grpc_call* call_;
  grpc_call_details* call_details_ = nullptr;
  gpr_timespec deadline_;
  grpc_metadata_array request_metadata_;
  grpc_byte_buffer* request_payload_ = nullptr;
  grpc::CompletionQueue cq_;
  grpc::Status request_status_;
  std::shared_ptr<GlobalCallbacks> global_callbacks_;
  bool resources_;
  void* deserialized_request_ = nullptr;
  grpc::internal::InterceptorBatchMethodsImpl interceptor_methods_;

  // ServerContextWrapper allows ManualConstructor while using a private
  // constructor of ServerContext via this friend class.
  struct ServerContextWrapper {
    ServerContext ctx;

    ServerContextWrapper(gpr_timespec deadline, grpc_metadata_array* arr)
        : ctx(deadline, arr) {}
  };

  grpc_core::ManualConstructor<ServerContextWrapper> ctx_;
  grpc_core::ManualConstructor<internal::Call> wrapped_call_;
};

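// Lifecycle note: SyncRequest instances are allocated by the matcher
// callbacks installed in AddSyncMethod()/AddUnknownSyncMethod(), surface as
// tags on the server CQ polled by SyncRequestThreadManager, and delete
// themselves at the end of ContinueRunAfterInterception() (or via Cleanup()
// and the failed-FinalizeResult() path during shutdown).
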
template <class ServerContextType>
class Server::CallbackRequest final
    : public grpc::internal::CompletionQueueTag {
 public:
  static_assert(
      std::is_base_of<grpc::CallbackServerContext, ServerContextType>::value,
      "ServerContextType must be derived from CallbackServerContext");

  // For codegen services, the value of method represents the defined
  // characteristics of the method being requested. For generic services,
  // method is nullptr since these services don't have pre-defined methods.
  CallbackRequest(Server* server, grpc::internal::RpcServiceMethod* method,
                  grpc::CompletionQueue* cq,
                  grpc_core::Server::RegisteredCallAllocation* data)
      : server_(server),
        method_(method),
        has_request_payload_(method->method_type() ==
                                 grpc::internal::RpcMethod::NORMAL_RPC ||
                             method->method_type() ==
                                 grpc::internal::RpcMethod::SERVER_STREAMING),
        cq_(cq),
        tag_(this),
        ctx_(server_->context_allocator() != nullptr
                 ? server_->context_allocator()->NewCallbackServerContext()
                 : nullptr) {
    CommonSetup(server, data);
    data->deadline = &deadline_;
    data->optional_payload = has_request_payload_ ? &request_payload_ : nullptr;
  }

  // For generic services, method is nullptr since these services don't have
  // pre-defined methods.
  CallbackRequest(Server* server, grpc::CompletionQueue* cq,
                  grpc_core::Server::BatchCallAllocation* data)
      : server_(server),
        method_(nullptr),
        has_request_payload_(false),
        call_details_(new grpc_call_details),
        cq_(cq),
        tag_(this),
        ctx_(server_->context_allocator() != nullptr
                 ? server_->context_allocator()
                       ->NewGenericCallbackServerContext()
                 : nullptr) {
    CommonSetup(server, data);
    grpc_call_details_init(call_details_);
    data->details = call_details_;
  }

  ~CallbackRequest() override {
    delete call_details_;
    grpc_metadata_array_destroy(&request_metadata_);
    if (has_request_payload_ && request_payload_) {
      grpc_byte_buffer_destroy(request_payload_);
    }
    if (ctx_alloc_by_default_ || server_->context_allocator() == nullptr) {
      default_ctx_.Destroy();
    }
    server_->UnrefWithPossibleNotify();
  }

  // Needs specialization to account for different processing of metadata
  // in the generic API.
  bool FinalizeResult(void** tag, bool* status) override;

 private:
  // method_name needs to be specialized between named method and generic.
  const char* method_name() const;

  class CallbackCallTag : public grpc_completion_queue_functor {
   public:
    explicit CallbackCallTag(Server::CallbackRequest<ServerContextType>* req)
        : req_(req) {
      functor_run = &CallbackCallTag::StaticRun;
      // Set inlineable to true since this callback is internally-controlled
      // without taking any locks, and thus does not need to be run from the
      // executor (which triggers a thread hop). This should only be used by
      // internal callbacks like this and not by user application code. The
      // work here is actually non-trivial, but there is no chance of having
      // user locks conflict with each other so it's ok to run inlined.
      inlineable = true;
    }

    // force_run cannot be performed on a tag if operations using this tag
    // have been sent to PerformOpsOnCall. It is intended for error conditions
    // that are detected before the operations are internally processed.
    void force_run(bool ok) { Run(ok); }

   private:
    Server::CallbackRequest<ServerContextType>* req_;
    grpc::internal::Call* call_;

    static void StaticRun(grpc_completion_queue_functor* cb, int ok) {
      static_cast<CallbackCallTag*>(cb)->Run(static_cast<bool>(ok));
    }
    void Run(bool ok) {
      void* ignored = req_;
      bool new_ok = ok;
      GPR_ASSERT(!req_->FinalizeResult(&ignored, &new_ok));
      GPR_ASSERT(ignored == req_);

      if (!ok) {
        // The call has been shut down.
        // Delete its contents to free up the request.
        delete req_;
        return;
      }

      // Bind the call, deadline, and metadata from what we got
      req_->ctx_->set_call(req_->call_,
                           req_->server_->call_metric_recording_enabled(),
                           req_->server_->server_metric_recorder());
      req_->ctx_->cq_ = req_->cq_;
      req_->ctx_->BindDeadlineAndMetadata(req_->deadline_,
                                          &req_->request_metadata_);
      req_->request_metadata_.count = 0;

      // Create a C++ Call to control the underlying core call
      call_ =
          new (grpc_call_arena_alloc(req_->call_, sizeof(grpc::internal::Call)))
              grpc::internal::Call(
                  req_->call_, req_->server_, req_->cq_,
                  req_->server_->max_receive_message_size(),
                  req_->ctx_->set_server_rpc_info(
                      req_->method_name(),
                      (req_->method_ != nullptr)
                          ? req_->method_->method_type()
                          : grpc::internal::RpcMethod::BIDI_STREAMING,
                      req_->server_->interceptor_creators_));

      req_->interceptor_methods_.SetCall(call_);
      req_->interceptor_methods_.SetReverse();
      // Set interception point for RECV INITIAL METADATA
      req_->interceptor_methods_.AddInterceptionHookPoint(
          grpc::experimental::InterceptionHookPoints::
              POST_RECV_INITIAL_METADATA);
      req_->interceptor_methods_.SetRecvInitialMetadata(
          &req_->ctx_->client_metadata_);

      if (req_->has_request_payload_) {
        // Set interception point for RECV MESSAGE
        req_->request_ = req_->method_->handler()->Deserialize(
            req_->call_, req_->request_payload_, &req_->request_status_,
            &req_->handler_data_);
        if (!(req_->request_status_.ok())) {
          gpr_log(GPR_DEBUG, "Failed to deserialize message.");
        }
        req_->request_payload_ = nullptr;
        req_->interceptor_methods_.AddInterceptionHookPoint(
            grpc::experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
        req_->interceptor_methods_.SetRecvMessage(req_->request_, nullptr);
      }

      if (req_->interceptor_methods_.RunInterceptors(
              [this] { ContinueRunAfterInterception(); })) {
        ContinueRunAfterInterception();
      } else {
        // There were interceptors to be run, so ContinueRunAfterInterception
        // will be run when interceptors are done.
      }
    }
    void ContinueRunAfterInterception() {
      auto* handler = (req_->method_ != nullptr)
                          ? req_->method_->handler()
                          : req_->server_->generic_handler_.get();
      handler->RunHandler(grpc::internal::MethodHandler::HandlerParameter(
          call_, req_->ctx_, req_->request_, req_->request_status_,
          req_->handler_data_, [this] { delete req_; }));
    }
  };

  template <class CallAllocation>
  void CommonSetup(Server* server, CallAllocation* data) {
    server->Ref();
    grpc_metadata_array_init(&request_metadata_);
    data->tag = static_cast<void*>(&tag_);
    data->call = &call_;
    data->initial_metadata = &request_metadata_;
    if (ctx_ == nullptr) {
      default_ctx_.Init();
      ctx_ = &*default_ctx_;
      ctx_alloc_by_default_ = true;
    }
    ctx_->set_context_allocator(server->context_allocator());
    data->cq = cq_->cq();
  }

  Server* const server_;
  grpc::internal::RpcServiceMethod* const method_;
  const bool has_request_payload_;
  grpc_byte_buffer* request_payload_ = nullptr;
  void* request_ = nullptr;
  void* handler_data_ = nullptr;
  grpc::Status request_status_;
  grpc_call_details* const call_details_ = nullptr;
  grpc_call* call_;
  gpr_timespec deadline_;
  grpc_metadata_array request_metadata_;
  grpc::CompletionQueue* const cq_;
  bool ctx_alloc_by_default_ = false;
  CallbackCallTag tag_;
  ServerContextType* ctx_ = nullptr;
  grpc_core::ManualConstructor<ServerContextType> default_ctx_;
  grpc::internal::InterceptorBatchMethodsImpl interceptor_methods_;
};

template <>
bool Server::CallbackRequest<grpc::CallbackServerContext>::FinalizeResult(
    void** /*tag*/, bool* /*status*/) {
  return false;
}

template <>
bool Server::CallbackRequest<
    grpc::GenericCallbackServerContext>::FinalizeResult(void** /*tag*/,
                                                        bool* status) {
  if (*status) {
    deadline_ = call_details_->deadline;
    // TODO(yangg) remove the copy here
    ctx_->method_ = grpc::StringFromCopiedSlice(call_details_->method);
    ctx_->host_ = grpc::StringFromCopiedSlice(call_details_->host);
  }
  grpc_slice_unref(call_details_->method);
  grpc_slice_unref(call_details_->host);
  return false;
}

template <>
const char* Server::CallbackRequest<grpc::CallbackServerContext>::method_name()
    const {
  return method_->name();
}

template <>
const char* Server::CallbackRequest<
    grpc::GenericCallbackServerContext>::method_name() const {
  return ctx_->method().c_str();
}

// Implementation of ThreadManager. Each instance of SyncRequestThreadManager
// manages a pool of threads that poll for incoming Sync RPCs and call the
// appropriate RPC handlers.
class Server::SyncRequestThreadManager : public grpc::ThreadManager {
 public:
  SyncRequestThreadManager(Server* server, grpc::CompletionQueue* server_cq,
                           std::shared_ptr<GlobalCallbacks> global_callbacks,
                           grpc_resource_quota* rq, int min_pollers,
                           int max_pollers, int cq_timeout_msec)
      : ThreadManager("SyncServer", rq, min_pollers, max_pollers),
        server_(server),
        server_cq_(server_cq),
        cq_timeout_msec_(cq_timeout_msec),
        global_callbacks_(std::move(global_callbacks)) {}

  WorkStatus PollForWork(void** tag, bool* ok) override {
    *tag = nullptr;
    // TODO(ctiller): workaround for GPR_TIMESPAN based deadlines not working
    // right now
    gpr_timespec deadline =
        gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                     gpr_time_from_millis(cq_timeout_msec_, GPR_TIMESPAN));

    switch (server_cq_->AsyncNext(tag, ok, deadline)) {
      case grpc::CompletionQueue::TIMEOUT:
        return TIMEOUT;
      case grpc::CompletionQueue::SHUTDOWN:
        return SHUTDOWN;
      case grpc::CompletionQueue::GOT_EVENT:
        return WORK_FOUND;
    }

    GPR_UNREACHABLE_CODE(return TIMEOUT);
  }

  void DoWork(void* tag, bool ok, bool resources) override {
    (void)ok;
    SyncRequest* sync_req = static_cast<SyncRequest*>(tag);

    // Under the AllocatingRequestMatcher model we will never see an invalid
    // tag here.
    GPR_DEBUG_ASSERT(sync_req != nullptr);
    GPR_DEBUG_ASSERT(ok);

    sync_req->Run(global_callbacks_, resources);
  }

  void AddSyncMethod(grpc::internal::RpcServiceMethod* method, void* tag) {
    grpc_core::Server::FromC(server_->server())
        ->SetRegisteredMethodAllocator(server_cq_->cq(), tag, [this, method] {
          grpc_core::Server::RegisteredCallAllocation result;
          new SyncRequest(server_, method, &result);
          return result;
        });
    has_sync_method_ = true;
  }

  void AddUnknownSyncMethod() {
    if (has_sync_method_) {
      unknown_method_ = std::make_unique<grpc::internal::RpcServiceMethod>(
          "unknown", grpc::internal::RpcMethod::BIDI_STREAMING,
          new grpc::internal::UnknownMethodHandler(kUnknownRpcMethod));
      grpc_core::Server::FromC(server_->server())
          ->SetBatchMethodAllocator(server_cq_->cq(), [this] {
            grpc_core::Server::BatchCallAllocation result;
            new SyncRequest(server_, unknown_method_.get(), &result);
            return result;
          });
    }
  }

  void Shutdown() override {
    ThreadManager::Shutdown();
    server_cq_->Shutdown();
  }

  void Wait() override {
    ThreadManager::Wait();
    // Drain any pending items from the queue
    void* tag;
    bool ok;
    while (server_cq_->Next(&tag, &ok)) {
      // This problem can arise if the server CQ gets a request queued to it
      // before it gets shutdown but then pulls it after shutdown.
      static_cast<SyncRequest*>(tag)->Cleanup();
    }
  }

  void Start() {
    if (has_sync_method_) {
      Initialize();  // ThreadManager's Initialize()
    }
  }

 private:
  Server* server_;
  grpc::CompletionQueue* server_cq_;
  int cq_timeout_msec_;
  bool has_sync_method_ = false;
  std::unique_ptr<grpc::internal::RpcServiceMethod> unknown_method_;
  std::shared_ptr<Server::GlobalCallbacks> global_callbacks_;
};

Server::Server(
    grpc::ChannelArguments* args,
    std::shared_ptr<std::vector<std::unique_ptr<grpc::ServerCompletionQueue>>>
        sync_server_cqs,
    int min_pollers, int max_pollers, int sync_cq_timeout_msec,
    std::vector<std::shared_ptr<grpc::internal::ExternalConnectionAcceptorImpl>>
        acceptors,
    grpc_server_config_fetcher* server_config_fetcher,
    grpc_resource_quota* server_rq,
    std::vector<
        std::unique_ptr<grpc::experimental::ServerInterceptorFactoryInterface>>
        interceptor_creators,
    experimental::ServerMetricRecorder* server_metric_recorder)
    : acceptors_(std::move(acceptors)),
      interceptor_creators_(std::move(interceptor_creators)),
      max_receive_message_size_(INT_MIN),
      sync_server_cqs_(std::move(sync_server_cqs)),
      started_(false),
      shutdown_(false),
      shutdown_notified_(false),
      server_(nullptr),
      server_initializer_(new ServerInitializer(this)),
      health_check_service_disabled_(false),
      server_metric_recorder_(server_metric_recorder) {
  gpr_once_init(&grpc::g_once_init_callbacks, grpc::InitGlobalCallbacks);
  global_callbacks_ = grpc::g_callbacks;
  global_callbacks_->UpdateArguments(args);

  if (sync_server_cqs_ != nullptr) {
    bool default_rq_created = false;
    if (server_rq == nullptr) {
      server_rq = grpc_resource_quota_create("SyncServer-default-rq");
      grpc_resource_quota_set_max_threads(server_rq,
                                          DEFAULT_MAX_SYNC_SERVER_THREADS);
      default_rq_created = true;
    }

    for (const auto& it : *sync_server_cqs_) {
      sync_req_mgrs_.emplace_back(new SyncRequestThreadManager(
          this, it.get(), global_callbacks_, server_rq, min_pollers,
          max_pollers, sync_cq_timeout_msec));
    }

    if (default_rq_created) {
      grpc_resource_quota_unref(server_rq);
    }
  }

  for (auto& acceptor : acceptors_) {
    acceptor->SetToChannelArgs(args);
  }

  grpc_channel_args channel_args;
  args->SetChannelArgs(&channel_args);

  for (size_t i = 0; i < channel_args.num_args; i++) {
    if (0 == strcmp(channel_args.args[i].key,
                    grpc::kHealthCheckServiceInterfaceArg)) {
      if (channel_args.args[i].value.pointer.p == nullptr) {
        health_check_service_disabled_ = true;
      } else {
        health_check_service_.reset(
            static_cast<grpc::HealthCheckServiceInterface*>(
                channel_args.args[i].value.pointer.p));
      }
    }
    if (0 ==
        strcmp(channel_args.args[i].key, GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH)) {
      max_receive_message_size_ = channel_args.args[i].value.integer;
    }
    if (0 == strcmp(channel_args.args[i].key,
                    GRPC_ARG_SERVER_CALL_METRIC_RECORDING)) {
      call_metric_recording_enabled_ = channel_args.args[i].value.integer;
    }
  }
  server_ = grpc_server_create(&channel_args, nullptr);
  grpc_server_set_config_fetcher(server_, server_config_fetcher);
}

Server::~Server() {
  {
    grpc::internal::ReleasableMutexLock lock(&mu_);
    if (started_ && !shutdown_) {
      lock.Release();
      Shutdown();
    } else if (!started_) {
      // Shutdown the completion queues
      for (const auto& value : sync_req_mgrs_) {
        value->Shutdown();
      }
      CompletionQueue* callback_cq =
          callback_cq_.load(std::memory_order_relaxed);
      if (callback_cq != nullptr) {
        if (grpc_iomgr_run_in_background()) {
          // gRPC-core provides the backing needed for the preferred CQ type
          callback_cq->Shutdown();
        } else {
          CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq);
        }
        callback_cq_.store(nullptr, std::memory_order_release);
      }
    }
  }
  // Destroy the health check service before we destroy the C server so that
  // it does not call grpc_server_request_registered_call() after the C
  // server has been destroyed.
  health_check_service_.reset();
  grpc_server_destroy(server_);
}

void Server::SetGlobalCallbacks(GlobalCallbacks* callbacks) {
  GPR_ASSERT(!grpc::g_callbacks);
  GPR_ASSERT(callbacks);
  grpc::g_callbacks.reset(callbacks);
}

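// A hedged usage sketch (application code, not part of this file; the class
// name is illustrative). Callbacks must be installed exactly once, before the
// first Server is constructed, since construction latches g_callbacks through
// gpr_once:
//
//   class TracingCallbacks : public grpc::Server::GlobalCallbacks {
//    public:
//     void PreSynchronousRequest(grpc::ServerContext* /*ctx*/) override {}
//     void PostSynchronousRequest(grpc::ServerContext* /*ctx*/) override {}
//   };
//   grpc::Server::SetGlobalCallbacks(new TracingCallbacks);
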
grpc_server* Server::c_server() { return server_; }

std::shared_ptr<grpc::Channel> Server::InProcessChannel(
    const grpc::ChannelArguments& args) {
  grpc_channel_args channel_args = args.c_channel_args();
  return grpc::CreateChannelInternal(
      "inproc", grpc_inproc_channel_create(server_, &channel_args, nullptr),
      std::vector<std::unique_ptr<
          grpc::experimental::ClientInterceptorFactoryInterface>>());
}

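// A brief usage sketch (application code; MyService is illustrative): the
// resulting channel loops back into this server in-process and never touches
// the network.
//
//   auto channel = server->InProcessChannel(grpc::ChannelArguments());
//   auto stub = MyService::NewStub(channel);
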
std::shared_ptr<grpc::Channel>
Server::experimental_type::InProcessChannelWithInterceptors(
    const grpc::ChannelArguments& args,
    std::vector<
        std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
        interceptor_creators) {
  grpc_channel_args channel_args = args.c_channel_args();
  return grpc::CreateChannelInternal(
      "inproc",
      grpc_inproc_channel_create(server_->server_, &channel_args, nullptr),
      std::move(interceptor_creators));
}

static grpc_server_register_method_payload_handling PayloadHandlingForMethod(
    grpc::internal::RpcServiceMethod* method) {
  switch (method->method_type()) {
    case grpc::internal::RpcMethod::NORMAL_RPC:
    case grpc::internal::RpcMethod::SERVER_STREAMING:
      return GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER;
    case grpc::internal::RpcMethod::CLIENT_STREAMING:
    case grpc::internal::RpcMethod::BIDI_STREAMING:
      return GRPC_SRM_PAYLOAD_NONE;
  }
  GPR_UNREACHABLE_CODE(return GRPC_SRM_PAYLOAD_NONE;);
}

bool Server::RegisterService(const std::string* addr, grpc::Service* service) {
  bool has_async_methods = service->has_async_methods();
  if (has_async_methods) {
    GPR_ASSERT(service->server_ == nullptr &&
               "Can only register an asynchronous service against one server.");
    service->server_ = this;
  }

  const char* method_name = nullptr;

  for (const auto& method : service->methods_) {
    if (method == nullptr) {  // Handled by generic service if any.
      continue;
    }

    void* method_registration_tag = grpc_server_register_method(
        server_, method->name(), addr ? addr->c_str() : nullptr,
        PayloadHandlingForMethod(method.get()), 0);
    if (method_registration_tag == nullptr) {
      gpr_log(GPR_DEBUG, "Attempt to register %s multiple times",
              method->name());
      return false;
    }

    if (method->handler() == nullptr) {  // Async method without handler
      method->set_server_tag(method_registration_tag);
    } else if (method->api_type() ==
               grpc::internal::RpcServiceMethod::ApiType::SYNC) {
      for (const auto& value : sync_req_mgrs_) {
        value->AddSyncMethod(method.get(), method_registration_tag);
      }
    } else {
      has_callback_methods_ = true;
      grpc::internal::RpcServiceMethod* method_value = method.get();
      grpc::CompletionQueue* cq = CallbackCQ();
      grpc_server_register_completion_queue(server_, cq->cq(), nullptr);
      grpc_core::Server::FromC(server_)->SetRegisteredMethodAllocator(
          cq->cq(), method_registration_tag, [this, cq, method_value] {
            grpc_core::Server::RegisteredCallAllocation result;
            new CallbackRequest<grpc::CallbackServerContext>(this, method_value,
                                                             cq, &result);
            return result;
          });
    }

    method_name = method->name();
  }

  // Parse service name.
1092     std::stringstream ss(method_name);
1093     std::string service_name;
1094     if (std::getline(ss, service_name, '/') &&
1095         std::getline(ss, service_name, '/')) {
1096       services_.push_back(service_name);
1097     }
1098   }
1099   return true;
1100 }
1101 
void Server::RegisterAsyncGenericService(grpc::AsyncGenericService* service) {
  GPR_ASSERT(service->server_ == nullptr &&
             "Can only register an async generic service against one server.");
  service->server_ = this;
  has_async_generic_service_ = true;
}

void Server::RegisterCallbackGenericService(
    grpc::CallbackGenericService* service) {
  GPR_ASSERT(
      service->server_ == nullptr &&
      "Can only register a callback generic service against one server.");
  service->server_ = this;
  has_callback_generic_service_ = true;
  generic_handler_.reset(service->Handler());

  grpc::CompletionQueue* cq = CallbackCQ();
  grpc_core::Server::FromC(server_)->SetBatchMethodAllocator(cq->cq(), [this,
                                                                        cq] {
    grpc_core::Server::BatchCallAllocation result;
    new CallbackRequest<grpc::GenericCallbackServerContext>(this, cq, &result);
    return result;
  });
}

int Server::AddListeningPort(const std::string& addr,
                             grpc::ServerCredentials* creds) {
  GPR_ASSERT(!started_);
  int port = creds->AddPortToServer(addr, server_);
  global_callbacks_->AddPort(this, addr, creds, port);
  return port;
}

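// Shutdown ref-counting: each outstanding SyncRequest/CallbackRequest takes a
// ref in its CommonSetup() and drops it in its destructor via
// UnrefWithPossibleNotify(); ShutdownInternal() drops the server's own ref via
// UnrefAndWaitLocked() and blocks until the count reaches zero.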
void Server::Ref() {
  shutdown_refs_outstanding_.fetch_add(1, std::memory_order_relaxed);
}

void Server::UnrefWithPossibleNotify() {
  if (GPR_UNLIKELY(shutdown_refs_outstanding_.fetch_sub(
                       1, std::memory_order_acq_rel) == 1)) {
    // No refs outstanding means that shutdown has been initiated and no more
    // callback requests are outstanding.
    grpc::internal::MutexLock lock(&mu_);
    GPR_ASSERT(shutdown_);
    shutdown_done_ = true;
    shutdown_done_cv_.Signal();
  }
}

void Server::UnrefAndWaitLocked() {
  if (GPR_UNLIKELY(shutdown_refs_outstanding_.fetch_sub(
                       1, std::memory_order_acq_rel) == 1)) {
    shutdown_done_ = true;
    return;  // no need to wait on CV since done condition already set
  }
  while (!shutdown_done_) {
    shutdown_done_cv_.Wait(&mu_);
  }
}

void Server::Start(grpc::ServerCompletionQueue** cqs, size_t num_cqs) {
  GPR_ASSERT(!started_);
  global_callbacks_->PreServerStart(this);
  started_ = true;

  // Only create the default health check service when the user did not
  // provide an explicit one.
  if (health_check_service_ == nullptr && !health_check_service_disabled_ &&
      grpc::DefaultHealthCheckServiceEnabled()) {
    auto default_hc_service = std::make_unique<DefaultHealthCheckService>();
    auto* hc_service_impl = default_hc_service->GetHealthCheckService();
    health_check_service_ = std::move(default_hc_service);
    RegisterService(nullptr, hc_service_impl);
  }

  for (auto& acceptor : acceptors_) {
    acceptor->GetCredentials()->AddPortToServer(acceptor->name(), server_);
  }

#ifndef NDEBUG
  for (size_t i = 0; i < num_cqs; i++) {
    cq_list_.push_back(cqs[i]);
  }
#endif

  // We must have exactly one generic service to handle requests for
  // unmatched method names (i.e., to return UNIMPLEMENTED for any RPC
  // method for which we don't have a registered implementation).  This
  // service comes from one of the following places (first match wins):
  // - If the application supplied a generic service via either the async
  //   or callback APIs, we use that.
  // - If there are callback methods, register a callback generic service.
  // - If there are sync methods, register a sync generic service.
  //   (This must be done before server start to initialize an
  //   AllocatingRequestMatcher.)
  // - Otherwise (we have only async methods), we wait until the server
  //   is started and then start an UnimplementedAsyncRequest on each
  //   async CQ, so that the requests will be moved along by polling
  //   done in application threads.
  bool unknown_rpc_needed =
      !has_async_generic_service_ && !has_callback_generic_service_;
  if (unknown_rpc_needed && has_callback_methods_) {
    unimplemented_service_ = std::make_unique<grpc::CallbackGenericService>();
    RegisterCallbackGenericService(unimplemented_service_.get());
    unknown_rpc_needed = false;
  }
  if (unknown_rpc_needed && !sync_req_mgrs_.empty()) {
    sync_req_mgrs_[0]->AddUnknownSyncMethod();
    unknown_rpc_needed = false;
  }

  grpc_server_start(server_);

  if (unknown_rpc_needed) {
    for (size_t i = 0; i < num_cqs; i++) {
      if (cqs[i]->IsFrequentlyPolled()) {
        new UnimplementedAsyncRequest(this, cqs[i]);
      }
    }
    unknown_rpc_needed = false;
  }

  // If this server has any support for synchronous methods (has any sync
  // server CQs), make sure that we have a ResourceExhausted handler to deal
  // with the case of thread exhaustion.
  if (sync_server_cqs_ != nullptr && !sync_server_cqs_->empty()) {
    resource_exhausted_handler_ =
        std::make_unique<grpc::internal::ResourceExhaustedHandler>(
            kServerThreadpoolExhausted);
  }

  for (const auto& value : sync_req_mgrs_) {
    value->Start();
  }

  for (auto& acceptor : acceptors_) {
    acceptor->Start();
  }
}

void Server::ShutdownInternal(gpr_timespec deadline) {
  grpc::internal::MutexLock lock(&mu_);
  if (shutdown_) {
    return;
  }

  shutdown_ = true;

  for (auto& acceptor : acceptors_) {
    acceptor->Shutdown();
  }

  // The completion queue to use for server shutdown completion notification.
  grpc::CompletionQueue shutdown_cq;
  grpc::ShutdownTag shutdown_tag;  // Phony shutdown tag
  grpc_server_shutdown_and_notify(server_, shutdown_cq.cq(), &shutdown_tag);

  shutdown_cq.Shutdown();

  void* tag;
  bool ok;
  grpc::CompletionQueue::NextStatus status =
      shutdown_cq.AsyncNext(&tag, &ok, deadline);

  // If this timed out, it means we are done with the grace period for a clean
  // shutdown. We should force a shutdown now by cancelling all inflight calls.
  if (status == grpc::CompletionQueue::NextStatus::TIMEOUT) {
    grpc_server_cancel_all_calls(server_);
    status =
        shutdown_cq.AsyncNext(&tag, &ok, gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }
  // Else in case of SHUTDOWN or GOT_EVENT, it means that the server has
  // successfully shut down.

  // Drop the shutdown ref and wait for all other refs to drop as well.
  UnrefAndWaitLocked();

  // Shutdown all ThreadManagers. This will try to gracefully stop all the
  // threads in the ThreadManagers (once they process any inflight requests).
  for (const auto& value : sync_req_mgrs_) {
    value->Shutdown();  // ThreadManager's Shutdown()
  }

  // Wait for threads in all ThreadManagers to terminate.
  for (const auto& value : sync_req_mgrs_) {
    value->Wait();
  }

  // Shutdown the callback CQ. The CQ is owned by its own shutdown tag, so it
  // will delete itself at true shutdown.
  CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_relaxed);
  if (callback_cq != nullptr) {
    if (grpc_iomgr_run_in_background()) {
      // gRPC-core provides the backing needed for the preferred CQ type
      callback_cq->Shutdown();
    } else {
      CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq);
    }
    callback_cq_.store(nullptr, std::memory_order_release);
  }

  // Drain the shutdown queue (if the previous call to AsyncNext() timed out
  // and we didn't remove the tag from the queue yet).
  while (shutdown_cq.Next(&tag, &ok)) {
    // Nothing to be done here. Just ignore ok and tag values.
  }

  shutdown_notified_ = true;
  shutdown_cv_.SignalAll();

#ifndef NDEBUG
  // Unregister this server with the CQs passed into it by the user so that
  // those can be checked for properly-ordered shutdown.
  for (auto* cq : cq_list_) {
    cq->UnregisterServer(this);
  }
  cq_list_.clear();
#endif
}

void Server::Wait() {
  grpc::internal::MutexLock lock(&mu_);
  while (started_ && !shutdown_notified_) {
    shutdown_cv_.Wait(&mu_);
  }
}

void Server::PerformOpsOnCall(grpc::internal::CallOpSetInterface* ops,
                              grpc::internal::Call* call) {
  ops->FillOps(call);
}

bool Server::UnimplementedAsyncRequest::FinalizeResult(void** tag,
                                                       bool* status) {
  if (GenericAsyncRequest::FinalizeResult(tag, status)) {
    // We either had no interceptors run or we are done intercepting.
    if (*status) {
      // Create a new request/response pair using the server and CQ values
      // stored in this object's base class.
      new UnimplementedAsyncRequest(server_, notification_cq_);
      new UnimplementedAsyncResponse(this);
    } else {
      delete this;
    }
  } else {
    // The tag was swallowed due to interception. We will see it again.
  }
  return false;
}

Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse(
    UnimplementedAsyncRequest* request)
    : request_(request) {
  grpc::Status status(grpc::StatusCode::UNIMPLEMENTED, kUnknownRpcMethod);
  grpc::internal::UnknownMethodHandler::FillOps(request_->context(),
                                                kUnknownRpcMethod, this);
  request_->stream()->call_.PerformOps(this);
}

grpc::ServerInitializer* Server::initializer() {
  return server_initializer_.get();
}

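// Lazily creates the callback CQ using double-checked locking: an acquire
// load on the fast path, a mutex-guarded re-check before construction, and a
// release store to publish the new queue to other threads.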
grpc::CompletionQueue* Server::CallbackCQ() {
  // TODO(vjpai): Consider using a single global CQ for the default CQ
  // if there is no explicit per-server CQ registered.
  CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_acquire);
  if (callback_cq != nullptr) {
    return callback_cq;
  }
  // The callback_cq_ wasn't already set, so grab a lock and set it up exactly
  // once for this server.
  grpc::internal::MutexLock l(&mu_);
  callback_cq = callback_cq_.load(std::memory_order_relaxed);
  if (callback_cq != nullptr) {
    return callback_cq;
  }
  if (grpc_iomgr_run_in_background()) {
    // gRPC-core provides the backing needed for the preferred CQ type.
    auto* shutdown_callback = new grpc::ShutdownCallback;
    callback_cq = new grpc::CompletionQueue(grpc_completion_queue_attributes{
        GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
        shutdown_callback});

    // Transfer ownership of the new cq to its own shutdown callback.
    shutdown_callback->TakeCQ(callback_cq);
  } else {
    // Otherwise we need to use the alternative CQ variant.
    callback_cq = CompletionQueue::CallbackAlternativeCQ();
  }

  callback_cq_.store(callback_cq, std::memory_order_release);
  return callback_cq;
}

}  // namespace grpc