//
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//

#ifndef GRPC_SRC_CORE_LIB_TRANSPORT_TRANSPORT_H
#define GRPC_SRC_CORE_LIB_TRANSPORT_TRANSPORT_H

#include <grpc/support/port_platform.h>

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <functional>
#include <string>
#include <utility>

#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"

#include <grpc/impl/connectivity_state.h>
#include <grpc/slice.h>
#include <grpc/status.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>

#include "src/core/lib/channel/context.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/iomgr_fwd.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/promise/arena_promise.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/latch.h"
#include "src/core/lib/promise/pipe.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/transport/call_final_info.h"
#include "src/core/lib/transport/call_spine.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/message.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport_fwd.h"

// Minimum and maximum protocol accepted versions.
#define GRPC_PROTOCOL_VERSION_MAX_MAJOR 2
#define GRPC_PROTOCOL_VERSION_MAX_MINOR 1
#define GRPC_PROTOCOL_VERSION_MIN_MAJOR 2
#define GRPC_PROTOCOL_VERSION_MIN_MINOR 1

#define GRPC_ARG_TRANSPORT "grpc.internal.transport"

namespace grpc_core {

// Move-only type that tracks call startup.
// Allows observation of when client_initial_metadata has been processed by the
// end of the local call stack.
// Interested observers can call Wait() to obtain a promise that will resolve
// when all local client_initial_metadata processing has completed.
// The result of this token is either true on successful completion, or false
// if the metadata was not sent.
// To set a successful completion, call Complete(true). For failure, call
// Complete(false).
// If Complete is not called, the destructor of a still-held token will
// complete with failure.
// Transports should hold this token until client_initial_metadata has passed
// any flow control (e.g. MAX_CONCURRENT_STREAMS for HTTP/2).
class ClientInitialMetadataOutstandingToken {
 public:
  static ClientInitialMetadataOutstandingToken Empty() {
    return ClientInitialMetadataOutstandingToken();
  }
  static ClientInitialMetadataOutstandingToken New(
      Arena* arena = GetContext<Arena>()) {
    ClientInitialMetadataOutstandingToken token;
    token.latch_ = arena->New<Latch<bool>>();
    return token;
  }

  ClientInitialMetadataOutstandingToken(
      const ClientInitialMetadataOutstandingToken&) = delete;
  ClientInitialMetadataOutstandingToken& operator=(
      const ClientInitialMetadataOutstandingToken&) = delete;
  ClientInitialMetadataOutstandingToken(
      ClientInitialMetadataOutstandingToken&& other) noexcept
      : latch_(std::exchange(other.latch_, nullptr)) {}
  ClientInitialMetadataOutstandingToken& operator=(
      ClientInitialMetadataOutstandingToken&& other) noexcept {
    latch_ = std::exchange(other.latch_, nullptr);
    return *this;
  }
  ~ClientInitialMetadataOutstandingToken() {
    if (latch_ != nullptr) latch_->Set(false);
  }
  void Complete(bool success) { std::exchange(latch_, nullptr)->Set(success); }

  // Returns a promise that will resolve when Complete() is called or when this
  // object (or its moved-from ancestor) is dropped.
  auto Wait() { return latch_->Wait(); }

 private:
  ClientInitialMetadataOutstandingToken() = default;

  Latch<bool>* latch_ = nullptr;
};
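
// Illustrative usage sketch (not part of the API; `arena` and the surrounding
// call flow are hypothetical):
//
//   // Transport side: hold the token until client initial metadata clears
//   // flow control, then report the outcome.
//   auto token = ClientInitialMetadataOutstandingToken::New(arena);
//   auto waiter = token.Wait();  // promise resolving to bool
//   // ... once the metadata has been written (or definitively cannot be):
//   token.Complete(/*success=*/true);
//   // Observers poll the promise returned by Wait(); it resolves to false if
//   // the token is dropped without Complete() having been called.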

using ClientInitialMetadataOutstandingTokenWaitType =
    decltype(std::declval<ClientInitialMetadataOutstandingToken>().Wait());

struct CallArgs {
  // Initial metadata from the client to the server.
  // During promise setup this can be manipulated by filters (and then
  // passed on to the next filter).
  ClientMetadataHandle client_initial_metadata;
  // Token indicating that client_initial_metadata is still being processed.
  // This should be moved around and only destroyed when the transport is
  // satisfied that the metadata has passed any flow control measures it has.
  ClientInitialMetadataOutstandingToken client_initial_metadata_outstanding;
  // Latch that will ultimately contain the polling entity for the call.
  // TODO(ctiller): remove once event engine lands
  Latch<grpc_polling_entity>* polling_entity;
  // Initial metadata from the server to the client.
  // Set once when it's available.
  // During promise setup filters can substitute their own latch for this
  // and consequently intercept the sent value and mutate/observe it.
  PipeSender<ServerMetadataHandle>* server_initial_metadata;
  // Messages travelling from the application to the transport.
  PipeReceiver<MessageHandle>* client_to_server_messages;
  // Messages travelling from the transport to the application.
  PipeSender<MessageHandle>* server_to_client_messages;
};

using NextPromiseFactory =
    std::function<ArenaPromise<ServerMetadataHandle>(CallArgs)>;
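
// Illustrative sketch of how a promise-based filter might consume CallArgs
// (the filter body and metadata edit are hypothetical; only CallArgs and
// NextPromiseFactory come from this header):
//
//   ArenaPromise<ServerMetadataHandle> MakeCallPromise(
//       CallArgs call_args, NextPromiseFactory next_promise_factory) {
//     // Inspect/mutate client initial metadata before handing the call to
//     // the next filter or the transport, e.g.:
//     //   call_args.client_initial_metadata->Set(...);
//     return next_promise_factory(std::move(call_args));
//   }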

}  // namespace grpc_core

// forward declarations

// grpc_stream doesn't actually exist. It's used as a typesafe
// opaque pointer for whatever data the transport wants to track
// for a stream.
typedef struct grpc_stream grpc_stream;

extern grpc_core::DebugOnlyTraceFlag grpc_trace_stream_refcount;

typedef struct grpc_stream_refcount {
  grpc_core::RefCount refs;
  grpc_closure destroy;
#ifndef NDEBUG
  const char* object_type;
#endif
} grpc_stream_refcount;

#ifndef NDEBUG
void grpc_stream_ref_init(grpc_stream_refcount* refcount, int initial_refs,
                          grpc_iomgr_cb_func cb, void* cb_arg,
                          const char* object_type);
#define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \
  grpc_stream_ref_init(rc, ir, cb, cb_arg, objtype)
#else
void grpc_stream_ref_init(grpc_stream_refcount* refcount, int initial_refs,
                          grpc_iomgr_cb_func cb, void* cb_arg);
#define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \
  do {                                                    \
    grpc_stream_ref_init(rc, ir, cb, cb_arg);             \
    (void)(objtype);                                      \
  } while (0)
#endif
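
// Illustrative sketch of the usual initialization pattern (the stream type
// and destroy callback are hypothetical):
//
//   struct my_stream {
//     grpc_stream_refcount refcount;
//     // ... transport-specific per-stream state ...
//   };
//
//   static void destroy_my_stream(void* arg, grpc_error_handle /*error*/) {
//     // Runs once the last ref is released via grpc_stream_unref().
//     delete static_cast<my_stream*>(arg);
//   }
//
//   // When setting up the stream `s`: one initial ref, destroy callback,
//   // and a debug label used by the stream_refcount trace flag.
//   GRPC_STREAM_REF_INIT(&s->refcount, 1, destroy_my_stream, s, "my_stream");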

#ifndef NDEBUG
inline void grpc_stream_ref(grpc_stream_refcount* refcount,
                            const char* reason) {
  if (grpc_trace_stream_refcount.enabled()) {
    gpr_log(GPR_DEBUG, "%s %p:%p REF %s", refcount->object_type, refcount,
            refcount->destroy.cb_arg, reason);
  }
  refcount->refs.RefNonZero(DEBUG_LOCATION, reason);
}
#else
inline void grpc_stream_ref(grpc_stream_refcount* refcount) {
  refcount->refs.RefNonZero();
}
#endif

void grpc_stream_destroy(grpc_stream_refcount* refcount);

#ifndef NDEBUG
inline void grpc_stream_unref(grpc_stream_refcount* refcount,
                              const char* reason) {
  if (grpc_trace_stream_refcount.enabled()) {
    gpr_log(GPR_DEBUG, "%s %p:%p UNREF %s", refcount->object_type, refcount,
            refcount->destroy.cb_arg, reason);
  }
  if (GPR_UNLIKELY(refcount->refs.Unref(DEBUG_LOCATION, reason))) {
    grpc_stream_destroy(refcount);
  }
}
#else
inline void grpc_stream_unref(grpc_stream_refcount* refcount) {
  if (GPR_UNLIKELY(refcount->refs.Unref())) {
    grpc_stream_destroy(refcount);
  }
}
#endif

// Wrap a buffer that is owned by some stream object into a slice that shares
// the same refcount
grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount* refcount,
                                               void* buffer, size_t length);

// This struct (which is present in both grpc_transport_stream_op_batch
// and grpc_transport_op) is a convenience to allow filters or
// transports to schedule a closure related to a particular batch without
// having to allocate memory. The general pattern is to initialize the
// closure with the callback arg set to the batch and extra_arg set to
// whatever state is associated with the handler (e.g., the call element
// or the transport stream object).
//
// Note that this can only be used by the current handler of a given
// batch on the way down the stack (i.e., whichever filter or transport is
// currently handling the batch). Once a filter or transport passes control
// of the batch to the next handler, it cannot depend on the contents of
// this struct anymore, because the next handler may reuse it.
struct grpc_handler_private_op_data {
  void* extra_arg = nullptr;
  grpc_closure closure;
  grpc_handler_private_op_data() { memset(&closure, 0, sizeof(closure)); }
};
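
// Illustrative sketch of the pattern described above (the callback and the
// handler state `elem` are hypothetical; GRPC_CLOSURE_INIT comes from
// closure.h, which is already included):
//
//   static void got_batch_back(void* arg, grpc_error_handle /*error*/) {
//     auto* batch = static_cast<grpc_transport_stream_op_batch*>(arg);
//     void* handler_state = batch->handler_private.extra_arg;
//     // ... resume processing of `batch` using `handler_state` ...
//   }
//
//   // While this filter is the current handler of `batch`:
//   batch->handler_private.extra_arg = elem;
//   GRPC_CLOSURE_INIT(&batch->handler_private.closure, got_batch_back, batch,
//                     grpc_schedule_on_exec_ctx);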

typedef struct grpc_transport_stream_op_batch_payload
    grpc_transport_stream_op_batch_payload;

// Transport stream op: a set of operations to perform on a transport
// against a single stream
struct grpc_transport_stream_op_batch {
  grpc_transport_stream_op_batch()
      : send_initial_metadata(false),
        send_trailing_metadata(false),
        send_message(false),
        recv_initial_metadata(false),
        recv_message(false),
        recv_trailing_metadata(false),
        cancel_stream(false),
        is_traced(false) {}

  /// Should be scheduled when all of the non-recv operations in the batch
  /// are complete.

  /// The recv ops (recv_initial_metadata, recv_message, and
  /// recv_trailing_metadata) each have their own callbacks. If a batch
  /// contains both recv ops and non-recv ops, on_complete should be
  /// scheduled as soon as the non-recv ops are complete, regardless of
  /// whether or not the recv ops are complete. If a batch contains
  /// only recv ops, on_complete can be null.
  grpc_closure* on_complete = nullptr;

  /// Values for the stream op (fields set are determined by the flags below)
  grpc_transport_stream_op_batch_payload* payload = nullptr;

  /// Send initial metadata to the peer, from the provided metadata batch.
  bool send_initial_metadata : 1;

  /// Send trailing metadata to the peer, from the provided metadata batch.
  bool send_trailing_metadata : 1;

  /// Send message data to the peer, from the provided byte stream.
  bool send_message : 1;

  /// Receive initial metadata from the stream, into provided metadata batch.
  bool recv_initial_metadata : 1;

  /// Receive message data from the stream, into provided byte stream.
  bool recv_message : 1;

  /// Receive trailing metadata from the stream, into provided metadata batch.
  ///
  bool recv_trailing_metadata : 1;

  /// Cancel this stream with the provided error
  bool cancel_stream : 1;

  /// Is this stream traced
  bool is_traced : 1;

  bool HasOp() const {
    return send_initial_metadata || send_trailing_metadata || send_message ||
           recv_initial_metadata || recv_message || recv_trailing_metadata ||
           cancel_stream;
  }

  //**************************************************************************
  // remaining fields are initialized and used at the discretion of the
  // current handler of the op

  grpc_handler_private_op_data handler_private;
};
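
// Illustrative sketch of how a handler typically inspects a batch it is
// currently responsible for (the metadata edit is hypothetical):
//
//   void StartTransportStreamOpBatch(grpc_transport_stream_op_batch* batch) {
//     if (batch->send_initial_metadata) {
//       grpc_metadata_batch* md =
//           batch->payload->send_initial_metadata.send_initial_metadata;
//       // ... add/remove metadata in `md` before passing the batch on ...
//     }
//     if (batch->cancel_stream) {
//       // batch->payload->cancel_stream.cancel_error holds the reason.
//     }
//     // Pass the batch down to the next element/transport when done.
//   }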

struct grpc_transport_stream_op_batch_payload {
  explicit grpc_transport_stream_op_batch_payload(
      grpc_call_context_element* context)
      : context(context) {}
  struct {
    grpc_metadata_batch* send_initial_metadata = nullptr;
  } send_initial_metadata;

  struct {
    grpc_metadata_batch* send_trailing_metadata = nullptr;
    // Set by the transport to true if the stream successfully wrote the
    // trailing metadata. If this is not set but there was a send trailing
    // metadata op present, this can indicate that a server call can be marked
    // as a cancellation (since the stream was write-closed before status could
    // be delivered).
    bool* sent = nullptr;
  } send_trailing_metadata;

  struct {
    // The transport (or a filter that decides to return a failure before
    // the op gets down to the transport) takes ownership.
    // The batch's on_complete will not be called until after the byte
    // stream is orphaned.
    grpc_core::SliceBuffer* send_message;
    uint32_t flags = 0;
    // Set by the transport if the stream has been closed for writes. If this
    // is set and a send message op is present, we set the operation to be a
    // failure without sending a cancel op down the stack. This is so that the
    // status of the call does not get overwritten by the cancel op, which
    // would be especially problematic if we had received a valid status from
    // the server.
    // For send_initial_metadata, it is fine for the status to be overwritten
    // because at that point, the client will not have received a status.
    // For send_trailing_metadata, we might overwrite the status if we have
    // non-zero metadata to send. This is fine because the API does not allow
    // the client to send trailing metadata.
    bool stream_write_closed = false;
  } send_message;

  struct {
    grpc_metadata_batch* recv_initial_metadata = nullptr;
    /// Should be enqueued when initial metadata is ready to be processed.
    grpc_closure* recv_initial_metadata_ready = nullptr;
    // If not NULL, will be set to true if trailing metadata is
    // immediately available. This may be a signal that we received a
    // Trailers-Only response. The retry filter checks this to know whether to
    // defer the decision to commit the call or not. The C++ callback API also
    // uses this to set the success flag of the OnReadInitialMetadataDone()
    // callback.
    bool* trailing_metadata_available = nullptr;
  } recv_initial_metadata;

  struct {
    // Will be set by the transport to point to the byte stream containing a
    // received message. Will be nullopt if trailing metadata is received
    // instead of a message.
    absl::optional<grpc_core::SliceBuffer>* recv_message = nullptr;
    uint32_t* flags = nullptr;
    // Set to true if this recv_message failed for reasons other than a clean
    // end-of-stream.
    bool* call_failed_before_recv_message = nullptr;
    /// Should be enqueued when one message is ready to be processed.
    grpc_closure* recv_message_ready = nullptr;
  } recv_message;

  struct {
    grpc_metadata_batch* recv_trailing_metadata = nullptr;
    grpc_transport_stream_stats* collect_stats = nullptr;
    /// Should be enqueued when trailing metadata is ready to be processed.
    grpc_closure* recv_trailing_metadata_ready = nullptr;
  } recv_trailing_metadata;

  /// Forcefully close this stream.
  /// The HTTP/2 semantics should be:
  /// - server side: if cancel_error has
  ///   grpc_core::StatusIntProperty::kRpcStatus, and trailing metadata has not
  ///   been sent, send trailing metadata with status and message from
  ///   cancel_error (use grpc_error_get_status) followed by a RST_STREAM with
  ///   error=GRPC_CHTTP2_NO_ERROR to force a full close
  /// - at all other times: use grpc_error_get_status to get a status code, and
  ///   convert to an HTTP/2 error code using
  ///   grpc_chttp2_grpc_status_to_http2_error. Send a RST_STREAM with this
  ///   error.
  struct {
    // Error contract: the transport that gets this op must cause cancel_error
    // to be unref'ed after processing it
    grpc_error_handle cancel_error;
    // If true the transport should endeavor to delay sending the cancellation
    // notification for some small amount of time, in order to foil certain
    // exploits.
    // This should be set for cancellations that result from malformed client
    // initial metadata.
    bool tarpit = false;
  } cancel_stream;

  // Indexes correspond to grpc_context_index enum values
  grpc_call_context_element* context;
};

/// Transport op: a set of operations to perform on a transport as a whole
typedef struct grpc_transport_op {
  /// Called when processing of this op is done.
  grpc_closure* on_consumed = nullptr;
  /// connectivity monitoring - start_connectivity_watch subscribes a watcher,
  /// stop_connectivity_watch unsubscribes it
  grpc_core::OrphanablePtr<grpc_core::ConnectivityStateWatcherInterface>
      start_connectivity_watch;
  grpc_core::ConnectivityStateWatcherInterface* stop_connectivity_watch =
      nullptr;
  /// should the transport be disconnected
  /// Error contract: the transport that gets this op must cause
  /// disconnect_with_error to be unref'ed after processing it
  grpc_error_handle disconnect_with_error;
  /// what should the goaway contain?
  /// Error contract: the transport that gets this op must cause
  /// goaway_error to be unref'ed after processing it
  grpc_error_handle goaway_error;
  void (*set_accept_stream_fn)(void* user_data,
                               grpc_core::Transport* transport,
                               const void* server_data) = nullptr;
  void (*set_registered_method_matcher_fn)(
      void* user_data, grpc_core::ServerMetadata* metadata) = nullptr;
  void* set_accept_stream_user_data = nullptr;
  void (*set_make_promise_fn)(void* user_data, grpc_core::Transport* transport,
                              const void* server_data) = nullptr;
  void* set_make_promise_user_data = nullptr;
  /// add this transport to a pollset
  grpc_pollset* bind_pollset = nullptr;
  /// add this transport to a pollset_set
  grpc_pollset_set* bind_pollset_set = nullptr;
  /// send a ping, if either on_initiate or on_ack is not NULL
  struct {
    /// Ping may be delayed by the transport, on_initiate callback will be
    /// called when the ping is actually being sent.
    grpc_closure* on_initiate = nullptr;
    /// Called when the ping ack is received
    grpc_closure* on_ack = nullptr;
  } send_ping;
  grpc_connectivity_state start_connectivity_watch_state = GRPC_CHANNEL_IDLE;
  // If true, will reset the channel's connection backoff.
  bool reset_connect_backoff = false;

  /// set the callback for accepting new streams;
  /// this is a permanent callback, unlike the other one-shot closures.
  /// If true, the callback is set to set_accept_stream_fn, with its
  /// user_data argument set to set_accept_stream_user_data.
  /// `set_registered_method_matcher_fn` is also set with its user_data
  /// argument set to set_accept_stream_user_data. The transport should invoke
  /// `set_registered_method_matcher_fn` after initial metadata is received but
  /// before the recv_initial_metadata_ready callback is invoked. If the
  /// transport detects an error in the stream, invoking
  /// `set_registered_method_matcher_fn` can be skipped.
  bool set_accept_stream = false;

  /// set the callback for accepting new streams based upon promises;
  /// this is a permanent callback, unlike the other one-shot closures.
  /// If true, the callback is set to set_make_promise_fn, with its
  /// user_data argument set to set_make_promise_user_data
  bool set_make_promise = false;

  //**************************************************************************
  // remaining fields are initialized and used at the discretion of the
  // transport implementation

  grpc_handler_private_op_data handler_private;
} grpc_transport_op;
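
// Illustrative sketch of filling in a whole-transport op to shut a transport
// down (the error text and `transport` pointer are hypothetical;
// grpc_make_transport_op is declared later in this header):
//
//   grpc_transport_op* op = grpc_make_transport_op(nullptr);
//   op->disconnect_with_error = GRPC_ERROR_CREATE("Channel going away");
//   op->goaway_error = GRPC_ERROR_CREATE("Channel going away");
//   transport->PerformOp(op);  // op deletes itself once consumed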

void grpc_transport_stream_op_batch_finish_with_failure(
    grpc_transport_stream_op_batch* batch, grpc_error_handle error,
    grpc_core::CallCombiner* call_combiner);
void grpc_transport_stream_op_batch_queue_finish_with_failure(
    grpc_transport_stream_op_batch* batch, grpc_error_handle error,
    grpc_core::CallCombinerClosureList* closures);
// Fail a batch from within the transport (i.e. without the activity lock/call
// combiner taken).
void grpc_transport_stream_op_batch_finish_with_failure_from_transport(
    grpc_transport_stream_op_batch* batch, grpc_error_handle error);

std::string grpc_transport_stream_op_batch_string(
    grpc_transport_stream_op_batch* op, bool truncate);
std::string grpc_transport_op_string(grpc_transport_op* op);

namespace grpc_core {

class FilterStackTransport {
 public:
  // Memory required for a single stream element - this is allocated by upper
  // layers and initialized by the transport
  virtual size_t SizeOfStream() const = 0;

  // Initialize transport data for a stream.
  // May assume that stream contains all-zeros.
  // Arguments:
  //   stream      - a pointer to uninitialized memory to initialize
  //   server_data - either NULL for a client initiated stream, or a pointer
  //                 supplied from the accept_stream callback function
  virtual void InitStream(grpc_stream* stream, grpc_stream_refcount* refcount,
                          const void* server_data, Arena* arena) = 0;

  // HACK: inproc does not handle stream op batch callbacks correctly (receive
  // ops are required to complete prior to on_complete triggering).
  // This flag is used to disable coalescing of batches in connected_channel
  // for that specific transport.
  // TODO(ctiller): This ought not be necessary once we have promises complete.
  virtual bool HackyDisableStreamOpBatchCoalescingInConnectedChannel()
      const = 0;

  virtual void PerformStreamOp(grpc_stream* stream,
                               grpc_transport_stream_op_batch* op) = 0;

  // Destroy transport data for a stream.
  // Requires: a recv_batch with final_state == GRPC_STREAM_CLOSED has been
  // received by the up-layer. Must not be called in the same call stack as
  // recv_frame.
  // Arguments:
  //   stream - the grpc_stream to destroy (memory is still owned by the
  //            caller, but any child memory must be cleaned up)
  virtual void DestroyStream(grpc_stream* stream,
                             grpc_closure* then_schedule_closure) = 0;

 protected:
  ~FilterStackTransport() = default;
};

class ClientTransport {
 public:
  virtual void StartCall(CallHandler call_handler) = 0;

 protected:
  ~ClientTransport() = default;
};

class ServerTransport {
 public:
  // Acceptor helps transports create calls.
  class Acceptor {
   public:
    // Returns an arena that can be used to allocate memory for initial
    // metadata parsing, and later passed to CreateCall() as the underlying
    // arena for that call.
    virtual Arena* CreateArena() = 0;
    // Create a call at the server (or fail)
    // arena must have been previously allocated by CreateArena()
    virtual absl::StatusOr<CallInitiator> CreateCall(
        ClientMetadata& client_initial_metadata, Arena* arena) = 0;

   protected:
    ~Acceptor() = default;
  };

  // Called once, shortly after transport setup, to register the acceptor.
  virtual void SetAcceptor(Acceptor* acceptor) = 0;

 protected:
  ~ServerTransport() = default;
};
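
// Illustrative sketch of the expected flow inside a server transport once an
// acceptor has been registered via SetAcceptor() (the frame handler, metadata
// parser, and `acceptor_` member are hypothetical):
//
//   void OnNewStream(/* transport-specific frame data */) {
//     Arena* arena = acceptor_->CreateArena();
//     ClientMetadataHandle md = ParseInitialMetadataInto(arena);
//     absl::StatusOr<CallInitiator> call = acceptor_->CreateCall(*md, arena);
//     if (!call.ok()) { /* reject the stream */ return; }
//     // Use *call to push received messages/metadata into the call.
//   }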

class Transport : public Orphanable {
 public:
  struct RawPointerChannelArgTag {};
  static absl::string_view ChannelArgName() { return GRPC_ARG_TRANSPORT; }

  virtual FilterStackTransport* filter_stack_transport() = 0;
  virtual ClientTransport* client_transport() = 0;
  virtual ServerTransport* server_transport() = 0;

  // name of this transport implementation
  virtual absl::string_view GetTransportName() const = 0;

  // implementation of grpc_transport_set_pollset
  virtual void SetPollset(grpc_stream* stream, grpc_pollset* pollset) = 0;

  // implementation of grpc_transport_set_pollset_set
  virtual void SetPollsetSet(grpc_stream* stream,
                             grpc_pollset_set* pollset_set) = 0;

  void SetPollingEntity(grpc_stream* stream,
                        grpc_polling_entity* pollset_or_pollset_set);

  // implementation of grpc_transport_perform_op
  virtual void PerformOp(grpc_transport_op* op) = 0;

  // implementation of grpc_transport_get_endpoint
  virtual grpc_endpoint* GetEndpoint() = 0;
};
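
// Illustrative sketch: RawPointerChannelArgTag and ChannelArgName() let a
// Transport be stored in and fetched from channel args under
// GRPC_ARG_TRANSPORT. Assuming grpc_core::ChannelArgs and its
// SetObject/GetObject helpers are available (not included by this header):
//
//   ChannelArgs WithTransport(const ChannelArgs& args, Transport* t) {
//     return args.SetObject(t);
//   }
//   Transport* GetTransport(const ChannelArgs& args) {
//     return args.GetObject<Transport>();
//   }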

}  // namespace grpc_core

// Allocate a grpc_transport_op, and preconfigure its on_consumed closure to
// call \a on_complete and then delete the returned transport op
grpc_transport_op* grpc_make_transport_op(grpc_closure* on_complete);
// Allocate a grpc_transport_stream_op_batch, and preconfigure its on_complete
// closure to call \a on_complete and then delete the returned batch
grpc_transport_stream_op_batch* grpc_make_transport_stream_op(
    grpc_closure* on_complete);
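
// Illustrative sketch of using the helper above to send a ping (the ack
// callback and `transport` pointer are hypothetical; the closure must outlive
// the ping):
//
//   static void on_ping_ack(void* /*arg*/, grpc_error_handle /*error*/) {
//     // ... react to the ping ack ...
//   }
//
//   GRPC_CLOSURE_INIT(&ping_acked, on_ping_ack, nullptr,
//                     grpc_schedule_on_exec_ctx);
//   // nullptr: no extra notification needed once the op is consumed; the
//   // returned op deletes itself after its on_consumed closure runs.
//   grpc_transport_op* op = grpc_make_transport_op(nullptr);
//   op->send_ping.on_ack = &ping_acked;
//   transport->PerformOp(op);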

namespace grpc_core {
// This is the key to be used for loading/storing keepalive_throttling in the
// absl::Status object.
constexpr const char* kKeepaliveThrottlingKey =
    "grpc.internal.keepalive_throttling";
}  // namespace grpc_core

#endif  // GRPC_SRC_CORE_LIB_TRANSPORT_TRANSPORT_H