1 // Generated by the protocol buffer compiler.  DO NOT EDIT!
2 // source: tensorflow/core/protobuf/eager_service.proto
3 
4 #include "tensorflow/core/protobuf/eager_service.pb.h"
5 
6 #include <algorithm>
7 #include <cstdint>
8 
9 #include <google/protobuf/io/coded_stream.h>
10 #include <google/protobuf/extension_set.h>
11 #include <google/protobuf/wire_format_lite.h>
12 #include <google/protobuf/io/zero_copy_stream_impl_lite.h>
13 // @@protoc_insertion_point(includes)
14 #include <google/protobuf/port_def.inc>
15 
16 PROTOBUF_PRAGMA_INIT_SEG
17 
18 namespace _pb = ::PROTOBUF_NAMESPACE_ID;
19 namespace _pbi = _pb::internal;
20 
21 namespace tensorflow {
22 namespace eager {
Operation_Input(::_pbi::ConstantInitialized)23 PROTOBUF_CONSTEXPR Operation_Input::Operation_Input(
24     ::_pbi::ConstantInitialized): _impl_{
25     /*decltype(_impl_.item_)*/{}
26   , /*decltype(_impl_._cached_size_)*/{}
27   , /*decltype(_impl_._oneof_case_)*/{}} {}
28 struct Operation_InputDefaultTypeInternal {
Operation_InputDefaultTypeInternaltensorflow::eager::Operation_InputDefaultTypeInternal29   PROTOBUF_CONSTEXPR Operation_InputDefaultTypeInternal()
30       : _instance(::_pbi::ConstantInitialized{}) {}
~Operation_InputDefaultTypeInternaltensorflow::eager::Operation_InputDefaultTypeInternal31   ~Operation_InputDefaultTypeInternal() {}
32   union {  // NOLINT(misc-non-private-member-variables-in-classes)
33     Operation_Input _instance;
34   };
35 };
36 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 Operation_InputDefaultTypeInternal _Operation_Input_default_instance_;
Operation_AttrsEntry_DoNotUse(::_pbi::ConstantInitialized)37 PROTOBUF_CONSTEXPR Operation_AttrsEntry_DoNotUse::Operation_AttrsEntry_DoNotUse(
38     ::_pbi::ConstantInitialized) {}
39 struct Operation_AttrsEntry_DoNotUseDefaultTypeInternal {
Operation_AttrsEntry_DoNotUseDefaultTypeInternaltensorflow::eager::Operation_AttrsEntry_DoNotUseDefaultTypeInternal40   PROTOBUF_CONSTEXPR Operation_AttrsEntry_DoNotUseDefaultTypeInternal()
41       : _instance(::_pbi::ConstantInitialized{}) {}
~Operation_AttrsEntry_DoNotUseDefaultTypeInternaltensorflow::eager::Operation_AttrsEntry_DoNotUseDefaultTypeInternal42   ~Operation_AttrsEntry_DoNotUseDefaultTypeInternal() {}
43   union {  // NOLINT(misc-non-private-member-variables-in-classes)
44     Operation_AttrsEntry_DoNotUse _instance;
45   };
46 };
47 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 Operation_AttrsEntry_DoNotUseDefaultTypeInternal _Operation_AttrsEntry_DoNotUse_default_instance_;
Operation(::_pbi::ConstantInitialized)48 PROTOBUF_CONSTEXPR Operation::Operation(
49     ::_pbi::ConstantInitialized): _impl_{
50     /*decltype(_impl_.control_op_ids_)*/{}
51   , /*decltype(_impl_._control_op_ids_cached_byte_size_)*/{0}
52   , /*decltype(_impl_.attrs_)*/{}
53   , /*decltype(_impl_.op_inputs_)*/{}
54   , /*decltype(_impl_.name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
55   , /*decltype(_impl_.device_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
56   , /*decltype(_impl_.id_)*/::int64_t{0}
57   , /*decltype(_impl_.func_step_id_)*/::int64_t{0}
58   , /*decltype(_impl_.is_component_function_)*/false
59   , /*decltype(_impl_.is_function_)*/false
60   , /*decltype(_impl_._cached_size_)*/{}} {}
61 struct OperationDefaultTypeInternal {
OperationDefaultTypeInternaltensorflow::eager::OperationDefaultTypeInternal62   PROTOBUF_CONSTEXPR OperationDefaultTypeInternal()
63       : _instance(::_pbi::ConstantInitialized{}) {}
~OperationDefaultTypeInternaltensorflow::eager::OperationDefaultTypeInternal64   ~OperationDefaultTypeInternal() {}
65   union {  // NOLINT(misc-non-private-member-variables-in-classes)
66     Operation _instance;
67   };
68 };
69 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 OperationDefaultTypeInternal _Operation_default_instance_;
QueueItem(::_pbi::ConstantInitialized)70 PROTOBUF_CONSTEXPR QueueItem::QueueItem(
71     ::_pbi::ConstantInitialized): _impl_{
72     /*decltype(_impl_.item_)*/{}
73   , /*decltype(_impl_._cached_size_)*/{}
74   , /*decltype(_impl_._oneof_case_)*/{}} {}
75 struct QueueItemDefaultTypeInternal {
QueueItemDefaultTypeInternaltensorflow::eager::QueueItemDefaultTypeInternal76   PROTOBUF_CONSTEXPR QueueItemDefaultTypeInternal()
77       : _instance(::_pbi::ConstantInitialized{}) {}
~QueueItemDefaultTypeInternaltensorflow::eager::QueueItemDefaultTypeInternal78   ~QueueItemDefaultTypeInternal() {}
79   union {  // NOLINT(misc-non-private-member-variables-in-classes)
80     QueueItem _instance;
81   };
82 };
83 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 QueueItemDefaultTypeInternal _QueueItem_default_instance_;
QueueResponse(::_pbi::ConstantInitialized)84 PROTOBUF_CONSTEXPR QueueResponse::QueueResponse(
85     ::_pbi::ConstantInitialized): _impl_{
86     /*decltype(_impl_.shape_)*/{}
87   , /*decltype(_impl_.tensor_)*/{}
88   , /*decltype(_impl_.device_)*/{}
89   , /*decltype(_impl_._cached_size_)*/{}} {}
90 struct QueueResponseDefaultTypeInternal {
QueueResponseDefaultTypeInternaltensorflow::eager::QueueResponseDefaultTypeInternal91   PROTOBUF_CONSTEXPR QueueResponseDefaultTypeInternal()
92       : _instance(::_pbi::ConstantInitialized{}) {}
~QueueResponseDefaultTypeInternaltensorflow::eager::QueueResponseDefaultTypeInternal93   ~QueueResponseDefaultTypeInternal() {}
94   union {  // NOLINT(misc-non-private-member-variables-in-classes)
95     QueueResponse _instance;
96   };
97 };
98 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 QueueResponseDefaultTypeInternal _QueueResponse_default_instance_;
CreateContextRequest(::_pbi::ConstantInitialized)99 PROTOBUF_CONSTEXPR CreateContextRequest::CreateContextRequest(
100     ::_pbi::ConstantInitialized): _impl_{
101     /*decltype(_impl_.cluster_device_attributes_)*/{}
102   , /*decltype(_impl_.server_def_)*/nullptr
103   , /*decltype(_impl_.version_def_)*/nullptr
104   , /*decltype(_impl_.keep_alive_secs_)*/::int64_t{0}
105   , /*decltype(_impl_.context_id_)*/::uint64_t{0u}
106   , /*decltype(_impl_.context_view_id_)*/::uint64_t{0u}
107   , /*decltype(_impl_.async_)*/false
108   , /*decltype(_impl_.lazy_copy_remote_function_inputs_)*/false
109   , /*decltype(_impl_._cached_size_)*/{}} {}
110 struct CreateContextRequestDefaultTypeInternal {
CreateContextRequestDefaultTypeInternaltensorflow::eager::CreateContextRequestDefaultTypeInternal111   PROTOBUF_CONSTEXPR CreateContextRequestDefaultTypeInternal()
112       : _instance(::_pbi::ConstantInitialized{}) {}
~CreateContextRequestDefaultTypeInternaltensorflow::eager::CreateContextRequestDefaultTypeInternal113   ~CreateContextRequestDefaultTypeInternal() {}
114   union {  // NOLINT(misc-non-private-member-variables-in-classes)
115     CreateContextRequest _instance;
116   };
117 };
118 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 CreateContextRequestDefaultTypeInternal _CreateContextRequest_default_instance_;
CreateContextResponse(::_pbi::ConstantInitialized)119 PROTOBUF_CONSTEXPR CreateContextResponse::CreateContextResponse(
120     ::_pbi::ConstantInitialized): _impl_{
121     /*decltype(_impl_.device_attributes_)*/{}
122   , /*decltype(_impl_._cached_size_)*/{}} {}
123 struct CreateContextResponseDefaultTypeInternal {
CreateContextResponseDefaultTypeInternaltensorflow::eager::CreateContextResponseDefaultTypeInternal124   PROTOBUF_CONSTEXPR CreateContextResponseDefaultTypeInternal()
125       : _instance(::_pbi::ConstantInitialized{}) {}
~CreateContextResponseDefaultTypeInternaltensorflow::eager::CreateContextResponseDefaultTypeInternal126   ~CreateContextResponseDefaultTypeInternal() {}
127   union {  // NOLINT(misc-non-private-member-variables-in-classes)
128     CreateContextResponse _instance;
129   };
130 };
131 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 CreateContextResponseDefaultTypeInternal _CreateContextResponse_default_instance_;
UpdateContextRequest(::_pbi::ConstantInitialized)132 PROTOBUF_CONSTEXPR UpdateContextRequest::UpdateContextRequest(
133     ::_pbi::ConstantInitialized): _impl_{
134     /*decltype(_impl_.cluster_device_attributes_)*/{}
135   , /*decltype(_impl_.server_def_)*/nullptr
136   , /*decltype(_impl_.context_id_)*/::uint64_t{0u}
137   , /*decltype(_impl_.context_view_id_)*/::uint64_t{0u}
138   , /*decltype(_impl_._cached_size_)*/{}} {}
139 struct UpdateContextRequestDefaultTypeInternal {
UpdateContextRequestDefaultTypeInternaltensorflow::eager::UpdateContextRequestDefaultTypeInternal140   PROTOBUF_CONSTEXPR UpdateContextRequestDefaultTypeInternal()
141       : _instance(::_pbi::ConstantInitialized{}) {}
~UpdateContextRequestDefaultTypeInternaltensorflow::eager::UpdateContextRequestDefaultTypeInternal142   ~UpdateContextRequestDefaultTypeInternal() {}
143   union {  // NOLINT(misc-non-private-member-variables-in-classes)
144     UpdateContextRequest _instance;
145   };
146 };
147 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 UpdateContextRequestDefaultTypeInternal _UpdateContextRequest_default_instance_;
UpdateContextResponse(::_pbi::ConstantInitialized)148 PROTOBUF_CONSTEXPR UpdateContextResponse::UpdateContextResponse(
149     ::_pbi::ConstantInitialized): _impl_{
150     /*decltype(_impl_.device_attributes_)*/{}
151   , /*decltype(_impl_._cached_size_)*/{}} {}
152 struct UpdateContextResponseDefaultTypeInternal {
UpdateContextResponseDefaultTypeInternaltensorflow::eager::UpdateContextResponseDefaultTypeInternal153   PROTOBUF_CONSTEXPR UpdateContextResponseDefaultTypeInternal()
154       : _instance(::_pbi::ConstantInitialized{}) {}
~UpdateContextResponseDefaultTypeInternaltensorflow::eager::UpdateContextResponseDefaultTypeInternal155   ~UpdateContextResponseDefaultTypeInternal() {}
156   union {  // NOLINT(misc-non-private-member-variables-in-classes)
157     UpdateContextResponse _instance;
158   };
159 };
160 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 UpdateContextResponseDefaultTypeInternal _UpdateContextResponse_default_instance_;
EnqueueRequest(::_pbi::ConstantInitialized)161 PROTOBUF_CONSTEXPR EnqueueRequest::EnqueueRequest(
162     ::_pbi::ConstantInitialized): _impl_{
163     /*decltype(_impl_.queue_)*/{}
164   , /*decltype(_impl_.context_id_)*/::uint64_t{0u}
165   , /*decltype(_impl_._cached_size_)*/{}} {}
166 struct EnqueueRequestDefaultTypeInternal {
EnqueueRequestDefaultTypeInternaltensorflow::eager::EnqueueRequestDefaultTypeInternal167   PROTOBUF_CONSTEXPR EnqueueRequestDefaultTypeInternal()
168       : _instance(::_pbi::ConstantInitialized{}) {}
~EnqueueRequestDefaultTypeInternaltensorflow::eager::EnqueueRequestDefaultTypeInternal169   ~EnqueueRequestDefaultTypeInternal() {}
170   union {  // NOLINT(misc-non-private-member-variables-in-classes)
171     EnqueueRequest _instance;
172   };
173 };
174 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 EnqueueRequestDefaultTypeInternal _EnqueueRequest_default_instance_;
EnqueueResponse(::_pbi::ConstantInitialized)175 PROTOBUF_CONSTEXPR EnqueueResponse::EnqueueResponse(
176     ::_pbi::ConstantInitialized): _impl_{
177     /*decltype(_impl_.queue_response_)*/{}
178   , /*decltype(_impl_._cached_size_)*/{}} {}
179 struct EnqueueResponseDefaultTypeInternal {
EnqueueResponseDefaultTypeInternaltensorflow::eager::EnqueueResponseDefaultTypeInternal180   PROTOBUF_CONSTEXPR EnqueueResponseDefaultTypeInternal()
181       : _instance(::_pbi::ConstantInitialized{}) {}
~EnqueueResponseDefaultTypeInternaltensorflow::eager::EnqueueResponseDefaultTypeInternal182   ~EnqueueResponseDefaultTypeInternal() {}
183   union {  // NOLINT(misc-non-private-member-variables-in-classes)
184     EnqueueResponse _instance;
185   };
186 };
187 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 EnqueueResponseDefaultTypeInternal _EnqueueResponse_default_instance_;
WaitQueueDoneRequest(::_pbi::ConstantInitialized)188 PROTOBUF_CONSTEXPR WaitQueueDoneRequest::WaitQueueDoneRequest(
189     ::_pbi::ConstantInitialized): _impl_{
190     /*decltype(_impl_.op_id_)*/{}
191   , /*decltype(_impl_._op_id_cached_byte_size_)*/{0}
192   , /*decltype(_impl_.context_id_)*/::uint64_t{0u}
193   , /*decltype(_impl_._cached_size_)*/{}} {}
194 struct WaitQueueDoneRequestDefaultTypeInternal {
WaitQueueDoneRequestDefaultTypeInternaltensorflow::eager::WaitQueueDoneRequestDefaultTypeInternal195   PROTOBUF_CONSTEXPR WaitQueueDoneRequestDefaultTypeInternal()
196       : _instance(::_pbi::ConstantInitialized{}) {}
~WaitQueueDoneRequestDefaultTypeInternaltensorflow::eager::WaitQueueDoneRequestDefaultTypeInternal197   ~WaitQueueDoneRequestDefaultTypeInternal() {}
198   union {  // NOLINT(misc-non-private-member-variables-in-classes)
199     WaitQueueDoneRequest _instance;
200   };
201 };
202 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 WaitQueueDoneRequestDefaultTypeInternal _WaitQueueDoneRequest_default_instance_;
WaitQueueDoneResponse(::_pbi::ConstantInitialized)203 PROTOBUF_CONSTEXPR WaitQueueDoneResponse::WaitQueueDoneResponse(
204     ::_pbi::ConstantInitialized): _impl_{
205     /*decltype(_impl_._cached_size_)*/{}} {}
206 struct WaitQueueDoneResponseDefaultTypeInternal {
WaitQueueDoneResponseDefaultTypeInternaltensorflow::eager::WaitQueueDoneResponseDefaultTypeInternal207   PROTOBUF_CONSTEXPR WaitQueueDoneResponseDefaultTypeInternal()
208       : _instance(::_pbi::ConstantInitialized{}) {}
~WaitQueueDoneResponseDefaultTypeInternaltensorflow::eager::WaitQueueDoneResponseDefaultTypeInternal209   ~WaitQueueDoneResponseDefaultTypeInternal() {}
210   union {  // NOLINT(misc-non-private-member-variables-in-classes)
211     WaitQueueDoneResponse _instance;
212   };
213 };
214 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 WaitQueueDoneResponseDefaultTypeInternal _WaitQueueDoneResponse_default_instance_;
RunComponentFunctionRequest(::_pbi::ConstantInitialized)215 PROTOBUF_CONSTEXPR RunComponentFunctionRequest::RunComponentFunctionRequest(
216     ::_pbi::ConstantInitialized): _impl_{
217     /*decltype(_impl_.output_num_)*/{}
218   , /*decltype(_impl_._output_num_cached_byte_size_)*/{0}
219   , /*decltype(_impl_.operation_)*/nullptr
220   , /*decltype(_impl_.context_id_)*/::uint64_t{0u}
221   , /*decltype(_impl_._cached_size_)*/{}} {}
222 struct RunComponentFunctionRequestDefaultTypeInternal {
RunComponentFunctionRequestDefaultTypeInternaltensorflow::eager::RunComponentFunctionRequestDefaultTypeInternal223   PROTOBUF_CONSTEXPR RunComponentFunctionRequestDefaultTypeInternal()
224       : _instance(::_pbi::ConstantInitialized{}) {}
~RunComponentFunctionRequestDefaultTypeInternaltensorflow::eager::RunComponentFunctionRequestDefaultTypeInternal225   ~RunComponentFunctionRequestDefaultTypeInternal() {}
226   union {  // NOLINT(misc-non-private-member-variables-in-classes)
227     RunComponentFunctionRequest _instance;
228   };
229 };
230 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RunComponentFunctionRequestDefaultTypeInternal _RunComponentFunctionRequest_default_instance_;
RunComponentFunctionResponse(::_pbi::ConstantInitialized)231 PROTOBUF_CONSTEXPR RunComponentFunctionResponse::RunComponentFunctionResponse(
232     ::_pbi::ConstantInitialized): _impl_{
233     /*decltype(_impl_.shape_)*/{}
234   , /*decltype(_impl_.tensor_)*/{}
235   , /*decltype(_impl_._cached_size_)*/{}} {}
236 struct RunComponentFunctionResponseDefaultTypeInternal {
RunComponentFunctionResponseDefaultTypeInternaltensorflow::eager::RunComponentFunctionResponseDefaultTypeInternal237   PROTOBUF_CONSTEXPR RunComponentFunctionResponseDefaultTypeInternal()
238       : _instance(::_pbi::ConstantInitialized{}) {}
~RunComponentFunctionResponseDefaultTypeInternaltensorflow::eager::RunComponentFunctionResponseDefaultTypeInternal239   ~RunComponentFunctionResponseDefaultTypeInternal() {}
240   union {  // NOLINT(misc-non-private-member-variables-in-classes)
241     RunComponentFunctionResponse _instance;
242   };
243 };
244 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RunComponentFunctionResponseDefaultTypeInternal _RunComponentFunctionResponse_default_instance_;
KeepAliveRequest(::_pbi::ConstantInitialized)245 PROTOBUF_CONSTEXPR KeepAliveRequest::KeepAliveRequest(
246     ::_pbi::ConstantInitialized): _impl_{
247     /*decltype(_impl_.context_id_)*/::uint64_t{0u}
248   , /*decltype(_impl_._cached_size_)*/{}} {}
249 struct KeepAliveRequestDefaultTypeInternal {
KeepAliveRequestDefaultTypeInternaltensorflow::eager::KeepAliveRequestDefaultTypeInternal250   PROTOBUF_CONSTEXPR KeepAliveRequestDefaultTypeInternal()
251       : _instance(::_pbi::ConstantInitialized{}) {}
~KeepAliveRequestDefaultTypeInternaltensorflow::eager::KeepAliveRequestDefaultTypeInternal252   ~KeepAliveRequestDefaultTypeInternal() {}
253   union {  // NOLINT(misc-non-private-member-variables-in-classes)
254     KeepAliveRequest _instance;
255   };
256 };
257 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 KeepAliveRequestDefaultTypeInternal _KeepAliveRequest_default_instance_;
KeepAliveResponse(::_pbi::ConstantInitialized)258 PROTOBUF_CONSTEXPR KeepAliveResponse::KeepAliveResponse(
259     ::_pbi::ConstantInitialized): _impl_{
260     /*decltype(_impl_.context_view_id_)*/::uint64_t{0u}
261   , /*decltype(_impl_._cached_size_)*/{}} {}
262 struct KeepAliveResponseDefaultTypeInternal {
KeepAliveResponseDefaultTypeInternaltensorflow::eager::KeepAliveResponseDefaultTypeInternal263   PROTOBUF_CONSTEXPR KeepAliveResponseDefaultTypeInternal()
264       : _instance(::_pbi::ConstantInitialized{}) {}
~KeepAliveResponseDefaultTypeInternaltensorflow::eager::KeepAliveResponseDefaultTypeInternal265   ~KeepAliveResponseDefaultTypeInternal() {}
266   union {  // NOLINT(misc-non-private-member-variables-in-classes)
267     KeepAliveResponse _instance;
268   };
269 };
270 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 KeepAliveResponseDefaultTypeInternal _KeepAliveResponse_default_instance_;
CloseContextRequest(::_pbi::ConstantInitialized)271 PROTOBUF_CONSTEXPR CloseContextRequest::CloseContextRequest(
272     ::_pbi::ConstantInitialized): _impl_{
273     /*decltype(_impl_.context_id_)*/::uint64_t{0u}
274   , /*decltype(_impl_.context_view_id_)*/::uint64_t{0u}
275   , /*decltype(_impl_._cached_size_)*/{}} {}
276 struct CloseContextRequestDefaultTypeInternal {
CloseContextRequestDefaultTypeInternaltensorflow::eager::CloseContextRequestDefaultTypeInternal277   PROTOBUF_CONSTEXPR CloseContextRequestDefaultTypeInternal()
278       : _instance(::_pbi::ConstantInitialized{}) {}
~CloseContextRequestDefaultTypeInternaltensorflow::eager::CloseContextRequestDefaultTypeInternal279   ~CloseContextRequestDefaultTypeInternal() {}
280   union {  // NOLINT(misc-non-private-member-variables-in-classes)
281     CloseContextRequest _instance;
282   };
283 };
284 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 CloseContextRequestDefaultTypeInternal _CloseContextRequest_default_instance_;
CloseContextResponse(::_pbi::ConstantInitialized)285 PROTOBUF_CONSTEXPR CloseContextResponse::CloseContextResponse(
286     ::_pbi::ConstantInitialized): _impl_{
287     /*decltype(_impl_._cached_size_)*/{}} {}
288 struct CloseContextResponseDefaultTypeInternal {
CloseContextResponseDefaultTypeInternaltensorflow::eager::CloseContextResponseDefaultTypeInternal289   PROTOBUF_CONSTEXPR CloseContextResponseDefaultTypeInternal()
290       : _instance(::_pbi::ConstantInitialized{}) {}
~CloseContextResponseDefaultTypeInternaltensorflow::eager::CloseContextResponseDefaultTypeInternal291   ~CloseContextResponseDefaultTypeInternal() {}
292   union {  // NOLINT(misc-non-private-member-variables-in-classes)
293     CloseContextResponse _instance;
294   };
295 };
296 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 CloseContextResponseDefaultTypeInternal _CloseContextResponse_default_instance_;
RegisterFunctionOp(::_pbi::ConstantInitialized)297 PROTOBUF_CONSTEXPR RegisterFunctionOp::RegisterFunctionOp(
298     ::_pbi::ConstantInitialized): _impl_{
299     /*decltype(_impl_.function_def_)*/nullptr
300   , /*decltype(_impl_.library_)*/nullptr
301   , /*decltype(_impl_.is_component_function_)*/false
302   , /*decltype(_impl_._cached_size_)*/{}} {}
303 struct RegisterFunctionOpDefaultTypeInternal {
RegisterFunctionOpDefaultTypeInternaltensorflow::eager::RegisterFunctionOpDefaultTypeInternal304   PROTOBUF_CONSTEXPR RegisterFunctionOpDefaultTypeInternal()
305       : _instance(::_pbi::ConstantInitialized{}) {}
~RegisterFunctionOpDefaultTypeInternaltensorflow::eager::RegisterFunctionOpDefaultTypeInternal306   ~RegisterFunctionOpDefaultTypeInternal() {}
307   union {  // NOLINT(misc-non-private-member-variables-in-classes)
308     RegisterFunctionOp _instance;
309   };
310 };
311 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RegisterFunctionOpDefaultTypeInternal _RegisterFunctionOp_default_instance_;
CleanupFunctionOp(::_pbi::ConstantInitialized)312 PROTOBUF_CONSTEXPR CleanupFunctionOp::CleanupFunctionOp(
313     ::_pbi::ConstantInitialized): _impl_{
314     /*decltype(_impl_.step_id_)*/::int64_t{0}
315   , /*decltype(_impl_._cached_size_)*/{}} {}
316 struct CleanupFunctionOpDefaultTypeInternal {
CleanupFunctionOpDefaultTypeInternaltensorflow::eager::CleanupFunctionOpDefaultTypeInternal317   PROTOBUF_CONSTEXPR CleanupFunctionOpDefaultTypeInternal()
318       : _instance(::_pbi::ConstantInitialized{}) {}
~CleanupFunctionOpDefaultTypeInternaltensorflow::eager::CleanupFunctionOpDefaultTypeInternal319   ~CleanupFunctionOpDefaultTypeInternal() {}
320   union {  // NOLINT(misc-non-private-member-variables-in-classes)
321     CleanupFunctionOp _instance;
322   };
323 };
324 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 CleanupFunctionOpDefaultTypeInternal _CleanupFunctionOp_default_instance_;
SyncRemoteExecutorForStream(::_pbi::ConstantInitialized)325 PROTOBUF_CONSTEXPR SyncRemoteExecutorForStream::SyncRemoteExecutorForStream(
326     ::_pbi::ConstantInitialized): _impl_{
327     /*decltype(_impl_._cached_size_)*/{}} {}
328 struct SyncRemoteExecutorForStreamDefaultTypeInternal {
SyncRemoteExecutorForStreamDefaultTypeInternaltensorflow::eager::SyncRemoteExecutorForStreamDefaultTypeInternal329   PROTOBUF_CONSTEXPR SyncRemoteExecutorForStreamDefaultTypeInternal()
330       : _instance(::_pbi::ConstantInitialized{}) {}
~SyncRemoteExecutorForStreamDefaultTypeInternaltensorflow::eager::SyncRemoteExecutorForStreamDefaultTypeInternal331   ~SyncRemoteExecutorForStreamDefaultTypeInternal() {}
332   union {  // NOLINT(misc-non-private-member-variables-in-classes)
333     SyncRemoteExecutorForStream _instance;
334   };
335 };
336 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 SyncRemoteExecutorForStreamDefaultTypeInternal _SyncRemoteExecutorForStream_default_instance_;
SendTensorOp(::_pbi::ConstantInitialized)337 PROTOBUF_CONSTEXPR SendTensorOp::SendTensorOp(
338     ::_pbi::ConstantInitialized): _impl_{
339     /*decltype(_impl_.tensors_)*/{}
340   , /*decltype(_impl_.device_name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
341   , /*decltype(_impl_.op_id_)*/::int64_t{0}
342   , /*decltype(_impl_._cached_size_)*/{}} {}
343 struct SendTensorOpDefaultTypeInternal {
SendTensorOpDefaultTypeInternaltensorflow::eager::SendTensorOpDefaultTypeInternal344   PROTOBUF_CONSTEXPR SendTensorOpDefaultTypeInternal()
345       : _instance(::_pbi::ConstantInitialized{}) {}
~SendTensorOpDefaultTypeInternaltensorflow::eager::SendTensorOpDefaultTypeInternal346   ~SendTensorOpDefaultTypeInternal() {}
347   union {  // NOLINT(misc-non-private-member-variables-in-classes)
348     SendTensorOp _instance;
349   };
350 };
351 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 SendTensorOpDefaultTypeInternal _SendTensorOp_default_instance_;
SendPackedHandleOp_LocalTensorHandle(::_pbi::ConstantInitialized)352 PROTOBUF_CONSTEXPR SendPackedHandleOp_LocalTensorHandle::SendPackedHandleOp_LocalTensorHandle(
353     ::_pbi::ConstantInitialized): _impl_{
354     /*decltype(_impl_.device_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
355   , /*decltype(_impl_.tensor_)*/nullptr
356   , /*decltype(_impl_._cached_size_)*/{}} {}
357 struct SendPackedHandleOp_LocalTensorHandleDefaultTypeInternal {
SendPackedHandleOp_LocalTensorHandleDefaultTypeInternaltensorflow::eager::SendPackedHandleOp_LocalTensorHandleDefaultTypeInternal358   PROTOBUF_CONSTEXPR SendPackedHandleOp_LocalTensorHandleDefaultTypeInternal()
359       : _instance(::_pbi::ConstantInitialized{}) {}
~SendPackedHandleOp_LocalTensorHandleDefaultTypeInternaltensorflow::eager::SendPackedHandleOp_LocalTensorHandleDefaultTypeInternal360   ~SendPackedHandleOp_LocalTensorHandleDefaultTypeInternal() {}
361   union {  // NOLINT(misc-non-private-member-variables-in-classes)
362     SendPackedHandleOp_LocalTensorHandle _instance;
363   };
364 };
365 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 SendPackedHandleOp_LocalTensorHandleDefaultTypeInternal _SendPackedHandleOp_LocalTensorHandle_default_instance_;
SendPackedHandleOp_Handle(::_pbi::ConstantInitialized)366 PROTOBUF_CONSTEXPR SendPackedHandleOp_Handle::SendPackedHandleOp_Handle(
367     ::_pbi::ConstantInitialized): _impl_{
368     /*decltype(_impl_.item_)*/{}
369   , /*decltype(_impl_._cached_size_)*/{}
370   , /*decltype(_impl_._oneof_case_)*/{}} {}
371 struct SendPackedHandleOp_HandleDefaultTypeInternal {
SendPackedHandleOp_HandleDefaultTypeInternaltensorflow::eager::SendPackedHandleOp_HandleDefaultTypeInternal372   PROTOBUF_CONSTEXPR SendPackedHandleOp_HandleDefaultTypeInternal()
373       : _instance(::_pbi::ConstantInitialized{}) {}
~SendPackedHandleOp_HandleDefaultTypeInternaltensorflow::eager::SendPackedHandleOp_HandleDefaultTypeInternal374   ~SendPackedHandleOp_HandleDefaultTypeInternal() {}
375   union {  // NOLINT(misc-non-private-member-variables-in-classes)
376     SendPackedHandleOp_Handle _instance;
377   };
378 };
379 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 SendPackedHandleOp_HandleDefaultTypeInternal _SendPackedHandleOp_Handle_default_instance_;
SendPackedHandleOp(::_pbi::ConstantInitialized)380 PROTOBUF_CONSTEXPR SendPackedHandleOp::SendPackedHandleOp(
381     ::_pbi::ConstantInitialized): _impl_{
382     /*decltype(_impl_.handles_)*/{}
383   , /*decltype(_impl_.device_name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
384   , /*decltype(_impl_.op_id_)*/::int64_t{0}
385   , /*decltype(_impl_._cached_size_)*/{}} {}
386 struct SendPackedHandleOpDefaultTypeInternal {
SendPackedHandleOpDefaultTypeInternaltensorflow::eager::SendPackedHandleOpDefaultTypeInternal387   PROTOBUF_CONSTEXPR SendPackedHandleOpDefaultTypeInternal()
388       : _instance(::_pbi::ConstantInitialized{}) {}
~SendPackedHandleOpDefaultTypeInternaltensorflow::eager::SendPackedHandleOpDefaultTypeInternal389   ~SendPackedHandleOpDefaultTypeInternal() {}
390   union {  // NOLINT(misc-non-private-member-variables-in-classes)
391     SendPackedHandleOp _instance;
392   };
393 };
394 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 SendPackedHandleOpDefaultTypeInternal _SendPackedHandleOp_default_instance_;
395 }  // namespace eager
396 }  // namespace tensorflow
397 namespace tensorflow {
398 namespace eager {
399 
400 // ===================================================================
401 
402 class Operation_Input::_Internal {
403  public:
404   static const ::tensorflow::eager::RemoteTensorHandle& remote_handle(const Operation_Input* msg);
405   static const ::tensorflow::TensorProto& tensor(const Operation_Input* msg);
406 };
407 
408 const ::tensorflow::eager::RemoteTensorHandle&
remote_handle(const Operation_Input * msg)409 Operation_Input::_Internal::remote_handle(const Operation_Input* msg) {
410   return *msg->_impl_.item_.remote_handle_;
411 }
412 const ::tensorflow::TensorProto&
tensor(const Operation_Input * msg)413 Operation_Input::_Internal::tensor(const Operation_Input* msg) {
414   return *msg->_impl_.item_.tensor_;
415 }
set_allocated_remote_handle(::tensorflow::eager::RemoteTensorHandle * remote_handle)416 void Operation_Input::set_allocated_remote_handle(::tensorflow::eager::RemoteTensorHandle* remote_handle) {
417   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
418   clear_item();
419   if (remote_handle) {
420     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
421         ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
422                 reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(remote_handle));
423     if (message_arena != submessage_arena) {
424       remote_handle = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
425           message_arena, remote_handle, submessage_arena);
426     }
427     set_has_remote_handle();
428     _impl_.item_.remote_handle_ = remote_handle;
429   }
430   // @@protoc_insertion_point(field_set_allocated:tensorflow.eager.Operation.Input.remote_handle)
431 }
clear_remote_handle()432 void Operation_Input::clear_remote_handle() {
433   if (_internal_has_remote_handle()) {
434     if (GetArenaForAllocation() == nullptr) {
435       delete _impl_.item_.remote_handle_;
436     }
437     clear_has_item();
438   }
439 }
set_allocated_tensor(::tensorflow::TensorProto * tensor)440 void Operation_Input::set_allocated_tensor(::tensorflow::TensorProto* tensor) {
441   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
442   clear_item();
443   if (tensor) {
444     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
445         ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
446                 reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(tensor));
447     if (message_arena != submessage_arena) {
448       tensor = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
449           message_arena, tensor, submessage_arena);
450     }
451     set_has_tensor();
452     _impl_.item_.tensor_ = tensor;
453   }
454   // @@protoc_insertion_point(field_set_allocated:tensorflow.eager.Operation.Input.tensor)
455 }
clear_tensor()456 void Operation_Input::clear_tensor() {
457   if (_internal_has_tensor()) {
458     if (GetArenaForAllocation() == nullptr) {
459       delete _impl_.item_.tensor_;
460     }
461     clear_has_item();
462   }
463 }
Operation_Input(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)464 Operation_Input::Operation_Input(::PROTOBUF_NAMESPACE_ID::Arena* arena,
465                          bool is_message_owned)
466   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
467   SharedCtor(arena, is_message_owned);
468   // @@protoc_insertion_point(arena_constructor:tensorflow.eager.Operation.Input)
469 }
// Copy constructor: value-initializes _impl_, copies unknown fields, then
// deep-copies whichever `item` oneof member is set in `from`.
Operation_Input::Operation_Input(const Operation_Input& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  Operation_Input* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.item_){}
    , /*decltype(_impl_._cached_size_)*/{}
    , /*decltype(_impl_._oneof_case_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  clear_has_item();
  switch (from.item_case()) {
    case kRemoteHandle: {
      _this->_internal_mutable_remote_handle()->::tensorflow::eager::RemoteTensorHandle::MergeFrom(
          from._internal_remote_handle());
      break;
    }
    case kTensor: {
      _this->_internal_mutable_tensor()->::tensorflow::TensorProto::MergeFrom(
          from._internal_tensor());
      break;
    }
    case ITEM_NOT_SET: {
      break;
    }
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.Operation.Input)
}
497 
// Shared constructor body used by all constructors: placement-constructs
// _impl_ with default-initialized members and marks the oneof as unset.
inline void Operation_Input::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.item_){}
    , /*decltype(_impl_._cached_size_)*/{}
    , /*decltype(_impl_._oneof_case_)*/{}
  };
  clear_has_item();
}
509 
Operation_Input::~Operation_Input() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.Operation.Input)
  // When the message lives on an arena, all owned storage is freed with the
  // arena, so the per-field destructor work in SharedDtor() is skipped.
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
518 
// Heap-only destructor body: releases the active oneof member, if any.
// Must never run for arena-owned messages (checked by the DCHECK).
inline void Operation_Input::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  if (has_item()) {
    clear_item();
  }
}
525 
SetCachedSize(int size) const526 void Operation_Input::SetCachedSize(int size) const {
527   _impl_._cached_size_.Set(size);
528 }
529 
// Releases whichever member of the `item` oneof is currently set and resets
// the oneof discriminator to ITEM_NOT_SET.  Heap-owned submessages are
// deleted; arena-owned ones are left for the arena to reclaim.
void Operation_Input::clear_item() {
// @@protoc_insertion_point(one_of_clear_start:tensorflow.eager.Operation.Input)
  switch (item_case()) {
    case kRemoteHandle: {
      if (GetArenaForAllocation() == nullptr) {
        delete _impl_.item_.remote_handle_;
      }
      break;
    }
    case kTensor: {
      if (GetArenaForAllocation() == nullptr) {
        delete _impl_.item_.tensor_;
      }
      break;
    }
    case ITEM_NOT_SET: {
      break;
    }
  }
  _impl_._oneof_case_[0] = ITEM_NOT_SET;
}
551 
552 
Clear()553 void Operation_Input::Clear() {
554 // @@protoc_insertion_point(message_clear_start:tensorflow.eager.Operation.Input)
555   ::uint32_t cached_has_bits = 0;
556   // Prevent compiler warnings about cached_has_bits being unused
557   (void) cached_has_bits;
558 
559   clear_item();
560   _internal_metadata_.Clear<std::string>();
561 }
562 
// Wire-format parser for Operation.Input.  Dispatches on each field number;
// tags that do not match a known field are preserved as unknown fields in
// the lite-runtime string blob.  Returns nullptr on malformed input.
const char* Operation_Input::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .tensorflow.eager.RemoteTensorHandle remote_handle = 1;
      case 1:
        // 10 == (field 1 << 3) | WIRETYPE_LENGTH_DELIMITED.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr = ctx->ParseMessage(_internal_mutable_remote_handle(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.TensorProto tensor = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr = ctx->ParseMessage(_internal_mutable_tensor(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or an end-group tag terminates this (sub)message.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
609 
// Serializes this message to `target` using the eps-copy output stream.
// Submessage lengths come from GetCachedSize(), so ByteSizeLong() must have
// been called first (the public serialization entry points guarantee this).
::uint8_t* Operation_Input::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.Operation.Input)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.eager.RemoteTensorHandle remote_handle = 1;
  if (_internal_has_remote_handle()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(1, _Internal::remote_handle(this),
        _Internal::remote_handle(this).GetCachedSize(), target, stream);
  }

  // .tensorflow.TensorProto tensor = 2;
  if (_internal_has_tensor()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(2, _Internal::tensor(this),
        _Internal::tensor(this).GetCachedSize(), target, stream);
  }

  // Unknown fields are stored as a raw pre-encoded byte string (lite runtime)
  // and are appended verbatim.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.Operation.Input)
  return target;
}
637 
// Computes the serialized size in bytes and caches it for the subsequent
// _InternalSerialize() pass.
size_t Operation_Input::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.Operation.Input)
  size_t total_size = 0;

  // Exactly one (or none) of the oneof members contributes.  The leading
  // "1 +" is the single-byte tag for field numbers 1 and 2.
  switch (item_case()) {
    // .tensorflow.eager.RemoteTensorHandle remote_handle = 1;
    case kRemoteHandle: {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          *_impl_.item_.remote_handle_);
      break;
    }
    // .tensorflow.TensorProto tensor = 2;
    case kTensor: {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          *_impl_.item_.tensor_);
      break;
    }
    case ITEM_NOT_SET: {
      break;
    }
  }
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
668 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)669 void Operation_Input::CheckTypeAndMergeFrom(
670     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
671   MergeFrom(*::_pbi::DownCast<const Operation_Input*>(
672       &from));
673 }
674 
// Merges `from` into this message.  If `from` has an active oneof member it
// is merged into (or replaces) this message's member; unknown fields are
// concatenated.
void Operation_Input::MergeFrom(const Operation_Input& from) {
  Operation_Input* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.Operation.Input)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  switch (from.item_case()) {
    case kRemoteHandle: {
      _this->_internal_mutable_remote_handle()->::tensorflow::eager::RemoteTensorHandle::MergeFrom(
          from._internal_remote_handle());
      break;
    }
    case kTensor: {
      _this->_internal_mutable_tensor()->::tensorflow::TensorProto::MergeFrom(
          from._internal_tensor());
      break;
    }
    case ITEM_NOT_SET: {
      break;
    }
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
699 
CopyFrom(const Operation_Input & from)700 void Operation_Input::CopyFrom(const Operation_Input& from) {
701 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.Operation.Input)
702   if (&from == this) return;
703   Clear();
704   MergeFrom(from);
705 }
706 
// Always true: proto3 messages have no required fields to validate.
bool Operation_Input::IsInitialized() const {
  return true;
}
710 
// O(1) field-wise swap with `other`.  The raw oneof storage and its case
// discriminator are swapped together so they stay consistent.
void Operation_Input::InternalSwap(Operation_Input* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_impl_.item_, other->_impl_.item_);
  swap(_impl_._oneof_case_[0], other->_impl_._oneof_case_[0]);
}
717 
GetTypeName() const718 std::string Operation_Input::GetTypeName() const {
719   return "tensorflow.eager.Operation.Input";
720 }
721 
722 
723 // ===================================================================
724 
// Synthetic map-entry message backing Operation.attrs; not part of the
// public API (hence the _DoNotUse suffix).
Operation_AttrsEntry_DoNotUse::Operation_AttrsEntry_DoNotUse() {}
Operation_AttrsEntry_DoNotUse::Operation_AttrsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
    : SuperType(arena) {}
void Operation_AttrsEntry_DoNotUse::MergeFrom(const Operation_AttrsEntry_DoNotUse& other) {
  MergeFromInternal(other);
}
731 
732 // ===================================================================
733 
// Private accessor scaffold for Operation.  Empty because Operation has no
// singular submessage fields that need friend access from the serializer.
class Operation::_Internal {
 public:
};
737 
// Clears the `attrs` map field (map<string, AttrValue>).
void Operation::clear_attrs() {
  _impl_.attrs_.Clear();
}
// Arena constructor.  `is_message_owned` selects the message-owned-arena
// optimization in MessageLite.
Operation::Operation(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.Operation)
}
// Copy constructor: deep-copies repeated/map/string fields, then block-copies
// the contiguous run of trivially-copyable scalar members.
Operation::Operation(const Operation& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  Operation* const _this = this; (void)_this;
  // Repeated fields are copy-constructed directly; the map and strings are
  // filled in below because they need arena-aware initialization.
  new (&_impl_) Impl_{
      decltype(_impl_.control_op_ids_){from._impl_.control_op_ids_}
    , /*decltype(_impl_._control_op_ids_cached_byte_size_)*/{0}
    , /*decltype(_impl_.attrs_)*/{}
    , decltype(_impl_.op_inputs_){from._impl_.op_inputs_}
    , decltype(_impl_.name_){}
    , decltype(_impl_.device_){}
    , decltype(_impl_.id_){}
    , decltype(_impl_.func_step_id_){}
    , decltype(_impl_.is_component_function_){}
    , decltype(_impl_.is_function_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _this->_impl_.attrs_.MergeFrom(from._impl_.attrs_);
  _impl_.name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_name().empty()) {
    _this->_impl_.name_.Set(from._internal_name(),
      _this->GetArenaForAllocation());
  }
  _impl_.device_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.device_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_device().empty()) {
    _this->_impl_.device_.Set(from._internal_device(),
      _this->GetArenaForAllocation());
  }
  // Scalars id_, func_step_id_, is_component_function_, is_function_ are laid
  // out contiguously in Impl_, so a single memcpy over that span copies them.
  ::memcpy(&_impl_.id_, &from._impl_.id_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.is_function_) -
    reinterpret_cast<char*>(&_impl_.id_)) + sizeof(_impl_.is_function_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.Operation)
}
786 
// Shared constructor body: placement-constructs _impl_ with arena-aware
// containers and zeroed scalars, then initializes the two string fields to
// the shared empty-string default.
inline void Operation::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.control_op_ids_){arena}
    , /*decltype(_impl_._control_op_ids_cached_byte_size_)*/{0}
    , /*decltype(_impl_.attrs_)*/{::_pbi::ArenaInitialized(), arena}
    , decltype(_impl_.op_inputs_){arena}
    , decltype(_impl_.name_){}
    , decltype(_impl_.device_){}
    , decltype(_impl_.id_){::int64_t{0}}
    , decltype(_impl_.func_step_id_){::int64_t{0}}
    , decltype(_impl_.is_component_function_){false}
    , decltype(_impl_.is_function_){false}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  _impl_.device_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.device_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}
813 
Operation::~Operation() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.Operation)
  // When the message lives on an arena, all owned storage is freed with the
  // arena; skip the per-field destructor work in SharedDtor().
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
822 
// Heap-only destructor body: explicitly destroys each non-trivial member of
// _impl_ (which was placement-constructed, so no implicit destructor runs).
inline void Operation::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.control_op_ids_.~RepeatedField();
  _impl_.attrs_.Destruct();
  _impl_.attrs_.~MapFieldLite();
  _impl_.op_inputs_.~RepeatedPtrField();
  _impl_.name_.Destroy();
  _impl_.device_.Destroy();
}
832 
SetCachedSize(int size) const833 void Operation::SetCachedSize(int size) const {
834   _impl_._cached_size_.Set(size);
835 }
836 
// Resets every field to its default value and drops unknown fields.
void Operation::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.Operation)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.control_op_ids_.Clear();
  _impl_.attrs_.Clear();
  _impl_.op_inputs_.Clear();
  _impl_.name_.ClearToEmpty();
  _impl_.device_.ClearToEmpty();
  // Zero the contiguous scalar span id_ .. is_function_ in one memset
  // (mirrors the memcpy in the copy constructor).
  ::memset(&_impl_.id_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.is_function_) -
      reinterpret_cast<char*>(&_impl_.id_)) + sizeof(_impl_.is_function_));
  _internal_metadata_.Clear<std::string>();
}
853 
// Wire-format parser for Operation.  Dispatches on each field number; the
// literal compared against the low tag byte is (field_number << 3) | wire
// type.  Unmatched tags are preserved as unknown fields; returns nullptr on
// malformed input.
const char* Operation::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int64 id = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string name = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated int64 control_op_ids = 4;
      case 4:
        // Accept both packed (length-delimited, tag 34) and unpacked
        // (varint, tag 32) encodings, as required by proto3.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt64Parser(_internal_mutable_control_op_ids(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<::uint8_t>(tag) == 32) {
          _internal_add_control_op_ids(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // map<string, .tensorflow.AttrValue> attrs = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
          // Greedily consume consecutive map entries without re-entering the
          // tag dispatch loop.
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(&_impl_.attrs_, ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<42>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // string device = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
          auto str = _internal_mutable_device();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // bool is_component_function = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) {
          _impl_.is_component_function_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 func_step_id = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 64)) {
          _impl_.func_step_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool is_function = 9;
      case 9:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 72)) {
          _impl_.is_function_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.eager.Operation.Input op_inputs = 10;
      case 10:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 82)) {
          // Greedily consume consecutive op_inputs entries.
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_op_inputs(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<82>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or an end-group tag terminates this (sub)message.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
980 
// Serializes this message in field-number order.  Relies on the cached sizes
// computed by ByteSizeLong() for submessages and packed repeated fields.
::uint8_t* Operation::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.Operation)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 id = 1;
  if (this->_internal_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_id(), target);
  }

  // string name = 2;
  if (!this->_internal_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_name().data(), static_cast<int>(this->_internal_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.eager.Operation.name");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_name(), target);
  }

  // repeated int64 control_op_ids = 4;
  {
    // Packed encoding; the payload size was cached by ByteSizeLong().
    int byte_size = _impl_._control_op_ids_cached_byte_size_.load(std::memory_order_relaxed);
    if (byte_size > 0) {
      target = stream->WriteInt64Packed(
          4, _internal_control_op_ids(), byte_size, target);
    }
  }

  // map<string, .tensorflow.AttrValue> attrs = 5;
  if (!this->_internal_attrs().empty()) {
    using MapType = ::_pb::Map<std::string, ::tensorflow::AttrValue>;
    using WireHelper = Operation_AttrsEntry_DoNotUse::Funcs;
    const auto& map_field = this->_internal_attrs();
    auto check_utf8 = [](const MapType::value_type& entry) {
      (void)entry;
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
        entry.first.data(), static_cast<int>(entry.first.length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
        "tensorflow.eager.Operation.AttrsEntry.key");
    };

    // Deterministic serialization sorts entries by key; otherwise emit in
    // hash-map iteration order.
    if (stream->IsSerializationDeterministic() && map_field.size() > 1) {
      for (const auto& entry : ::_pbi::MapSorterPtr<MapType>(map_field)) {
        target = WireHelper::InternalSerialize(5, entry.first, entry.second, target, stream);
        check_utf8(entry);
      }
    } else {
      for (const auto& entry : map_field) {
        target = WireHelper::InternalSerialize(5, entry.first, entry.second, target, stream);
        check_utf8(entry);
      }
    }
  }

  // string device = 6;
  if (!this->_internal_device().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_device().data(), static_cast<int>(this->_internal_device().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.eager.Operation.device");
    target = stream->WriteStringMaybeAliased(
        6, this->_internal_device(), target);
  }

  // bool is_component_function = 7;
  if (this->_internal_is_component_function() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(7, this->_internal_is_component_function(), target);
  }

  // int64 func_step_id = 8;
  if (this->_internal_func_step_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(8, this->_internal_func_step_id(), target);
  }

  // bool is_function = 9;
  if (this->_internal_is_function() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(9, this->_internal_is_function(), target);
  }

  // repeated .tensorflow.eager.Operation.Input op_inputs = 10;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_op_inputs_size()); i < n; i++) {
    const auto& repfield = this->_internal_op_inputs(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(10, repfield, repfield.GetCachedSize(), target, stream);
  }

  // Unknown fields are stored pre-encoded (lite runtime) and appended raw.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.Operation)
  return target;
}
1081 
// Computes the serialized size in bytes, caching per-field sizes (the packed
// control_op_ids payload and the whole-message cached size) for reuse by
// _InternalSerialize().
size_t Operation::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.Operation)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated int64 control_op_ids = 4;
  {
    size_t data_size = ::_pbi::WireFormatLite::
      Int64Size(this->_impl_.control_op_ids_);
    if (data_size > 0) {
      // Packed field overhead: one tag byte plus the varint length prefix.
      total_size += 1 +
        ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
    }
    int cached_size = ::_pbi::ToCachedSize(data_size);
    _impl_._control_op_ids_cached_byte_size_.store(cached_size,
                                    std::memory_order_relaxed);
    total_size += data_size;
  }

  // map<string, .tensorflow.AttrValue> attrs = 5;
  // One tag byte per entry plus each entry's own serialized size.
  total_size += 1 *
      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_attrs_size());
  for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::tensorflow::AttrValue >::const_iterator
      it = this->_internal_attrs().begin();
      it != this->_internal_attrs().end(); ++it) {
    total_size += Operation_AttrsEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
  }

  // repeated .tensorflow.eager.Operation.Input op_inputs = 10;
  total_size += 1UL * this->_internal_op_inputs_size();
  for (const auto& msg : this->_impl_.op_inputs_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // string name = 2;
  if (!this->_internal_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_name());
  }

  // string device = 6;
  if (!this->_internal_device().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_device());
  }

  // int64 id = 1;
  if (this->_internal_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_id());
  }

  // int64 func_step_id = 8;
  if (this->_internal_func_step_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_func_step_id());
  }

  // bool is_component_function = 7;
  if (this->_internal_is_component_function() != 0) {
    total_size += 1 + 1;
  }

  // bool is_function = 9;
  if (this->_internal_is_function() != 0) {
    total_size += 1 + 1;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
1161 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)1162 void Operation::CheckTypeAndMergeFrom(
1163     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
1164   MergeFrom(*::_pbi::DownCast<const Operation*>(
1165       &from));
1166 }
1167 
// Merges `from` into this message: repeated/map fields are appended/merged,
// non-default scalars and strings in `from` overwrite this message's values,
// and unknown fields are concatenated.
void Operation::MergeFrom(const Operation& from) {
  Operation* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.Operation)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.control_op_ids_.MergeFrom(from._impl_.control_op_ids_);
  _this->_impl_.attrs_.MergeFrom(from._impl_.attrs_);
  _this->_impl_.op_inputs_.MergeFrom(from._impl_.op_inputs_);
  if (!from._internal_name().empty()) {
    _this->_internal_set_name(from._internal_name());
  }
  if (!from._internal_device().empty()) {
    _this->_internal_set_device(from._internal_device());
  }
  if (from._internal_id() != 0) {
    _this->_internal_set_id(from._internal_id());
  }
  if (from._internal_func_step_id() != 0) {
    _this->_internal_set_func_step_id(from._internal_func_step_id());
  }
  if (from._internal_is_component_function() != 0) {
    _this->_internal_set_is_component_function(from._internal_is_component_function());
  }
  if (from._internal_is_function() != 0) {
    _this->_internal_set_is_function(from._internal_is_function());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
1198 
CopyFrom(const Operation & from)1199 void Operation::CopyFrom(const Operation& from) {
1200 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.Operation)
1201   if (&from == this) return;
1202   Clear();
1203   MergeFrom(from);
1204 }
1205 
// Always true: proto3 messages have no required fields to validate.
bool Operation::IsInitialized() const {
  return true;
}
1209 
// O(1) field-wise swap with `other`.  Strings need both arenas because an
// ArenaStringPtr swap may have to copy across arenas; the trailing scalar
// span id_ .. is_function_ is swapped with a single memswap over its bytes.
void Operation::InternalSwap(Operation* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.control_op_ids_.InternalSwap(&other->_impl_.control_op_ids_);
  _impl_.attrs_.InternalSwap(&other->_impl_.attrs_);
  _impl_.op_inputs_.InternalSwap(&other->_impl_.op_inputs_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.name_, lhs_arena,
      &other->_impl_.name_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.device_, lhs_arena,
      &other->_impl_.device_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(Operation, _impl_.is_function_)
      + sizeof(Operation::_impl_.is_function_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(Operation, _impl_.id_)>(
          reinterpret_cast<char*>(&_impl_.id_),
          reinterpret_cast<char*>(&other->_impl_.id_));
}
1233 
GetTypeName() const1234 std::string Operation::GetTypeName() const {
1235   return "tensorflow.eager.Operation";
1236 }
1237 
1238 
1239 // ===================================================================
1240 
// Private accessors for QueueItem's oneof submessage pointers; lets the
// generated serializer read the raw pointers without public-API overhead.
class QueueItem::_Internal {
 public:
  static const ::tensorflow::eager::RemoteTensorHandle& handle_to_decref(const QueueItem* msg);
  static const ::tensorflow::eager::Operation& operation(const QueueItem* msg);
  static const ::tensorflow::eager::SendTensorOp& send_tensor(const QueueItem* msg);
  static const ::tensorflow::eager::RegisterFunctionOp& register_function(const QueueItem* msg);
  static const ::tensorflow::eager::CleanupFunctionOp& cleanup_function(const QueueItem* msg);
  static const ::tensorflow::eager::SyncRemoteExecutorForStream& sync_remote_executor_for_stream(const QueueItem* msg);
  static const ::tensorflow::eager::SendPackedHandleOp& send_packed_handle(const QueueItem* msg);
};
1251 
1252 const ::tensorflow::eager::RemoteTensorHandle&
handle_to_decref(const QueueItem * msg)1253 QueueItem::_Internal::handle_to_decref(const QueueItem* msg) {
1254   return *msg->_impl_.item_.handle_to_decref_;
1255 }
1256 const ::tensorflow::eager::Operation&
operation(const QueueItem * msg)1257 QueueItem::_Internal::operation(const QueueItem* msg) {
1258   return *msg->_impl_.item_.operation_;
1259 }
1260 const ::tensorflow::eager::SendTensorOp&
send_tensor(const QueueItem * msg)1261 QueueItem::_Internal::send_tensor(const QueueItem* msg) {
1262   return *msg->_impl_.item_.send_tensor_;
1263 }
1264 const ::tensorflow::eager::RegisterFunctionOp&
register_function(const QueueItem * msg)1265 QueueItem::_Internal::register_function(const QueueItem* msg) {
1266   return *msg->_impl_.item_.register_function_;
1267 }
// Dereferences the `cleanup_function` oneof member; only valid while that
// case is active.
const ::tensorflow::eager::CleanupFunctionOp&
QueueItem::_Internal::cleanup_function(const QueueItem* msg) {
  return *msg->_impl_.item_.cleanup_function_;
}
// Dereferences the `sync_remote_executor_for_stream` oneof member; only valid
// while that case is active.
const ::tensorflow::eager::SyncRemoteExecutorForStream&
QueueItem::_Internal::sync_remote_executor_for_stream(const QueueItem* msg) {
  return *msg->_impl_.item_.sync_remote_executor_for_stream_;
}
// Dereferences the `send_packed_handle` oneof member; only valid while that
// case is active.
const ::tensorflow::eager::SendPackedHandleOp&
QueueItem::_Internal::send_packed_handle(const QueueItem* msg) {
  return *msg->_impl_.item_.send_packed_handle_;
}
set_allocated_handle_to_decref(::tensorflow::eager::RemoteTensorHandle * handle_to_decref)1280 void QueueItem::set_allocated_handle_to_decref(::tensorflow::eager::RemoteTensorHandle* handle_to_decref) {
1281   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
1282   clear_item();
1283   if (handle_to_decref) {
1284     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
1285         ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
1286                 reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(handle_to_decref));
1287     if (message_arena != submessage_arena) {
1288       handle_to_decref = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
1289           message_arena, handle_to_decref, submessage_arena);
1290     }
1291     set_has_handle_to_decref();
1292     _impl_.item_.handle_to_decref_ = handle_to_decref;
1293   }
1294   // @@protoc_insertion_point(field_set_allocated:tensorflow.eager.QueueItem.handle_to_decref)
1295 }
clear_handle_to_decref()1296 void QueueItem::clear_handle_to_decref() {
1297   if (_internal_has_handle_to_decref()) {
1298     if (GetArenaForAllocation() == nullptr) {
1299       delete _impl_.item_.handle_to_decref_;
1300     }
1301     clear_has_item();
1302   }
1303 }
set_allocated_operation(::tensorflow::eager::Operation * operation)1304 void QueueItem::set_allocated_operation(::tensorflow::eager::Operation* operation) {
1305   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
1306   clear_item();
1307   if (operation) {
1308     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
1309       ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(operation);
1310     if (message_arena != submessage_arena) {
1311       operation = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
1312           message_arena, operation, submessage_arena);
1313     }
1314     set_has_operation();
1315     _impl_.item_.operation_ = operation;
1316   }
1317   // @@protoc_insertion_point(field_set_allocated:tensorflow.eager.QueueItem.operation)
1318 }
set_allocated_send_tensor(::tensorflow::eager::SendTensorOp * send_tensor)1319 void QueueItem::set_allocated_send_tensor(::tensorflow::eager::SendTensorOp* send_tensor) {
1320   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
1321   clear_item();
1322   if (send_tensor) {
1323     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
1324       ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(send_tensor);
1325     if (message_arena != submessage_arena) {
1326       send_tensor = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
1327           message_arena, send_tensor, submessage_arena);
1328     }
1329     set_has_send_tensor();
1330     _impl_.item_.send_tensor_ = send_tensor;
1331   }
1332   // @@protoc_insertion_point(field_set_allocated:tensorflow.eager.QueueItem.send_tensor)
1333 }
set_allocated_register_function(::tensorflow::eager::RegisterFunctionOp * register_function)1334 void QueueItem::set_allocated_register_function(::tensorflow::eager::RegisterFunctionOp* register_function) {
1335   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
1336   clear_item();
1337   if (register_function) {
1338     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
1339       ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(register_function);
1340     if (message_arena != submessage_arena) {
1341       register_function = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
1342           message_arena, register_function, submessage_arena);
1343     }
1344     set_has_register_function();
1345     _impl_.item_.register_function_ = register_function;
1346   }
1347   // @@protoc_insertion_point(field_set_allocated:tensorflow.eager.QueueItem.register_function)
1348 }
set_allocated_cleanup_function(::tensorflow::eager::CleanupFunctionOp * cleanup_function)1349 void QueueItem::set_allocated_cleanup_function(::tensorflow::eager::CleanupFunctionOp* cleanup_function) {
1350   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
1351   clear_item();
1352   if (cleanup_function) {
1353     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
1354       ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(cleanup_function);
1355     if (message_arena != submessage_arena) {
1356       cleanup_function = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
1357           message_arena, cleanup_function, submessage_arena);
1358     }
1359     set_has_cleanup_function();
1360     _impl_.item_.cleanup_function_ = cleanup_function;
1361   }
1362   // @@protoc_insertion_point(field_set_allocated:tensorflow.eager.QueueItem.cleanup_function)
1363 }
set_allocated_sync_remote_executor_for_stream(::tensorflow::eager::SyncRemoteExecutorForStream * sync_remote_executor_for_stream)1364 void QueueItem::set_allocated_sync_remote_executor_for_stream(::tensorflow::eager::SyncRemoteExecutorForStream* sync_remote_executor_for_stream) {
1365   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
1366   clear_item();
1367   if (sync_remote_executor_for_stream) {
1368     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
1369       ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(sync_remote_executor_for_stream);
1370     if (message_arena != submessage_arena) {
1371       sync_remote_executor_for_stream = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
1372           message_arena, sync_remote_executor_for_stream, submessage_arena);
1373     }
1374     set_has_sync_remote_executor_for_stream();
1375     _impl_.item_.sync_remote_executor_for_stream_ = sync_remote_executor_for_stream;
1376   }
1377   // @@protoc_insertion_point(field_set_allocated:tensorflow.eager.QueueItem.sync_remote_executor_for_stream)
1378 }
set_allocated_send_packed_handle(::tensorflow::eager::SendPackedHandleOp * send_packed_handle)1379 void QueueItem::set_allocated_send_packed_handle(::tensorflow::eager::SendPackedHandleOp* send_packed_handle) {
1380   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
1381   clear_item();
1382   if (send_packed_handle) {
1383     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
1384       ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(send_packed_handle);
1385     if (message_arena != submessage_arena) {
1386       send_packed_handle = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
1387           message_arena, send_packed_handle, submessage_arena);
1388     }
1389     set_has_send_packed_handle();
1390     _impl_.item_.send_packed_handle_ = send_packed_handle;
1391   }
1392   // @@protoc_insertion_point(field_set_allocated:tensorflow.eager.QueueItem.send_packed_handle)
1393 }
QueueItem(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)1394 QueueItem::QueueItem(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1395                          bool is_message_owned)
1396   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
1397   SharedCtor(arena, is_message_owned);
1398   // @@protoc_insertion_point(arena_constructor:tensorflow.eager.QueueItem)
1399 }
QueueItem(const QueueItem & from)1400 QueueItem::QueueItem(const QueueItem& from)
1401   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
1402   QueueItem* const _this = this; (void)_this;
1403   new (&_impl_) Impl_{
1404       decltype(_impl_.item_){}
1405     , /*decltype(_impl_._cached_size_)*/{}
1406     , /*decltype(_impl_._oneof_case_)*/{}};
1407 
1408   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1409   clear_has_item();
1410   switch (from.item_case()) {
1411     case kHandleToDecref: {
1412       _this->_internal_mutable_handle_to_decref()->::tensorflow::eager::RemoteTensorHandle::MergeFrom(
1413           from._internal_handle_to_decref());
1414       break;
1415     }
1416     case kOperation: {
1417       _this->_internal_mutable_operation()->::tensorflow::eager::Operation::MergeFrom(
1418           from._internal_operation());
1419       break;
1420     }
1421     case kSendTensor: {
1422       _this->_internal_mutable_send_tensor()->::tensorflow::eager::SendTensorOp::MergeFrom(
1423           from._internal_send_tensor());
1424       break;
1425     }
1426     case kRegisterFunction: {
1427       _this->_internal_mutable_register_function()->::tensorflow::eager::RegisterFunctionOp::MergeFrom(
1428           from._internal_register_function());
1429       break;
1430     }
1431     case kCleanupFunction: {
1432       _this->_internal_mutable_cleanup_function()->::tensorflow::eager::CleanupFunctionOp::MergeFrom(
1433           from._internal_cleanup_function());
1434       break;
1435     }
1436     case kSyncRemoteExecutorForStream: {
1437       _this->_internal_mutable_sync_remote_executor_for_stream()->::tensorflow::eager::SyncRemoteExecutorForStream::MergeFrom(
1438           from._internal_sync_remote_executor_for_stream());
1439       break;
1440     }
1441     case kSendPackedHandle: {
1442       _this->_internal_mutable_send_packed_handle()->::tensorflow::eager::SendPackedHandleOp::MergeFrom(
1443           from._internal_send_packed_handle());
1444       break;
1445     }
1446     case ITEM_NOT_SET: {
1447       break;
1448     }
1449   }
1450   // @@protoc_insertion_point(copy_constructor:tensorflow.eager.QueueItem)
1451 }
1452 
SharedCtor(::_pb::Arena * arena,bool is_message_owned)1453 inline void QueueItem::SharedCtor(
1454     ::_pb::Arena* arena, bool is_message_owned) {
1455   (void)arena;
1456   (void)is_message_owned;
1457   new (&_impl_) Impl_{
1458       decltype(_impl_.item_){}
1459     , /*decltype(_impl_._cached_size_)*/{}
1460     , /*decltype(_impl_._oneof_case_)*/{}
1461   };
1462   clear_has_item();
1463 }
1464 
~QueueItem()1465 QueueItem::~QueueItem() {
1466   // @@protoc_insertion_point(destructor:tensorflow.eager.QueueItem)
1467   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
1468   (void)arena;
1469     return;
1470   }
1471   SharedDtor();
1472 }
1473 
SharedDtor()1474 inline void QueueItem::SharedDtor() {
1475   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
1476   if (has_item()) {
1477     clear_item();
1478   }
1479 }
1480 
SetCachedSize(int size) const1481 void QueueItem::SetCachedSize(int size) const {
1482   _impl_._cached_size_.Set(size);
1483 }
1484 
clear_item()1485 void QueueItem::clear_item() {
1486 // @@protoc_insertion_point(one_of_clear_start:tensorflow.eager.QueueItem)
1487   switch (item_case()) {
1488     case kHandleToDecref: {
1489       if (GetArenaForAllocation() == nullptr) {
1490         delete _impl_.item_.handle_to_decref_;
1491       }
1492       break;
1493     }
1494     case kOperation: {
1495       if (GetArenaForAllocation() == nullptr) {
1496         delete _impl_.item_.operation_;
1497       }
1498       break;
1499     }
1500     case kSendTensor: {
1501       if (GetArenaForAllocation() == nullptr) {
1502         delete _impl_.item_.send_tensor_;
1503       }
1504       break;
1505     }
1506     case kRegisterFunction: {
1507       if (GetArenaForAllocation() == nullptr) {
1508         delete _impl_.item_.register_function_;
1509       }
1510       break;
1511     }
1512     case kCleanupFunction: {
1513       if (GetArenaForAllocation() == nullptr) {
1514         delete _impl_.item_.cleanup_function_;
1515       }
1516       break;
1517     }
1518     case kSyncRemoteExecutorForStream: {
1519       if (GetArenaForAllocation() == nullptr) {
1520         delete _impl_.item_.sync_remote_executor_for_stream_;
1521       }
1522       break;
1523     }
1524     case kSendPackedHandle: {
1525       if (GetArenaForAllocation() == nullptr) {
1526         delete _impl_.item_.send_packed_handle_;
1527       }
1528       break;
1529     }
1530     case ITEM_NOT_SET: {
1531       break;
1532     }
1533   }
1534   _impl_._oneof_case_[0] = ITEM_NOT_SET;
1535 }
1536 
1537 
Clear()1538 void QueueItem::Clear() {
1539 // @@protoc_insertion_point(message_clear_start:tensorflow.eager.QueueItem)
1540   ::uint32_t cached_has_bits = 0;
1541   // Prevent compiler warnings about cached_has_bits being unused
1542   (void) cached_has_bits;
1543 
1544   clear_item();
1545   _internal_metadata_.Clear<std::string>();
1546 }
1547 
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)1548 const char* QueueItem::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
1549 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1550   while (!ctx->Done(&ptr)) {
1551     ::uint32_t tag;
1552     ptr = ::_pbi::ReadTag(ptr, &tag);
1553     switch (tag >> 3) {
1554       // .tensorflow.eager.RemoteTensorHandle handle_to_decref = 1;
1555       case 1:
1556         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
1557           ptr = ctx->ParseMessage(_internal_mutable_handle_to_decref(), ptr);
1558           CHK_(ptr);
1559         } else {
1560           goto handle_unusual;
1561         }
1562         continue;
1563       // .tensorflow.eager.Operation operation = 2;
1564       case 2:
1565         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
1566           ptr = ctx->ParseMessage(_internal_mutable_operation(), ptr);
1567           CHK_(ptr);
1568         } else {
1569           goto handle_unusual;
1570         }
1571         continue;
1572       // .tensorflow.eager.SendTensorOp send_tensor = 3;
1573       case 3:
1574         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
1575           ptr = ctx->ParseMessage(_internal_mutable_send_tensor(), ptr);
1576           CHK_(ptr);
1577         } else {
1578           goto handle_unusual;
1579         }
1580         continue;
1581       // .tensorflow.eager.RegisterFunctionOp register_function = 4;
1582       case 4:
1583         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
1584           ptr = ctx->ParseMessage(_internal_mutable_register_function(), ptr);
1585           CHK_(ptr);
1586         } else {
1587           goto handle_unusual;
1588         }
1589         continue;
1590       // .tensorflow.eager.CleanupFunctionOp cleanup_function = 5;
1591       case 5:
1592         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
1593           ptr = ctx->ParseMessage(_internal_mutable_cleanup_function(), ptr);
1594           CHK_(ptr);
1595         } else {
1596           goto handle_unusual;
1597         }
1598         continue;
1599       // .tensorflow.eager.SyncRemoteExecutorForStream sync_remote_executor_for_stream = 6;
1600       case 6:
1601         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
1602           ptr = ctx->ParseMessage(_internal_mutable_sync_remote_executor_for_stream(), ptr);
1603           CHK_(ptr);
1604         } else {
1605           goto handle_unusual;
1606         }
1607         continue;
1608       // .tensorflow.eager.SendPackedHandleOp send_packed_handle = 7;
1609       case 7:
1610         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 58)) {
1611           ptr = ctx->ParseMessage(_internal_mutable_send_packed_handle(), ptr);
1612           CHK_(ptr);
1613         } else {
1614           goto handle_unusual;
1615         }
1616         continue;
1617       default:
1618         goto handle_unusual;
1619     }  // switch
1620   handle_unusual:
1621     if ((tag == 0) || ((tag & 7) == 4)) {
1622       CHK_(ptr);
1623       ctx->SetLastTag(tag);
1624       goto message_done;
1625     }
1626     ptr = UnknownFieldParse(
1627         tag,
1628         _internal_metadata_.mutable_unknown_fields<std::string>(),
1629         ptr, ctx);
1630     CHK_(ptr != nullptr);
1631   }  // while
1632 message_done:
1633   return ptr;
1634 failure:
1635   ptr = nullptr;
1636   goto message_done;
1637 #undef CHK_
1638 }
1639 
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const1640 ::uint8_t* QueueItem::_InternalSerialize(
1641     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
1642   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.QueueItem)
1643   ::uint32_t cached_has_bits = 0;
1644   (void) cached_has_bits;
1645 
1646   // .tensorflow.eager.RemoteTensorHandle handle_to_decref = 1;
1647   if (_internal_has_handle_to_decref()) {
1648     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1649       InternalWriteMessage(1, _Internal::handle_to_decref(this),
1650         _Internal::handle_to_decref(this).GetCachedSize(), target, stream);
1651   }
1652 
1653   // .tensorflow.eager.Operation operation = 2;
1654   if (_internal_has_operation()) {
1655     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1656       InternalWriteMessage(2, _Internal::operation(this),
1657         _Internal::operation(this).GetCachedSize(), target, stream);
1658   }
1659 
1660   // .tensorflow.eager.SendTensorOp send_tensor = 3;
1661   if (_internal_has_send_tensor()) {
1662     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1663       InternalWriteMessage(3, _Internal::send_tensor(this),
1664         _Internal::send_tensor(this).GetCachedSize(), target, stream);
1665   }
1666 
1667   // .tensorflow.eager.RegisterFunctionOp register_function = 4;
1668   if (_internal_has_register_function()) {
1669     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1670       InternalWriteMessage(4, _Internal::register_function(this),
1671         _Internal::register_function(this).GetCachedSize(), target, stream);
1672   }
1673 
1674   // .tensorflow.eager.CleanupFunctionOp cleanup_function = 5;
1675   if (_internal_has_cleanup_function()) {
1676     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1677       InternalWriteMessage(5, _Internal::cleanup_function(this),
1678         _Internal::cleanup_function(this).GetCachedSize(), target, stream);
1679   }
1680 
1681   // .tensorflow.eager.SyncRemoteExecutorForStream sync_remote_executor_for_stream = 6;
1682   if (_internal_has_sync_remote_executor_for_stream()) {
1683     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1684       InternalWriteMessage(6, _Internal::sync_remote_executor_for_stream(this),
1685         _Internal::sync_remote_executor_for_stream(this).GetCachedSize(), target, stream);
1686   }
1687 
1688   // .tensorflow.eager.SendPackedHandleOp send_packed_handle = 7;
1689   if (_internal_has_send_packed_handle()) {
1690     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1691       InternalWriteMessage(7, _Internal::send_packed_handle(this),
1692         _Internal::send_packed_handle(this).GetCachedSize(), target, stream);
1693   }
1694 
1695   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1696     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
1697         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
1698   }
1699   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.QueueItem)
1700   return target;
1701 }
1702 
ByteSizeLong() const1703 size_t QueueItem::ByteSizeLong() const {
1704 // @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.QueueItem)
1705   size_t total_size = 0;
1706 
1707   switch (item_case()) {
1708     // .tensorflow.eager.RemoteTensorHandle handle_to_decref = 1;
1709     case kHandleToDecref: {
1710       total_size += 1 +
1711         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1712           *_impl_.item_.handle_to_decref_);
1713       break;
1714     }
1715     // .tensorflow.eager.Operation operation = 2;
1716     case kOperation: {
1717       total_size += 1 +
1718         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1719           *_impl_.item_.operation_);
1720       break;
1721     }
1722     // .tensorflow.eager.SendTensorOp send_tensor = 3;
1723     case kSendTensor: {
1724       total_size += 1 +
1725         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1726           *_impl_.item_.send_tensor_);
1727       break;
1728     }
1729     // .tensorflow.eager.RegisterFunctionOp register_function = 4;
1730     case kRegisterFunction: {
1731       total_size += 1 +
1732         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1733           *_impl_.item_.register_function_);
1734       break;
1735     }
1736     // .tensorflow.eager.CleanupFunctionOp cleanup_function = 5;
1737     case kCleanupFunction: {
1738       total_size += 1 +
1739         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1740           *_impl_.item_.cleanup_function_);
1741       break;
1742     }
1743     // .tensorflow.eager.SyncRemoteExecutorForStream sync_remote_executor_for_stream = 6;
1744     case kSyncRemoteExecutorForStream: {
1745       total_size += 1 +
1746         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1747           *_impl_.item_.sync_remote_executor_for_stream_);
1748       break;
1749     }
1750     // .tensorflow.eager.SendPackedHandleOp send_packed_handle = 7;
1751     case kSendPackedHandle: {
1752       total_size += 1 +
1753         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1754           *_impl_.item_.send_packed_handle_);
1755       break;
1756     }
1757     case ITEM_NOT_SET: {
1758       break;
1759     }
1760   }
1761   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1762     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
1763   }
1764   int cached_size = ::_pbi::ToCachedSize(total_size);
1765   SetCachedSize(cached_size);
1766   return total_size;
1767 }
1768 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)1769 void QueueItem::CheckTypeAndMergeFrom(
1770     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
1771   MergeFrom(*::_pbi::DownCast<const QueueItem*>(
1772       &from));
1773 }
1774 
MergeFrom(const QueueItem & from)1775 void QueueItem::MergeFrom(const QueueItem& from) {
1776   QueueItem* const _this = this;
1777   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.QueueItem)
1778   GOOGLE_DCHECK_NE(&from, _this);
1779   ::uint32_t cached_has_bits = 0;
1780   (void) cached_has_bits;
1781 
1782   switch (from.item_case()) {
1783     case kHandleToDecref: {
1784       _this->_internal_mutable_handle_to_decref()->::tensorflow::eager::RemoteTensorHandle::MergeFrom(
1785           from._internal_handle_to_decref());
1786       break;
1787     }
1788     case kOperation: {
1789       _this->_internal_mutable_operation()->::tensorflow::eager::Operation::MergeFrom(
1790           from._internal_operation());
1791       break;
1792     }
1793     case kSendTensor: {
1794       _this->_internal_mutable_send_tensor()->::tensorflow::eager::SendTensorOp::MergeFrom(
1795           from._internal_send_tensor());
1796       break;
1797     }
1798     case kRegisterFunction: {
1799       _this->_internal_mutable_register_function()->::tensorflow::eager::RegisterFunctionOp::MergeFrom(
1800           from._internal_register_function());
1801       break;
1802     }
1803     case kCleanupFunction: {
1804       _this->_internal_mutable_cleanup_function()->::tensorflow::eager::CleanupFunctionOp::MergeFrom(
1805           from._internal_cleanup_function());
1806       break;
1807     }
1808     case kSyncRemoteExecutorForStream: {
1809       _this->_internal_mutable_sync_remote_executor_for_stream()->::tensorflow::eager::SyncRemoteExecutorForStream::MergeFrom(
1810           from._internal_sync_remote_executor_for_stream());
1811       break;
1812     }
1813     case kSendPackedHandle: {
1814       _this->_internal_mutable_send_packed_handle()->::tensorflow::eager::SendPackedHandleOp::MergeFrom(
1815           from._internal_send_packed_handle());
1816       break;
1817     }
1818     case ITEM_NOT_SET: {
1819       break;
1820     }
1821   }
1822   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1823 }
1824 
CopyFrom(const QueueItem & from)1825 void QueueItem::CopyFrom(const QueueItem& from) {
1826 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.QueueItem)
1827   if (&from == this) return;
1828   Clear();
1829   MergeFrom(from);
1830 }
1831 
IsInitialized() const1832 bool QueueItem::IsInitialized() const {
1833   return true;
1834 }
1835 
InternalSwap(QueueItem * other)1836 void QueueItem::InternalSwap(QueueItem* other) {
1837   using std::swap;
1838   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
1839   swap(_impl_.item_, other->_impl_.item_);
1840   swap(_impl_._oneof_case_[0], other->_impl_._oneof_case_[0]);
1841 }
1842 
GetTypeName() const1843 std::string QueueItem::GetTypeName() const {
1844   return "tensorflow.eager.QueueItem";
1845 }
1846 
1847 
1848 // ===================================================================
1849 
// QueueResponse needs no internal accessors: it has no oneofs or
// has-bit-tracked singular message fields.
class QueueResponse::_Internal {
 public:
};
1853 
clear_shape()1854 void QueueResponse::clear_shape() {
1855   _impl_.shape_.Clear();
1856 }
clear_tensor()1857 void QueueResponse::clear_tensor() {
1858   _impl_.tensor_.Clear();
1859 }
QueueResponse(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)1860 QueueResponse::QueueResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1861                          bool is_message_owned)
1862   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
1863   SharedCtor(arena, is_message_owned);
1864   // @@protoc_insertion_point(arena_constructor:tensorflow.eager.QueueResponse)
1865 }
QueueResponse(const QueueResponse & from)1866 QueueResponse::QueueResponse(const QueueResponse& from)
1867   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
1868   QueueResponse* const _this = this; (void)_this;
1869   new (&_impl_) Impl_{
1870       decltype(_impl_.shape_){from._impl_.shape_}
1871     , decltype(_impl_.tensor_){from._impl_.tensor_}
1872     , decltype(_impl_.device_){from._impl_.device_}
1873     , /*decltype(_impl_._cached_size_)*/{}};
1874 
1875   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1876   // @@protoc_insertion_point(copy_constructor:tensorflow.eager.QueueResponse)
1877 }
1878 
SharedCtor(::_pb::Arena * arena,bool is_message_owned)1879 inline void QueueResponse::SharedCtor(
1880     ::_pb::Arena* arena, bool is_message_owned) {
1881   (void)arena;
1882   (void)is_message_owned;
1883   new (&_impl_) Impl_{
1884       decltype(_impl_.shape_){arena}
1885     , decltype(_impl_.tensor_){arena}
1886     , decltype(_impl_.device_){arena}
1887     , /*decltype(_impl_._cached_size_)*/{}
1888   };
1889 }
1890 
~QueueResponse()1891 QueueResponse::~QueueResponse() {
1892   // @@protoc_insertion_point(destructor:tensorflow.eager.QueueResponse)
1893   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
1894   (void)arena;
1895     return;
1896   }
1897   SharedDtor();
1898 }
1899 
SharedDtor()1900 inline void QueueResponse::SharedDtor() {
1901   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
1902   _impl_.shape_.~RepeatedPtrField();
1903   _impl_.tensor_.~RepeatedPtrField();
1904   _impl_.device_.~RepeatedPtrField();
1905 }
1906 
SetCachedSize(int size) const1907 void QueueResponse::SetCachedSize(int size) const {
1908   _impl_._cached_size_.Set(size);
1909 }
1910 
Clear()1911 void QueueResponse::Clear() {
1912 // @@protoc_insertion_point(message_clear_start:tensorflow.eager.QueueResponse)
1913   ::uint32_t cached_has_bits = 0;
1914   // Prevent compiler warnings about cached_has_bits being unused
1915   (void) cached_has_bits;
1916 
1917   _impl_.shape_.Clear();
1918   _impl_.tensor_.Clear();
1919   _impl_.device_.Clear();
1920   _internal_metadata_.Clear<std::string>();
1921 }
1922 
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)1923 const char* QueueResponse::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
1924 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1925   while (!ctx->Done(&ptr)) {
1926     ::uint32_t tag;
1927     ptr = ::_pbi::ReadTag(ptr, &tag);
1928     switch (tag >> 3) {
1929       // repeated .tensorflow.TensorShapeProto shape = 1;
1930       case 1:
1931         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
1932           ptr -= 1;
1933           do {
1934             ptr += 1;
1935             ptr = ctx->ParseMessage(_internal_add_shape(), ptr);
1936             CHK_(ptr);
1937             if (!ctx->DataAvailable(ptr)) break;
1938           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
1939         } else {
1940           goto handle_unusual;
1941         }
1942         continue;
1943       // repeated .tensorflow.TensorProto tensor = 2;
1944       case 2:
1945         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
1946           ptr -= 1;
1947           do {
1948             ptr += 1;
1949             ptr = ctx->ParseMessage(_internal_add_tensor(), ptr);
1950             CHK_(ptr);
1951             if (!ctx->DataAvailable(ptr)) break;
1952           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr));
1953         } else {
1954           goto handle_unusual;
1955         }
1956         continue;
1957       // repeated string device = 3;
1958       case 3:
1959         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
1960           ptr -= 1;
1961           do {
1962             ptr += 1;
1963             auto str = _internal_add_device();
1964             ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
1965             CHK_(ptr);
1966             CHK_(::_pbi::VerifyUTF8(str, nullptr));
1967             if (!ctx->DataAvailable(ptr)) break;
1968           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<26>(ptr));
1969         } else {
1970           goto handle_unusual;
1971         }
1972         continue;
1973       default:
1974         goto handle_unusual;
1975     }  // switch
1976   handle_unusual:
1977     if ((tag == 0) || ((tag & 7) == 4)) {
1978       CHK_(ptr);
1979       ctx->SetLastTag(tag);
1980       goto message_done;
1981     }
1982     ptr = UnknownFieldParse(
1983         tag,
1984         _internal_metadata_.mutable_unknown_fields<std::string>(),
1985         ptr, ctx);
1986     CHK_(ptr != nullptr);
1987   }  // while
1988 message_done:
1989   return ptr;
1990 failure:
1991   ptr = nullptr;
1992   goto message_done;
1993 #undef CHK_
1994 }
1995 
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const1996 ::uint8_t* QueueResponse::_InternalSerialize(
1997     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
1998   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.QueueResponse)
1999   ::uint32_t cached_has_bits = 0;
2000   (void) cached_has_bits;
2001 
2002   // repeated .tensorflow.TensorShapeProto shape = 1;
2003   for (unsigned i = 0,
2004       n = static_cast<unsigned>(this->_internal_shape_size()); i < n; i++) {
2005     const auto& repfield = this->_internal_shape(i);
2006     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
2007         InternalWriteMessage(1, repfield, repfield.GetCachedSize(), target, stream);
2008   }
2009 
2010   // repeated .tensorflow.TensorProto tensor = 2;
2011   for (unsigned i = 0,
2012       n = static_cast<unsigned>(this->_internal_tensor_size()); i < n; i++) {
2013     const auto& repfield = this->_internal_tensor(i);
2014     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
2015         InternalWriteMessage(2, repfield, repfield.GetCachedSize(), target, stream);
2016   }
2017 
2018   // repeated string device = 3;
2019   for (int i = 0, n = this->_internal_device_size(); i < n; i++) {
2020     const auto& s = this->_internal_device(i);
2021     ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2022       s.data(), static_cast<int>(s.length()),
2023       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2024       "tensorflow.eager.QueueResponse.device");
2025     target = stream->WriteString(3, s, target);
2026   }
2027 
2028   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2029     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
2030         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
2031   }
2032   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.QueueResponse)
2033   return target;
2034 }
2035 
// Computes the serialized size of this message and caches it (via
// SetCachedSize) so the subsequent _InternalSerialize() pass can reuse it.
size_t QueueResponse::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.QueueResponse)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.TensorShapeProto shape = 1;
  // One tag byte per element plus each element's length-delimited payload.
  total_size += 1UL * this->_internal_shape_size();
  for (const auto& msg : this->_impl_.shape_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // repeated .tensorflow.TensorProto tensor = 2;
  total_size += 1UL * this->_internal_tensor_size();
  for (const auto& msg : this->_impl_.tensor_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // repeated string device = 3;
  total_size += 1 *
      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(_impl_.device_.size());
  for (int i = 0, n = _impl_.device_.size(); i < n; i++) {
    total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
      _impl_.device_.Get(i));
  }

  // Unknown fields (lite runtime stores them as a raw std::string blob).
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
2073 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)2074 void QueueResponse::CheckTypeAndMergeFrom(
2075     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
2076   MergeFrom(*::_pbi::DownCast<const QueueResponse*>(
2077       &from));
2078 }
2079 
// Appends `from`'s repeated fields (shape, tensor, device) onto this
// message and merges preserved unknown fields. `from` must not alias this.
void QueueResponse::MergeFrom(const QueueResponse& from) {
  QueueResponse* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.QueueResponse)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.shape_.MergeFrom(from._impl_.shape_);
  _this->_impl_.tensor_.MergeFrom(from._impl_.tensor_);
  _this->_impl_.device_.MergeFrom(from._impl_.device_);
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
2092 
CopyFrom(const QueueResponse & from)2093 void QueueResponse::CopyFrom(const QueueResponse& from) {
2094 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.QueueResponse)
2095   if (&from == this) return;
2096   Clear();
2097   MergeFrom(from);
2098 }
2099 
// Always true: proto3 messages have no required fields to validate.
bool QueueResponse::IsInitialized() const {
  return true;
}
2103 
// Swaps all field storage with `other` in O(1) by exchanging the internal
// repeated-field representations and the metadata (arena/unknown fields).
void QueueResponse::InternalSwap(QueueResponse* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.shape_.InternalSwap(&other->_impl_.shape_);
  _impl_.tensor_.InternalSwap(&other->_impl_.tensor_);
  _impl_.device_.InternalSwap(&other->_impl_.device_);
}
2111 
// Fully-qualified proto message name; the lite runtime has no descriptor,
// so the generator emits the name as a literal.
std::string QueueResponse::GetTypeName() const {
  return "tensorflow.eager.QueueResponse";
}
2115 
2116 
2117 // ===================================================================
2118 
// Generated accessor shim: gives other generated code access to the
// private submessage pointers of CreateContextRequest.
class CreateContextRequest::_Internal {
 public:
  static const ::tensorflow::ServerDef& server_def(const CreateContextRequest* msg);
  static const ::tensorflow::VersionDef& version_def(const CreateContextRequest* msg);
};
2124 
// Dereferences the server_def submessage pointer directly; caller must
// ensure the field is set (pointer non-null) — no default-instance fallback.
const ::tensorflow::ServerDef&
CreateContextRequest::_Internal::server_def(const CreateContextRequest* msg) {
  return *msg->_impl_.server_def_;
}
// Dereferences the version_def submessage pointer directly; caller must
// ensure the field is set (pointer non-null) — no default-instance fallback.
const ::tensorflow::VersionDef&
CreateContextRequest::_Internal::version_def(const CreateContextRequest* msg) {
  return *msg->_impl_.version_def_;
}
clear_server_def()2133 void CreateContextRequest::clear_server_def() {
2134   if (GetArenaForAllocation() == nullptr && _impl_.server_def_ != nullptr) {
2135     delete _impl_.server_def_;
2136   }
2137   _impl_.server_def_ = nullptr;
2138 }
clear_version_def()2139 void CreateContextRequest::clear_version_def() {
2140   if (GetArenaForAllocation() == nullptr && _impl_.version_def_ != nullptr) {
2141     delete _impl_.version_def_;
2142   }
2143   _impl_.version_def_ = nullptr;
2144 }
// Empties the repeated cluster_device_attributes field.
void CreateContextRequest::clear_cluster_device_attributes() {
  _impl_.cluster_device_attributes_.Clear();
}
// Arena-enabled constructor; all field initialization is delegated to
// SharedCtor so the copy constructor can share the same logic shape.
CreateContextRequest::CreateContextRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.CreateContextRequest)
}
// Copy constructor (always heap, never arena). Submessages are deep-copied
// only when set; all scalar fields are copied in one memcpy over the
// contiguous span from keep_alive_secs_ through
// lazy_copy_remote_function_inputs_ (layout guaranteed by the generator).
CreateContextRequest::CreateContextRequest(const CreateContextRequest& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  CreateContextRequest* const _this = this; (void)_this;
  // Placement-new the Impl_ aggregate; initializer order must match the
  // member declaration order.
  new (&_impl_) Impl_{
      decltype(_impl_.cluster_device_attributes_){from._impl_.cluster_device_attributes_}
    , decltype(_impl_.server_def_){nullptr}
    , decltype(_impl_.version_def_){nullptr}
    , decltype(_impl_.keep_alive_secs_){}
    , decltype(_impl_.context_id_){}
    , decltype(_impl_.context_view_id_){}
    , decltype(_impl_.async_){}
    , decltype(_impl_.lazy_copy_remote_function_inputs_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  if (from._internal_has_server_def()) {
    _this->_impl_.server_def_ = new ::tensorflow::ServerDef(*from._impl_.server_def_);
  }
  if (from._internal_has_version_def()) {
    _this->_impl_.version_def_ = new ::tensorflow::VersionDef(*from._impl_.version_def_);
  }
  ::memcpy(&_impl_.keep_alive_secs_, &from._impl_.keep_alive_secs_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.lazy_copy_remote_function_inputs_) -
    reinterpret_cast<char*>(&_impl_.keep_alive_secs_)) + sizeof(_impl_.lazy_copy_remote_function_inputs_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.CreateContextRequest)
}
2180 
// Common constructor body: placement-news the Impl_ aggregate with every
// field zero/default-initialized; the repeated field is bound to `arena`.
inline void CreateContextRequest::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.cluster_device_attributes_){arena}
    , decltype(_impl_.server_def_){nullptr}
    , decltype(_impl_.version_def_){nullptr}
    , decltype(_impl_.keep_alive_secs_){::int64_t{0}}
    , decltype(_impl_.context_id_){::uint64_t{0u}}
    , decltype(_impl_.context_view_id_){::uint64_t{0u}}
    , decltype(_impl_.async_){false}
    , decltype(_impl_.lazy_copy_remote_function_inputs_){false}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
2197 
// Destructor. If the message lives on an arena, only the metadata is
// released and SharedDtor is skipped — the arena reclaims field storage.
CreateContextRequest::~CreateContextRequest() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.CreateContextRequest)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
2206 
// Frees heap-owned storage. The default instance must never delete its
// submessage pointers, hence the internal_default_instance() guards.
inline void CreateContextRequest::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.cluster_device_attributes_.~RepeatedPtrField();
  if (this != internal_default_instance()) delete _impl_.server_def_;
  if (this != internal_default_instance()) delete _impl_.version_def_;
}
2213 
// Stores the byte size computed by ByteSizeLong() for later serialization.
void CreateContextRequest::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
2217 
// Resets every field to its default: clears the repeated field, frees
// heap-owned submessages, zeroes the scalar span in one memset, and drops
// any preserved unknown fields.
void CreateContextRequest::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.CreateContextRequest)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.cluster_device_attributes_.Clear();
  if (GetArenaForAllocation() == nullptr && _impl_.server_def_ != nullptr) {
    delete _impl_.server_def_;
  }
  _impl_.server_def_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.version_def_ != nullptr) {
    delete _impl_.version_def_;
  }
  _impl_.version_def_ = nullptr;
  // Zero keep_alive_secs_ .. lazy_copy_remote_function_inputs_ in one shot
  // (contiguous scalar span, layout guaranteed by the generator).
  ::memset(&_impl_.keep_alive_secs_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.lazy_copy_remote_function_inputs_) -
      reinterpret_cast<char*>(&_impl_.keep_alive_secs_)) + sizeof(_impl_.lazy_copy_remote_function_inputs_));
  _internal_metadata_.Clear<std::string>();
}
2238 
// Tail-loop wire-format parser. Dispatches on field number; unknown fields
// and unexpected wire types fall through to handle_unusual, where they are
// preserved in the metadata blob. Returns nullptr on malformed input.
const char* CreateContextRequest::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .tensorflow.ServerDef server_def = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr = ctx->ParseMessage(_internal_mutable_server_def(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool async = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.async_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 keep_alive_secs = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.keep_alive_secs_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.VersionDef version_def = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
          ptr = ctx->ParseMessage(_internal_mutable_version_def(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.DeviceAttributes cluster_device_attributes = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
          // Greedy inner loop: consume consecutive elements of the repeated
          // field without re-entering the outer tag dispatch.
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_cluster_device_attributes(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<50>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // fixed64 context_id = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 57)) {
          _impl_.context_id_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::uint64_t>(ptr);
          ptr += sizeof(::uint64_t);
        } else {
          goto handle_unusual;
        }
        continue;
      // fixed64 context_view_id = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 65)) {
          _impl_.context_view_id_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::uint64_t>(ptr);
          ptr += sizeof(::uint64_t);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool lazy_copy_remote_function_inputs = 9;
      case 9:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 72)) {
          _impl_.lazy_copy_remote_function_inputs_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // tag == 0 (end of stream) or an end-group tag terminates the message.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    // Preserve the unrecognized field in the unknown-field blob.
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
2344 
// Serializes CreateContextRequest to `target` via the EpsCopy fast path.
// Fields are written in field-number order; unset submessages and
// zero-valued scalars are skipped per proto3 semantics.
::uint8_t* CreateContextRequest::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.CreateContextRequest)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.ServerDef server_def = 1;
  if (this->_internal_has_server_def()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(1, _Internal::server_def(this),
        _Internal::server_def(this).GetCachedSize(), target, stream);
  }

  // bool async = 2;
  if (this->_internal_async() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(2, this->_internal_async(), target);
  }

  // int64 keep_alive_secs = 3;
  if (this->_internal_keep_alive_secs() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_keep_alive_secs(), target);
  }

  // .tensorflow.VersionDef version_def = 4;
  if (this->_internal_has_version_def()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(4, _Internal::version_def(this),
        _Internal::version_def(this).GetCachedSize(), target, stream);
  }

  // repeated .tensorflow.DeviceAttributes cluster_device_attributes = 6;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_cluster_device_attributes_size()); i < n; i++) {
    const auto& repfield = this->_internal_cluster_device_attributes(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(6, repfield, repfield.GetCachedSize(), target, stream);
  }

  // fixed64 context_id = 7;
  if (this->_internal_context_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteFixed64ToArray(7, this->_internal_context_id(), target);
  }

  // fixed64 context_view_id = 8;
  if (this->_internal_context_view_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteFixed64ToArray(8, this->_internal_context_view_id(), target);
  }

  // bool lazy_copy_remote_function_inputs = 9;
  if (this->_internal_lazy_copy_remote_function_inputs() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(9, this->_internal_lazy_copy_remote_function_inputs(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.CreateContextRequest)
  return target;
}
2410 
// Computes the serialized size and caches it for _InternalSerialize().
// Unset submessages and zero-valued scalars contribute nothing (proto3).
size_t CreateContextRequest::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.CreateContextRequest)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.DeviceAttributes cluster_device_attributes = 6;
  // One tag byte per element plus each element's length-delimited payload.
  total_size += 1UL * this->_internal_cluster_device_attributes_size();
  for (const auto& msg : this->_impl_.cluster_device_attributes_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // .tensorflow.ServerDef server_def = 1;
  if (this->_internal_has_server_def()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.server_def_);
  }

  // .tensorflow.VersionDef version_def = 4;
  if (this->_internal_has_version_def()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.version_def_);
  }

  // int64 keep_alive_secs = 3;
  if (this->_internal_keep_alive_secs() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_keep_alive_secs());
  }

  // fixed64 context_id = 7;  (1 tag byte + 8 payload bytes)
  if (this->_internal_context_id() != 0) {
    total_size += 1 + 8;
  }

  // fixed64 context_view_id = 8;
  if (this->_internal_context_view_id() != 0) {
    total_size += 1 + 8;
  }

  // bool async = 2;  (1 tag byte + 1 payload byte)
  if (this->_internal_async() != 0) {
    total_size += 1 + 1;
  }

  // bool lazy_copy_remote_function_inputs = 9;
  if (this->_internal_lazy_copy_remote_function_inputs() != 0) {
    total_size += 1 + 1;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
2472 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)2473 void CreateContextRequest::CheckTypeAndMergeFrom(
2474     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
2475   MergeFrom(*::_pbi::DownCast<const CreateContextRequest*>(
2476       &from));
2477 }
2478 
// Merges `from` into this message: repeated fields are appended, set
// submessages are recursively merged, and non-zero scalars overwrite.
// `from` must not alias this.
void CreateContextRequest::MergeFrom(const CreateContextRequest& from) {
  CreateContextRequest* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.CreateContextRequest)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.cluster_device_attributes_.MergeFrom(from._impl_.cluster_device_attributes_);
  if (from._internal_has_server_def()) {
    _this->_internal_mutable_server_def()->::tensorflow::ServerDef::MergeFrom(
        from._internal_server_def());
  }
  if (from._internal_has_version_def()) {
    _this->_internal_mutable_version_def()->::tensorflow::VersionDef::MergeFrom(
        from._internal_version_def());
  }
  // Scalars: proto3 merge copies only non-default (non-zero) values.
  if (from._internal_keep_alive_secs() != 0) {
    _this->_internal_set_keep_alive_secs(from._internal_keep_alive_secs());
  }
  if (from._internal_context_id() != 0) {
    _this->_internal_set_context_id(from._internal_context_id());
  }
  if (from._internal_context_view_id() != 0) {
    _this->_internal_set_context_view_id(from._internal_context_view_id());
  }
  if (from._internal_async() != 0) {
    _this->_internal_set_async(from._internal_async());
  }
  if (from._internal_lazy_copy_remote_function_inputs() != 0) {
    _this->_internal_set_lazy_copy_remote_function_inputs(from._internal_lazy_copy_remote_function_inputs());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
2512 
CopyFrom(const CreateContextRequest & from)2513 void CreateContextRequest::CopyFrom(const CreateContextRequest& from) {
2514 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.CreateContextRequest)
2515   if (&from == this) return;
2516   Clear();
2517   MergeFrom(from);
2518 }
2519 
// Always true: proto3 messages have no required fields to validate.
bool CreateContextRequest::IsInitialized() const {
  return true;
}
2523 
// O(1) swap with `other`: metadata and the repeated field swap via their
// own InternalSwap; the remaining members (server_def_ through
// lazy_copy_remote_function_inputs_) are one contiguous span swapped
// byte-wise with memswap, using field offsets to size the span.
void CreateContextRequest::InternalSwap(CreateContextRequest* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.cluster_device_attributes_.InternalSwap(&other->_impl_.cluster_device_attributes_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(CreateContextRequest, _impl_.lazy_copy_remote_function_inputs_)
      + sizeof(CreateContextRequest::_impl_.lazy_copy_remote_function_inputs_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(CreateContextRequest, _impl_.server_def_)>(
          reinterpret_cast<char*>(&_impl_.server_def_),
          reinterpret_cast<char*>(&other->_impl_.server_def_));
}
2535 
// Fully-qualified proto message name; the lite runtime has no descriptor,
// so the generator emits the name as a literal.
std::string CreateContextRequest::GetTypeName() const {
  return "tensorflow.eager.CreateContextRequest";
}
2539 
2540 
2541 // ===================================================================
2542 
// Generated accessor shim; empty because CreateContextResponse has no
// singular submessage fields needing private access.
class CreateContextResponse::_Internal {
 public:
};
2546 
// Empties the repeated device_attributes field.
void CreateContextResponse::clear_device_attributes() {
  _impl_.device_attributes_.Clear();
}
// Arena-enabled constructor; field initialization delegated to SharedCtor.
CreateContextResponse::CreateContextResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.CreateContextResponse)
}
// Copy constructor (always heap, never arena): deep-copies the repeated
// field via its copy initializer and merges preserved unknown fields.
CreateContextResponse::CreateContextResponse(const CreateContextResponse& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  CreateContextResponse* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.device_attributes_){from._impl_.device_attributes_}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.CreateContextResponse)
}
2566 
// Common constructor body: placement-news the Impl_ aggregate with the
// repeated field bound to `arena`.
inline void CreateContextResponse::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.device_attributes_){arena}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
2576 
// Destructor. Arena-resident messages skip SharedDtor — the arena
// reclaims field storage; only the metadata is released here.
CreateContextResponse::~CreateContextResponse() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.CreateContextResponse)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
2585 
// Destroys heap-owned field storage; only valid when not on an arena.
inline void CreateContextResponse::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.device_attributes_.~RepeatedPtrField();
}
2590 
// Stores the byte size computed by ByteSizeLong() for later serialization.
void CreateContextResponse::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
2594 
// Resets the message: clears the repeated field and any unknown fields.
void CreateContextResponse::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.CreateContextResponse)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.device_attributes_.Clear();
  _internal_metadata_.Clear<std::string>();
}
2604 
// Wire-format parser: only field 2 (repeated device_attributes) is known;
// everything else is preserved as unknown fields. Returns nullptr on
// malformed input.
const char* CreateContextResponse::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // repeated .tensorflow.DeviceAttributes device_attributes = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          // Greedy inner loop: consume consecutive elements of the
          // repeated field without re-entering the outer dispatch.
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_device_attributes(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // tag == 0 (end of stream) or an end-group tag terminates the message.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
2647 
// Serializes CreateContextResponse: repeated device_attributes(2), then
// any unknown fields preserved from parsing, appended verbatim.
::uint8_t* CreateContextResponse::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.CreateContextResponse)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // repeated .tensorflow.DeviceAttributes device_attributes = 2;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_device_attributes_size()); i < n; i++) {
    const auto& repfield = this->_internal_device_attributes(i);
    // GetCachedSize() relies on a preceding ByteSizeLong() pass.
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(2, repfield, repfield.GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.CreateContextResponse)
  return target;
}
2669 
// Computes the serialized size and caches it for _InternalSerialize().
size_t CreateContextResponse::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.CreateContextResponse)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.DeviceAttributes device_attributes = 2;
  // One tag byte per element plus each element's length-delimited payload.
  total_size += 1UL * this->_internal_device_attributes_size();
  for (const auto& msg : this->_impl_.device_attributes_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
2692 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)2693 void CreateContextResponse::CheckTypeAndMergeFrom(
2694     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
2695   MergeFrom(*::_pbi::DownCast<const CreateContextResponse*>(
2696       &from));
2697 }
2698 
// Appends `from`'s repeated device_attributes onto this message and
// merges preserved unknown fields. `from` must not alias this.
void CreateContextResponse::MergeFrom(const CreateContextResponse& from) {
  CreateContextResponse* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.CreateContextResponse)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.device_attributes_.MergeFrom(from._impl_.device_attributes_);
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
2709 
CopyFrom(const CreateContextResponse & from)2710 void CreateContextResponse::CopyFrom(const CreateContextResponse& from) {
2711 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.CreateContextResponse)
2712   if (&from == this) return;
2713   Clear();
2714   MergeFrom(from);
2715 }
2716 
// Always true: proto3 messages have no required fields to validate.
bool CreateContextResponse::IsInitialized() const {
  return true;
}
2720 
// O(1) swap with `other`: exchanges the metadata and the repeated-field
// internal representation.
void CreateContextResponse::InternalSwap(CreateContextResponse* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.device_attributes_.InternalSwap(&other->_impl_.device_attributes_);
}
2726 
GetTypeName() const2727 std::string CreateContextResponse::GetTypeName() const {
2728   return "tensorflow.eager.CreateContextResponse";
2729 }
2730 
2731 
2732 // ===================================================================
2733 
// _Internal: generated friend helper that exposes private submessage state
// to the parsing/serialization code in this translation unit.
2734 class UpdateContextRequest::_Internal {
2735  public:
2736   static const ::tensorflow::ServerDef& server_def(const UpdateContextRequest* msg);
2737 };
2738 
// Unchecked dereference of server_def_; callers in this file gate on
// _internal_has_server_def() before using it.
2739 const ::tensorflow::ServerDef&
server_def(const UpdateContextRequest * msg)2740 UpdateContextRequest::_Internal::server_def(const UpdateContextRequest* msg) {
2741   return *msg->_impl_.server_def_;
2742 }
// clear_server_def: the heap-allocated submessage is deleted only when this
// message is not arena-owned; on an arena the pointer is simply dropped.
clear_server_def()2743 void UpdateContextRequest::clear_server_def() {
2744   if (GetArenaForAllocation() == nullptr && _impl_.server_def_ != nullptr) {
2745     delete _impl_.server_def_;
2746   }
2747   _impl_.server_def_ = nullptr;
2748 }
// clear_cluster_device_attributes: empties the repeated field.
clear_cluster_device_attributes()2749 void UpdateContextRequest::clear_cluster_device_attributes() {
2750   _impl_.cluster_device_attributes_.Clear();
2751 }
// Arena constructor: all field initialization is delegated to SharedCtor.
UpdateContextRequest(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)2752 UpdateContextRequest::UpdateContextRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena,
2753                          bool is_message_owned)
2754   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
2755   SharedCtor(arena, is_message_owned);
2756   // @@protoc_insertion_point(arena_constructor:tensorflow.eager.UpdateContextRequest)
2757 }
// Copy constructor: placement-new initializes Impl_ members in declaration
// order, deep-copies server_def_ only if present, then bulk-copies the POD
// span [context_id_, context_view_id_] with a single memcpy. The pointer
// arithmetic relies on the generator's member declaration order.
UpdateContextRequest(const UpdateContextRequest & from)2758 UpdateContextRequest::UpdateContextRequest(const UpdateContextRequest& from)
2759   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
2760   UpdateContextRequest* const _this = this; (void)_this;
2761   new (&_impl_) Impl_{
2762       decltype(_impl_.cluster_device_attributes_){from._impl_.cluster_device_attributes_}
2763     , decltype(_impl_.server_def_){nullptr}
2764     , decltype(_impl_.context_id_){}
2765     , decltype(_impl_.context_view_id_){}
2766     , /*decltype(_impl_._cached_size_)*/{}};
2767 
2768   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
2769   if (from._internal_has_server_def()) {
2770     _this->_impl_.server_def_ = new ::tensorflow::ServerDef(*from._impl_.server_def_);
2771   }
2772   ::memcpy(&_impl_.context_id_, &from._impl_.context_id_,
2773     static_cast<size_t>(reinterpret_cast<char*>(&_impl_.context_view_id_) -
2774     reinterpret_cast<char*>(&_impl_.context_id_)) + sizeof(_impl_.context_view_id_));
2775   // @@protoc_insertion_point(copy_constructor:tensorflow.eager.UpdateContextRequest)
2776 }
2777 
// SharedCtor: zero/default-initializes every Impl_ member; the repeated
// field is bound to the owning arena (may be null).
SharedCtor(::_pb::Arena * arena,bool is_message_owned)2778 inline void UpdateContextRequest::SharedCtor(
2779     ::_pb::Arena* arena, bool is_message_owned) {
2780   (void)arena;
2781   (void)is_message_owned;
2782   new (&_impl_) Impl_{
2783       decltype(_impl_.cluster_device_attributes_){arena}
2784     , decltype(_impl_.server_def_){nullptr}
2785     , decltype(_impl_.context_id_){::uint64_t{0u}}
2786     , decltype(_impl_.context_view_id_){::uint64_t{0u}}
2787     , /*decltype(_impl_._cached_size_)*/{}
2788   };
2789 }
2790 
// Destructor: arena-owned messages skip SharedDtor entirely — the arena
// reclaims their memory in bulk.
~UpdateContextRequest()2791 UpdateContextRequest::~UpdateContextRequest() {
2792   // @@protoc_insertion_point(destructor:tensorflow.eager.UpdateContextRequest)
2793   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
2794   (void)arena;
2795     return;
2796   }
2797   SharedDtor();
2798 }
2799 
// SharedDtor: runs only for heap-owned instances; the process-lifetime
// default instance never deletes its server_def_ pointer.
SharedDtor()2800 inline void UpdateContextRequest::SharedDtor() {
2801   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
2802   _impl_.cluster_device_attributes_.~RepeatedPtrField();
2803   if (this != internal_default_instance()) delete _impl_.server_def_;
2804 }
2805 
// SetCachedSize: stores the byte size computed by ByteSizeLong so
// serialization can reuse it (atomic slot; hence const method).
SetCachedSize(int size) const2806 void UpdateContextRequest::SetCachedSize(int size) const {
2807   _impl_._cached_size_.Set(size);
2808 }
2809 
// Clear: resets every field to its proto3 default. The scalar fields
// [context_id_, context_view_id_] are zeroed with one memset over their
// contiguous span — relies on the generator's member declaration order.
Clear()2810 void UpdateContextRequest::Clear() {
2811 // @@protoc_insertion_point(message_clear_start:tensorflow.eager.UpdateContextRequest)
2812   ::uint32_t cached_has_bits = 0;
2813   // Prevent compiler warnings about cached_has_bits being unused
2814   (void) cached_has_bits;
2815 
2816   _impl_.cluster_device_attributes_.Clear();
2817   if (GetArenaForAllocation() == nullptr && _impl_.server_def_ != nullptr) {
2818     delete _impl_.server_def_;
2819   }
2820   _impl_.server_def_ = nullptr;
2821   ::memset(&_impl_.context_id_, 0, static_cast<size_t>(
2822       reinterpret_cast<char*>(&_impl_.context_view_id_) -
2823       reinterpret_cast<char*>(&_impl_.context_id_)) + sizeof(_impl_.context_view_id_));
2824   _internal_metadata_.Clear<std::string>();
2825 }
2826 
// _InternalParse: table-free wire-format parser. Tags are switched on the
// field number (tag >> 3); the expected one-byte tag constants encode
// (field_number << 3) | wire_type, e.g. 10 = field 1 LEN, 25 = field 3 I64.
// Unknown fields are preserved as raw bytes in _internal_metadata_.
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)2827 const char* UpdateContextRequest::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
2828 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
2829   while (!ctx->Done(&ptr)) {
2830     ::uint32_t tag;
2831     ptr = ::_pbi::ReadTag(ptr, &tag);
2832     switch (tag >> 3) {
2833       // .tensorflow.ServerDef server_def = 1;
2834       case 1:
2835         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
2836           ptr = ctx->ParseMessage(_internal_mutable_server_def(), ptr);
2837           CHK_(ptr);
2838         } else {
2839           goto handle_unusual;
2840         }
2841         continue;
2842       // repeated .tensorflow.DeviceAttributes cluster_device_attributes = 2;
2843       case 2:
2844         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          // Back up one byte so the loop body can uniformly advance past the
          // tag byte each iteration while ExpectTag peeks at the next one.
2845           ptr -= 1;
2846           do {
2847             ptr += 1;
2848             ptr = ctx->ParseMessage(_internal_add_cluster_device_attributes(), ptr);
2849             CHK_(ptr);
2850             if (!ctx->DataAvailable(ptr)) break;
2851           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr));
2852         } else {
2853           goto handle_unusual;
2854         }
2855         continue;
2856       // fixed64 context_id = 3;
2857       case 3:
2858         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 25)) {
2859           _impl_.context_id_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::uint64_t>(ptr);
2860           ptr += sizeof(::uint64_t);
2861         } else {
2862           goto handle_unusual;
2863         }
2864         continue;
2865       // fixed64 context_view_id = 4;
2866       case 4:
2867         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 33)) {
2868           _impl_.context_view_id_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::uint64_t>(ptr);
2869           ptr += sizeof(::uint64_t);
2870         } else {
2871           goto handle_unusual;
2872         }
2873         continue;
2874       default:
2875         goto handle_unusual;
2876     }  // switch
2877   handle_unusual:
    // tag == 0 is end-of-stream; wire type 4 is an END_GROUP marker owned
    // by an enclosing group — both terminate this message.
2878     if ((tag == 0) || ((tag & 7) == 4)) {
2879       CHK_(ptr);
2880       ctx->SetLastTag(tag);
2881       goto message_done;
2882     }
2883     ptr = UnknownFieldParse(
2884         tag,
2885         _internal_metadata_.mutable_unknown_fields<std::string>(),
2886         ptr, ctx);
2887     CHK_(ptr != nullptr);
2888   }  // while
2889 message_done:
2890   return ptr;
2891 failure:
  // Parse errors are reported to the caller as a null pointer.
2892   ptr = nullptr;
2893   goto message_done;
2894 #undef CHK_
2895 }
2896 
// _InternalSerialize: writes fields in ascending field-number order.
// Submessage lengths reuse the cached sizes computed by a prior
// ByteSizeLong pass; unknown fields are appended verbatim at the end.
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const2897 ::uint8_t* UpdateContextRequest::_InternalSerialize(
2898     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
2899   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.UpdateContextRequest)
2900   ::uint32_t cached_has_bits = 0;
2901   (void) cached_has_bits;
2902 
2903   // .tensorflow.ServerDef server_def = 1;
2904   if (this->_internal_has_server_def()) {
2905     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
2906       InternalWriteMessage(1, _Internal::server_def(this),
2907         _Internal::server_def(this).GetCachedSize(), target, stream);
2908   }
2909 
2910   // repeated .tensorflow.DeviceAttributes cluster_device_attributes = 2;
2911   for (unsigned i = 0,
2912       n = static_cast<unsigned>(this->_internal_cluster_device_attributes_size()); i < n; i++) {
2913     const auto& repfield = this->_internal_cluster_device_attributes(i);
2914     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
2915         InternalWriteMessage(2, repfield, repfield.GetCachedSize(), target, stream);
2916   }
2917 
  // proto3 scalar fields are skipped when equal to their zero default.
2918   // fixed64 context_id = 3;
2919   if (this->_internal_context_id() != 0) {
2920     target = stream->EnsureSpace(target);
2921     target = ::_pbi::WireFormatLite::WriteFixed64ToArray(3, this->_internal_context_id(), target);
2922   }
2923 
2924   // fixed64 context_view_id = 4;
2925   if (this->_internal_context_view_id() != 0) {
2926     target = stream->EnsureSpace(target);
2927     target = ::_pbi::WireFormatLite::WriteFixed64ToArray(4, this->_internal_context_view_id(), target);
2928   }
2929 
2930   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2931     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
2932         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
2933   }
2934   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.UpdateContextRequest)
2935   return target;
2936 }
2937 
// ByteSizeLong: computes the serialized size and caches it (as an int) for
// the subsequent _InternalSerialize pass. The "1"/"1 + 8" terms are the
// one-byte tag plus, for fixed64, the 8-byte payload.
ByteSizeLong() const2938 size_t UpdateContextRequest::ByteSizeLong() const {
2939 // @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.UpdateContextRequest)
2940   size_t total_size = 0;
2941 
2942   ::uint32_t cached_has_bits = 0;
2943   // Prevent compiler warnings about cached_has_bits being unused
2944   (void) cached_has_bits;
2945 
2946   // repeated .tensorflow.DeviceAttributes cluster_device_attributes = 2;
2947   total_size += 1UL * this->_internal_cluster_device_attributes_size();
2948   for (const auto& msg : this->_impl_.cluster_device_attributes_) {
2949     total_size +=
2950       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
2951   }
2952 
2953   // .tensorflow.ServerDef server_def = 1;
2954   if (this->_internal_has_server_def()) {
2955     total_size += 1 +
2956       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
2957         *_impl_.server_def_);
2958   }
2959 
  // proto3: zero-valued scalars are not serialized, so they add no size.
2960   // fixed64 context_id = 3;
2961   if (this->_internal_context_id() != 0) {
2962     total_size += 1 + 8;
2963   }
2964 
2965   // fixed64 context_view_id = 4;
2966   if (this->_internal_context_view_id() != 0) {
2967     total_size += 1 + 8;
2968   }
2969 
2970   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2971     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
2972   }
2973   int cached_size = ::_pbi::ToCachedSize(total_size);
2974   SetCachedSize(cached_size);
2975   return total_size;
2976 }
2977 
// CheckTypeAndMergeFrom: type-erased merge entry point; the DownCast is
// unchecked in release builds.
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)2978 void UpdateContextRequest::CheckTypeAndMergeFrom(
2979     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
2980   MergeFrom(*::_pbi::DownCast<const UpdateContextRequest*>(
2981       &from));
2982 }
2983 
// MergeFrom: appends repeated entries, recursively merges server_def when
// present in `from`, and overwrites scalars only when `from`'s value is
// non-zero (proto3 merge semantics).
MergeFrom(const UpdateContextRequest & from)2984 void UpdateContextRequest::MergeFrom(const UpdateContextRequest& from) {
2985   UpdateContextRequest* const _this = this;
2986   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.UpdateContextRequest)
2987   GOOGLE_DCHECK_NE(&from, _this);
2988   ::uint32_t cached_has_bits = 0;
2989   (void) cached_has_bits;
2990 
2991   _this->_impl_.cluster_device_attributes_.MergeFrom(from._impl_.cluster_device_attributes_);
2992   if (from._internal_has_server_def()) {
2993     _this->_internal_mutable_server_def()->::tensorflow::ServerDef::MergeFrom(
2994         from._internal_server_def());
2995   }
2996   if (from._internal_context_id() != 0) {
2997     _this->_internal_set_context_id(from._internal_context_id());
2998   }
2999   if (from._internal_context_view_id() != 0) {
3000     _this->_internal_set_context_view_id(from._internal_context_view_id());
3001   }
3002   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
3003 }
3004 
// CopyFrom: Clear-then-MergeFrom; self-assignment is a no-op.
CopyFrom(const UpdateContextRequest & from)3005 void UpdateContextRequest::CopyFrom(const UpdateContextRequest& from) {
3006 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.UpdateContextRequest)
3007   if (&from == this) return;
3008   Clear();
3009   MergeFrom(from);
3010 }
3011 
// IsInitialized: proto3, no required fields — always true.
IsInitialized() const3012 bool UpdateContextRequest::IsInitialized() const {
3013   return true;
3014 }
3015 
// InternalSwap: swaps metadata and the repeated field individually, then
// byte-swaps the contiguous span [server_def_, context_view_id_] in one
// memswap — relies on the generator's member declaration order.
InternalSwap(UpdateContextRequest * other)3016 void UpdateContextRequest::InternalSwap(UpdateContextRequest* other) {
3017   using std::swap;
3018   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
3019   _impl_.cluster_device_attributes_.InternalSwap(&other->_impl_.cluster_device_attributes_);
3020   ::PROTOBUF_NAMESPACE_ID::internal::memswap<
3021       PROTOBUF_FIELD_OFFSET(UpdateContextRequest, _impl_.context_view_id_)
3022       + sizeof(UpdateContextRequest::_impl_.context_view_id_)  // NOLINT
3023       - PROTOBUF_FIELD_OFFSET(UpdateContextRequest, _impl_.server_def_)>(
3024           reinterpret_cast<char*>(&_impl_.server_def_),
3025           reinterpret_cast<char*>(&other->_impl_.server_def_));
3026 }
3027 
// GetTypeName: fully-qualified proto name as a literal (lite runtime).
GetTypeName() const3028 std::string UpdateContextRequest::GetTypeName() const {
3029   return "tensorflow.eager.UpdateContextRequest";
3030 }
3031 
3032 
3033 // ===================================================================
3034 
// _Internal: empty here — this message has no singular submessage fields
// needing private accessors.
3035 class UpdateContextResponse::_Internal {
3036  public:
3037 };
3038 
// clear_device_attributes: empties the repeated field.
clear_device_attributes()3039 void UpdateContextResponse::clear_device_attributes() {
3040   _impl_.device_attributes_.Clear();
3041 }
// Arena constructor: initialization delegated to SharedCtor.
UpdateContextResponse(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)3042 UpdateContextResponse::UpdateContextResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena,
3043                          bool is_message_owned)
3044   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
3045   SharedCtor(arena, is_message_owned);
3046   // @@protoc_insertion_point(arena_constructor:tensorflow.eager.UpdateContextResponse)
3047 }
// Copy constructor: deep-copies the repeated field via its copy ctor and
// merges unknown-field bytes.
UpdateContextResponse(const UpdateContextResponse & from)3048 UpdateContextResponse::UpdateContextResponse(const UpdateContextResponse& from)
3049   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
3050   UpdateContextResponse* const _this = this; (void)_this;
3051   new (&_impl_) Impl_{
3052       decltype(_impl_.device_attributes_){from._impl_.device_attributes_}
3053     , /*decltype(_impl_._cached_size_)*/{}};
3054 
3055   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
3056   // @@protoc_insertion_point(copy_constructor:tensorflow.eager.UpdateContextResponse)
3057 }
3058 
// SharedCtor: placement-new initializes Impl_; the repeated field is bound
// to the owning arena (may be null).
SharedCtor(::_pb::Arena * arena,bool is_message_owned)3059 inline void UpdateContextResponse::SharedCtor(
3060     ::_pb::Arena* arena, bool is_message_owned) {
3061   (void)arena;
3062   (void)is_message_owned;
3063   new (&_impl_) Impl_{
3064       decltype(_impl_.device_attributes_){arena}
3065     , /*decltype(_impl_._cached_size_)*/{}
3066   };
3067 }
3068 
// Destructor: arena-owned instances return early; the arena frees them.
~UpdateContextResponse()3069 UpdateContextResponse::~UpdateContextResponse() {
3070   // @@protoc_insertion_point(destructor:tensorflow.eager.UpdateContextResponse)
3071   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
3072   (void)arena;
3073     return;
3074   }
3075   SharedDtor();
3076 }
3077 
// SharedDtor: heap-owned path only — explicitly destroys the repeated field.
SharedDtor()3078 inline void UpdateContextResponse::SharedDtor() {
3079   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
3080   _impl_.device_attributes_.~RepeatedPtrField();
3081 }
3082 
// SetCachedSize: stores ByteSizeLong's result for serialization reuse.
SetCachedSize(int size) const3083 void UpdateContextResponse::SetCachedSize(int size) const {
3084   _impl_._cached_size_.Set(size);
3085 }
3086 
// Clear: resets the repeated field and drops any unknown-field bytes.
Clear()3087 void UpdateContextResponse::Clear() {
3088 // @@protoc_insertion_point(message_clear_start:tensorflow.eager.UpdateContextResponse)
3089   ::uint32_t cached_has_bits = 0;
3090   // Prevent compiler warnings about cached_has_bits being unused
3091   (void) cached_has_bits;
3092 
3093   _impl_.device_attributes_.Clear();
3094   _internal_metadata_.Clear<std::string>();
3095 }
3096 
// _InternalParse: single-field wire parser. Tag constant 10 encodes
// (field 1 << 3) | wire_type LEN. Unknown fields are preserved as bytes.
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)3097 const char* UpdateContextResponse::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
3098 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
3099   while (!ctx->Done(&ptr)) {
3100     ::uint32_t tag;
3101     ptr = ::_pbi::ReadTag(ptr, &tag);
3102     switch (tag >> 3) {
3103       // repeated .tensorflow.DeviceAttributes device_attributes = 1;
3104       case 1:
3105         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          // Back up one byte so each loop iteration can uniformly consume
          // the tag byte that ExpectTag peeked at.
3106           ptr -= 1;
3107           do {
3108             ptr += 1;
3109             ptr = ctx->ParseMessage(_internal_add_device_attributes(), ptr);
3110             CHK_(ptr);
3111             if (!ctx->DataAvailable(ptr)) break;
3112           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
3113         } else {
3114           goto handle_unusual;
3115         }
3116         continue;
3117       default:
3118         goto handle_unusual;
3119     }  // switch
3120   handle_unusual:
    // tag == 0 is end-of-stream; wire type 4 is an enclosing END_GROUP.
3121     if ((tag == 0) || ((tag & 7) == 4)) {
3122       CHK_(ptr);
3123       ctx->SetLastTag(tag);
3124       goto message_done;
3125     }
3126     ptr = UnknownFieldParse(
3127         tag,
3128         _internal_metadata_.mutable_unknown_fields<std::string>(),
3129         ptr, ctx);
3130     CHK_(ptr != nullptr);
3131   }  // while
3132 message_done:
3133   return ptr;
3134 failure:
  // Parse errors surface to the caller as a null pointer.
3135   ptr = nullptr;
3136   goto message_done;
3137 #undef CHK_
3138 }
3139 
// _InternalSerialize: writes each device_attributes entry (length-prefixed
// using the cached size from ByteSizeLong), then any unknown-field bytes.
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const3140 ::uint8_t* UpdateContextResponse::_InternalSerialize(
3141     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
3142   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.UpdateContextResponse)
3143   ::uint32_t cached_has_bits = 0;
3144   (void) cached_has_bits;
3145 
3146   // repeated .tensorflow.DeviceAttributes device_attributes = 1;
3147   for (unsigned i = 0,
3148       n = static_cast<unsigned>(this->_internal_device_attributes_size()); i < n; i++) {
3149     const auto& repfield = this->_internal_device_attributes(i);
3150     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
3151         InternalWriteMessage(1, repfield, repfield.GetCachedSize(), target, stream);
3152   }
3153 
3154   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
3155     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
3156         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
3157   }
3158   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.UpdateContextResponse)
3159   return target;
3160 }
3161 
// ByteSizeLong: one tag byte per repeated entry plus each entry's
// length-delimited size; the result is cached for _InternalSerialize.
ByteSizeLong() const3162 size_t UpdateContextResponse::ByteSizeLong() const {
3163 // @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.UpdateContextResponse)
3164   size_t total_size = 0;
3165 
3166   ::uint32_t cached_has_bits = 0;
3167   // Prevent compiler warnings about cached_has_bits being unused
3168   (void) cached_has_bits;
3169 
3170   // repeated .tensorflow.DeviceAttributes device_attributes = 1;
3171   total_size += 1UL * this->_internal_device_attributes_size();
3172   for (const auto& msg : this->_impl_.device_attributes_) {
3173     total_size +=
3174       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
3175   }
3176 
3177   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
3178     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
3179   }
3180   int cached_size = ::_pbi::ToCachedSize(total_size);
3181   SetCachedSize(cached_size);
3182   return total_size;
3183 }
3184 
// CheckTypeAndMergeFrom: type-erased merge; unchecked DownCast in release.
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)3185 void UpdateContextResponse::CheckTypeAndMergeFrom(
3186     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
3187   MergeFrom(*::_pbi::DownCast<const UpdateContextResponse*>(
3188       &from));
3189 }
3190 
// MergeFrom: appends `from`'s repeated entries and unknown-field bytes.
MergeFrom(const UpdateContextResponse & from)3191 void UpdateContextResponse::MergeFrom(const UpdateContextResponse& from) {
3192   UpdateContextResponse* const _this = this;
3193   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.UpdateContextResponse)
3194   GOOGLE_DCHECK_NE(&from, _this);
3195   ::uint32_t cached_has_bits = 0;
3196   (void) cached_has_bits;
3197 
3198   _this->_impl_.device_attributes_.MergeFrom(from._impl_.device_attributes_);
3199   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
3200 }
3201 
// CopyFrom: Clear-then-MergeFrom; self-assignment is a no-op.
CopyFrom(const UpdateContextResponse & from)3202 void UpdateContextResponse::CopyFrom(const UpdateContextResponse& from) {
3203 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.UpdateContextResponse)
3204   if (&from == this) return;
3205   Clear();
3206   MergeFrom(from);
3207 }
3208 
// IsInitialized: proto3, no required fields — always true.
IsInitialized() const3209 bool UpdateContextResponse::IsInitialized() const {
3210   return true;
3211 }
3212 
// InternalSwap: O(1) swap of metadata and the repeated field.
InternalSwap(UpdateContextResponse * other)3213 void UpdateContextResponse::InternalSwap(UpdateContextResponse* other) {
3214   using std::swap;
3215   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
3216   _impl_.device_attributes_.InternalSwap(&other->_impl_.device_attributes_);
3217 }
3218 
// GetTypeName: fully-qualified proto name as a literal (lite runtime).
GetTypeName() const3219 std::string UpdateContextResponse::GetTypeName() const {
3220   return "tensorflow.eager.UpdateContextResponse";
3221 }
3222 
3223 
3224 // ===================================================================
3225 
// _Internal: empty — no singular submessage fields need private accessors.
3226 class EnqueueRequest::_Internal {
3227  public:
3228 };
3229 
// Arena constructor: initialization delegated to SharedCtor.
EnqueueRequest(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)3230 EnqueueRequest::EnqueueRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena,
3231                          bool is_message_owned)
3232   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
3233   SharedCtor(arena, is_message_owned);
3234   // @@protoc_insertion_point(arena_constructor:tensorflow.eager.EnqueueRequest)
3235 }
// Copy constructor: deep-copies the repeated queue_ field, merges unknown
// fields, then copies the lone scalar directly.
EnqueueRequest(const EnqueueRequest & from)3236 EnqueueRequest::EnqueueRequest(const EnqueueRequest& from)
3237   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
3238   EnqueueRequest* const _this = this; (void)_this;
3239   new (&_impl_) Impl_{
3240       decltype(_impl_.queue_){from._impl_.queue_}
3241     , decltype(_impl_.context_id_){}
3242     , /*decltype(_impl_._cached_size_)*/{}};
3243 
3244   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
3245   _this->_impl_.context_id_ = from._impl_.context_id_;
3246   // @@protoc_insertion_point(copy_constructor:tensorflow.eager.EnqueueRequest)
3247 }
3248 
// SharedCtor: placement-new initializes Impl_ with the arena-bound repeated
// field and a zeroed context_id_.
SharedCtor(::_pb::Arena * arena,bool is_message_owned)3249 inline void EnqueueRequest::SharedCtor(
3250     ::_pb::Arena* arena, bool is_message_owned) {
3251   (void)arena;
3252   (void)is_message_owned;
3253   new (&_impl_) Impl_{
3254       decltype(_impl_.queue_){arena}
3255     , decltype(_impl_.context_id_){::uint64_t{0u}}
3256     , /*decltype(_impl_._cached_size_)*/{}
3257   };
3258 }
3259 
// Destructor: arena-owned instances return early; the arena frees them.
~EnqueueRequest()3260 EnqueueRequest::~EnqueueRequest() {
3261   // @@protoc_insertion_point(destructor:tensorflow.eager.EnqueueRequest)
3262   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
3263   (void)arena;
3264     return;
3265   }
3266   SharedDtor();
3267 }
3268 
// SharedDtor: heap-owned path only — explicitly destroys the repeated field.
SharedDtor()3269 inline void EnqueueRequest::SharedDtor() {
3270   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
3271   _impl_.queue_.~RepeatedPtrField();
3272 }
3273 
// SetCachedSize: stores ByteSizeLong's result for serialization reuse.
SetCachedSize(int size) const3274 void EnqueueRequest::SetCachedSize(int size) const {
3275   _impl_._cached_size_.Set(size);
3276 }
3277 
// Clear: resets all fields to proto3 defaults and drops unknown fields.
Clear()3278 void EnqueueRequest::Clear() {
3279 // @@protoc_insertion_point(message_clear_start:tensorflow.eager.EnqueueRequest)
3280   ::uint32_t cached_has_bits = 0;
3281   // Prevent compiler warnings about cached_has_bits being unused
3282   (void) cached_has_bits;
3283 
3284   _impl_.queue_.Clear();
3285   _impl_.context_id_ = ::uint64_t{0u};
3286   _internal_metadata_.Clear<std::string>();
3287 }
3288 
// _InternalParse: wire parser. Tag constants encode (field << 3) | wiretype:
// 9 = field 1 I64 (fixed64 context_id), 26 = field 3 LEN (queue entries).
// Unknown fields are preserved as raw bytes.
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)3289 const char* EnqueueRequest::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
3290 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
3291   while (!ctx->Done(&ptr)) {
3292     ::uint32_t tag;
3293     ptr = ::_pbi::ReadTag(ptr, &tag);
3294     switch (tag >> 3) {
3295       // fixed64 context_id = 1;
3296       case 1:
3297         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 9)) {
3298           _impl_.context_id_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::uint64_t>(ptr);
3299           ptr += sizeof(::uint64_t);
3300         } else {
3301           goto handle_unusual;
3302         }
3303         continue;
3304       // repeated .tensorflow.eager.QueueItem queue = 3;
3305       case 3:
3306         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          // Back up one byte so each loop iteration can uniformly consume
          // the tag byte that ExpectTag peeked at.
3307           ptr -= 1;
3308           do {
3309             ptr += 1;
3310             ptr = ctx->ParseMessage(_internal_add_queue(), ptr);
3311             CHK_(ptr);
3312             if (!ctx->DataAvailable(ptr)) break;
3313           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<26>(ptr));
3314         } else {
3315           goto handle_unusual;
3316         }
3317         continue;
3318       default:
3319         goto handle_unusual;
3320     }  // switch
3321   handle_unusual:
    // tag == 0 is end-of-stream; wire type 4 is an enclosing END_GROUP.
3322     if ((tag == 0) || ((tag & 7) == 4)) {
3323       CHK_(ptr);
3324       ctx->SetLastTag(tag);
3325       goto message_done;
3326     }
3327     ptr = UnknownFieldParse(
3328         tag,
3329         _internal_metadata_.mutable_unknown_fields<std::string>(),
3330         ptr, ctx);
3331     CHK_(ptr != nullptr);
3332   }  // while
3333 message_done:
3334   return ptr;
3335 failure:
  // Parse errors surface to the caller as a null pointer.
3336   ptr = nullptr;
3337   goto message_done;
3338 #undef CHK_
3339 }
3340 
// _InternalSerialize: writes fields in ascending field-number order;
// context_id is skipped at its zero default (proto3), queue entries use the
// sizes cached by ByteSizeLong, unknown-field bytes go last.
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const3341 ::uint8_t* EnqueueRequest::_InternalSerialize(
3342     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
3343   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.EnqueueRequest)
3344   ::uint32_t cached_has_bits = 0;
3345   (void) cached_has_bits;
3346 
3347   // fixed64 context_id = 1;
3348   if (this->_internal_context_id() != 0) {
3349     target = stream->EnsureSpace(target);
3350     target = ::_pbi::WireFormatLite::WriteFixed64ToArray(1, this->_internal_context_id(), target);
3351   }
3352 
3353   // repeated .tensorflow.eager.QueueItem queue = 3;
3354   for (unsigned i = 0,
3355       n = static_cast<unsigned>(this->_internal_queue_size()); i < n; i++) {
3356     const auto& repfield = this->_internal_queue(i);
3357     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
3358         InternalWriteMessage(3, repfield, repfield.GetCachedSize(), target, stream);
3359   }
3360 
3361   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
3362     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
3363         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
3364   }
3365   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.EnqueueRequest)
3366   return target;
3367 }
3368 
// ByteSizeLong: one tag byte per queue entry plus each entry's size; the
// fixed64 context_id contributes tag (1) + payload (8) bytes only when
// non-zero. Result is cached for _InternalSerialize.
ByteSizeLong() const3369 size_t EnqueueRequest::ByteSizeLong() const {
3370 // @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.EnqueueRequest)
3371   size_t total_size = 0;
3372 
3373   ::uint32_t cached_has_bits = 0;
3374   // Prevent compiler warnings about cached_has_bits being unused
3375   (void) cached_has_bits;
3376 
3377   // repeated .tensorflow.eager.QueueItem queue = 3;
3378   total_size += 1UL * this->_internal_queue_size();
3379   for (const auto& msg : this->_impl_.queue_) {
3380     total_size +=
3381       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
3382   }
3383 
3384   // fixed64 context_id = 1;
3385   if (this->_internal_context_id() != 0) {
3386     total_size += 1 + 8;
3387   }
3388 
3389   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
3390     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
3391   }
3392   int cached_size = ::_pbi::ToCachedSize(total_size);
3393   SetCachedSize(cached_size);
3394   return total_size;
3395 }
3396 
// CheckTypeAndMergeFrom: type-erased merge; unchecked DownCast in release.
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)3397 void EnqueueRequest::CheckTypeAndMergeFrom(
3398     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
3399   MergeFrom(*::_pbi::DownCast<const EnqueueRequest*>(
3400       &from));
3401 }
3402 
// MergeFrom: appends queue entries; context_id is overwritten only when
// `from`'s value is non-zero (proto3 merge semantics).
MergeFrom(const EnqueueRequest & from)3403 void EnqueueRequest::MergeFrom(const EnqueueRequest& from) {
3404   EnqueueRequest* const _this = this;
3405   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.EnqueueRequest)
3406   GOOGLE_DCHECK_NE(&from, _this);
3407   ::uint32_t cached_has_bits = 0;
3408   (void) cached_has_bits;
3409 
3410   _this->_impl_.queue_.MergeFrom(from._impl_.queue_);
3411   if (from._internal_context_id() != 0) {
3412     _this->_internal_set_context_id(from._internal_context_id());
3413   }
3414   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
3415 }
3416 
// CopyFrom: Clear-then-MergeFrom; self-assignment is a no-op.
CopyFrom(const EnqueueRequest & from)3417 void EnqueueRequest::CopyFrom(const EnqueueRequest& from) {
3418 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.EnqueueRequest)
3419   if (&from == this) return;
3420   Clear();
3421   MergeFrom(from);
3422 }
3423 
// IsInitialized: proto3, no required fields — always true.
IsInitialized() const3424 bool EnqueueRequest::IsInitialized() const {
3425   return true;
3426 }
3427 
// InternalSwap: member-wise swap of metadata, repeated field, and scalar.
InternalSwap(EnqueueRequest * other)3428 void EnqueueRequest::InternalSwap(EnqueueRequest* other) {
3429   using std::swap;
3430   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
3431   _impl_.queue_.InternalSwap(&other->_impl_.queue_);
3432   swap(_impl_.context_id_, other->_impl_.context_id_);
3433 }
3434 
// GetTypeName: fully-qualified proto name as a literal (lite runtime).
GetTypeName() const3435 std::string EnqueueRequest::GetTypeName() const {
3436   return "tensorflow.eager.EnqueueRequest";
3437 }
3438 
3439 
3440 // ===================================================================
3441 
3442 class EnqueueResponse::_Internal {
3443  public:
3444 };
3445 
EnqueueResponse(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)3446 EnqueueResponse::EnqueueResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena,
3447                          bool is_message_owned)
3448   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
3449   SharedCtor(arena, is_message_owned);
3450   // @@protoc_insertion_point(arena_constructor:tensorflow.eager.EnqueueResponse)
3451 }
// Copy constructor: deep-copies the repeated queue_response field and any
// preserved unknown fields. The copy is always heap-owned (no arena).
EnqueueResponse::EnqueueResponse(const EnqueueResponse& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  EnqueueResponse* const _this = this; (void)_this;
  // Placement-new the Impl_ aggregate so members are initialized in
  // declaration order without a user-visible constructor.
  new (&_impl_) Impl_{
      decltype(_impl_.queue_response_){from._impl_.queue_response_}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.EnqueueResponse)
}
3462 
// Constructs all fields in place via aggregate placement-new; the repeated
// field is bound to |arena| so its elements share the message's lifetime.
inline void EnqueueResponse::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.queue_response_){arena}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
3472 
EnqueueResponse::~EnqueueResponse() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.EnqueueResponse)
  // If the message lives on an arena, the arena reclaims storage in bulk:
  // release only the metadata and skip per-field destruction.
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
3481 
// Destroys heap-owned field storage; never reached for arena-owned
// instances (the destructor returns early in that case).
inline void EnqueueResponse::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.queue_response_.~RepeatedPtrField();
}
3486 
// Records the byte size computed by ByteSizeLong() so serialization can
// reuse it without recomputing.
void EnqueueResponse::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
3490 
// Resets the message to its default (empty) state, dropping all elements
// of queue_response and any preserved unknown fields.
void EnqueueResponse::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.EnqueueResponse)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.queue_response_.Clear();
  _internal_metadata_.Clear<std::string>();
}
3500 
// Wire-format parser: dispatches on the field number (tag >> 3) and checks
// the wire type via the low-order tag bits. Unknown fields are preserved.
const char* EnqueueResponse::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // repeated .tensorflow.eager.QueueResponse queue_response = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          // Tight loop: consume consecutive elements carrying the same tag
          // (10 = field 1, length-delimited) without re-entering dispatch.
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_queue_response(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or wire type 4 (END_GROUP) ends this message's scope.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    // Unrecognized field: keep its raw bytes in the unknown-field string.
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
3543 
// Serializes fields in field-number order into |target|, using the
// per-element sizes cached by a prior ByteSizeLong() call.
::uint8_t* EnqueueResponse::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.EnqueueResponse)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // repeated .tensorflow.eager.QueueResponse queue_response = 1;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_queue_response_size()); i < n; i++) {
    const auto& repfield = this->_internal_queue_response(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(1, repfield, repfield.GetCachedSize(), target, stream);
  }

  // Unknown fields were captured verbatim during parsing; re-emit them.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.EnqueueResponse)
  return target;
}
3565 
// Computes the serialized size of the message and caches it for the
// subsequent _InternalSerialize pass.
size_t EnqueueResponse::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.EnqueueResponse)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.eager.QueueResponse queue_response = 1;
  // One byte of tag overhead per element, plus each element's own size.
  total_size += 1UL * this->_internal_queue_response_size();
  for (const auto& msg : this->_impl_.queue_response_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
3588 
// MessageLite entry point: the lite runtime has no reflection, so DownCast
// (DCHECK-verified in debug builds) converts to the concrete type.
void EnqueueResponse::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const EnqueueResponse*>(
      &from));
}
3594 
// Appends all of |from|'s queue_response elements and unknown fields onto
// this message; existing contents are kept.
void EnqueueResponse::MergeFrom(const EnqueueResponse& from) {
  EnqueueResponse* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.EnqueueResponse)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.queue_response_.MergeFrom(from._impl_.queue_response_);
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
3605 
CopyFrom(const EnqueueResponse & from)3606 void EnqueueResponse::CopyFrom(const EnqueueResponse& from) {
3607 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.EnqueueResponse)
3608   if (&from == this) return;
3609   Clear();
3610   MergeFrom(from);
3611 }
3612 
// This message declares no required fields, so every instance is
// trivially initialized.
bool EnqueueResponse::IsInitialized() const {
  return true;
}
3616 
// Constant-time field-by-field exchange, including unknown-field metadata.
void EnqueueResponse::InternalSwap(EnqueueResponse* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.queue_response_.InternalSwap(&other->_impl_.queue_response_);
}
3622 
// Returns the fully qualified message name as a literal (no descriptor in
// the lite runtime).
std::string EnqueueResponse::GetTypeName() const {
  return "tensorflow.eager.EnqueueResponse";
}
3626 
3627 
3628 // ===================================================================
3629 
// Friend-access helper for generated code; empty because
// WaitQueueDoneRequest has no singular submessage or oneof fields.
class WaitQueueDoneRequest::_Internal {
 public:
};
3633 
// Arena-aware constructor; all field initialization is delegated to
// SharedCtor.
WaitQueueDoneRequest::WaitQueueDoneRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.WaitQueueDoneRequest)
}
// Copy constructor: deep-copies the repeated op_id field (resetting its
// cached packed byte size to 0) and copies context_id.
WaitQueueDoneRequest::WaitQueueDoneRequest(const WaitQueueDoneRequest& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  WaitQueueDoneRequest* const _this = this; (void)_this;
  // Placement-new the Impl_ aggregate; context_id_ is value-initialized
  // here and assigned from |from| below.
  new (&_impl_) Impl_{
      decltype(_impl_.op_id_){from._impl_.op_id_}
    , /*decltype(_impl_._op_id_cached_byte_size_)*/{0}
    , decltype(_impl_.context_id_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _this->_impl_.context_id_ = from._impl_.context_id_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.WaitQueueDoneRequest)
}
3653 
// Constructs all fields in place via aggregate placement-new; op_id_ is
// arena-bound and context_id_ starts at its proto3 default of 0.
inline void WaitQueueDoneRequest::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.op_id_){arena}
    , /*decltype(_impl_._op_id_cached_byte_size_)*/{0}
    , decltype(_impl_.context_id_){::uint64_t{0u}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
3665 
WaitQueueDoneRequest::~WaitQueueDoneRequest() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.WaitQueueDoneRequest)
  // Arena-owned instances skip field destruction: the arena reclaims the
  // memory in bulk, so only the metadata is released here.
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
3674 
// Destroys heap-owned field storage; never reached for arena-owned
// instances.
inline void WaitQueueDoneRequest::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.op_id_.~RepeatedField();
}
3679 
// Records the byte size computed by ByteSizeLong() so serialization can
// reuse it without recomputing.
void WaitQueueDoneRequest::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
3683 
// Resets the message to its default state: empties op_id, zeroes
// context_id, and drops any preserved unknown fields.
void WaitQueueDoneRequest::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.WaitQueueDoneRequest)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.op_id_.Clear();
  _impl_.context_id_ = ::uint64_t{0u};
  _internal_metadata_.Clear<std::string>();
}
3694 
// Wire-format parser: dispatches on the field number (tag >> 3) and checks
// the wire type via the low-order tag bits. Unknown fields are preserved.
const char* WaitQueueDoneRequest::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // fixed64 context_id = 1;
      case 1:
        // Tag 9 = field 1, wire type FIXED64: read 8 little-endian bytes.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 9)) {
          _impl_.context_id_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::uint64_t>(ptr);
          ptr += sizeof(::uint64_t);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated int64 op_id = 2;
      case 2:
        // Tag 18 = packed (length-delimited) encoding; tag 16 = a single
        // unpacked varint element. Both are accepted per proto3 rules.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt64Parser(_internal_mutable_op_id(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<::uint8_t>(tag) == 16) {
          _internal_add_op_id(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or wire type 4 (END_GROUP) ends this message's scope.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    // Unrecognized field: keep its raw bytes in the unknown-field string.
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
3744 
// Serializes fields in field-number order. The packed op_id field relies on
// the byte size cached by a prior ByteSizeLong() call.
::uint8_t* WaitQueueDoneRequest::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.WaitQueueDoneRequest)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // fixed64 context_id = 1;
  // Proto3 implicit presence: only emitted when non-zero.
  if (this->_internal_context_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteFixed64ToArray(1, this->_internal_context_id(), target);
  }

  // repeated int64 op_id = 2;
  {
    int byte_size = _impl_._op_id_cached_byte_size_.load(std::memory_order_relaxed);
    if (byte_size > 0) {
      target = stream->WriteInt64Packed(
          2, _internal_op_id(), byte_size, target);
    }
  }

  // Unknown fields were captured verbatim during parsing; re-emit them.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.WaitQueueDoneRequest)
  return target;
}
3773 
// Computes the serialized size, caching the packed op_id payload size for
// _InternalSerialize and the total size for GetCachedSize().
size_t WaitQueueDoneRequest::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.WaitQueueDoneRequest)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated int64 op_id = 2;
  {
    size_t data_size = ::_pbi::WireFormatLite::
      Int64Size(this->_impl_.op_id_);
    // A non-empty packed field costs one tag byte plus a varint length
    // prefix in addition to the payload.
    if (data_size > 0) {
      total_size += 1 +
        ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
    }
    int cached_size = ::_pbi::ToCachedSize(data_size);
    _impl_._op_id_cached_byte_size_.store(cached_size,
                                    std::memory_order_relaxed);
    total_size += data_size;
  }

  // fixed64 context_id = 1;
  // One tag byte plus eight payload bytes when non-zero.
  if (this->_internal_context_id() != 0) {
    total_size += 1 + 8;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
3808 
// MessageLite entry point: the lite runtime has no reflection, so DownCast
// (DCHECK-verified in debug builds) converts to the concrete type.
void WaitQueueDoneRequest::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const WaitQueueDoneRequest*>(
      &from));
}
3814 
// Appends |from|'s op_id elements, overwrites context_id when |from| has a
// non-default value, and merges unknown fields.
void WaitQueueDoneRequest::MergeFrom(const WaitQueueDoneRequest& from) {
  WaitQueueDoneRequest* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.WaitQueueDoneRequest)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.op_id_.MergeFrom(from._impl_.op_id_);
  // Proto3 implicit presence: a zero context_id in |from| is "unset" and
  // must not clobber this message's value.
  if (from._internal_context_id() != 0) {
    _this->_internal_set_context_id(from._internal_context_id());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
3828 
CopyFrom(const WaitQueueDoneRequest & from)3829 void WaitQueueDoneRequest::CopyFrom(const WaitQueueDoneRequest& from) {
3830 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.WaitQueueDoneRequest)
3831   if (&from == this) return;
3832   Clear();
3833   MergeFrom(from);
3834 }
3835 
// This message declares no required fields, so every instance is
// trivially initialized.
bool WaitQueueDoneRequest::IsInitialized() const {
  return true;
}
3839 
InternalSwap(WaitQueueDoneRequest * other)3840 void WaitQueueDoneRequest::InternalSwap(WaitQueueDoneRequest* other) {
3841   using std::swap;
3842   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
3843   _impl_.op_id_.InternalSwap(&other->_impl_.op_id_);
3844   swap(_impl_.context_id_, other->_impl_.context_id_);
3845 }
3846 
// Returns the fully qualified message name as a literal (no descriptor in
// the lite runtime).
std::string WaitQueueDoneRequest::GetTypeName() const {
  return "tensorflow.eager.WaitQueueDoneRequest";
}
3850 
3851 
3852 // ===================================================================
3853 
// Friend-access helper for generated code; empty because
// WaitQueueDoneResponse declares no fields at all.
class WaitQueueDoneResponse::_Internal {
 public:
};
3857 
// Arena-aware constructor; all field initialization is delegated to
// SharedCtor.
WaitQueueDoneResponse::WaitQueueDoneResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.WaitQueueDoneResponse)
}
// Copy constructor for an empty message: only the cached-size slot and any
// preserved unknown fields need handling.
WaitQueueDoneResponse::WaitQueueDoneResponse(const WaitQueueDoneResponse& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  WaitQueueDoneResponse* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.WaitQueueDoneResponse)
}
3873 
// Constructs the (field-less) Impl_ aggregate in place.
inline void WaitQueueDoneResponse::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      /*decltype(_impl_._cached_size_)*/{}
  };
}
3882 
WaitQueueDoneResponse::~WaitQueueDoneResponse() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.WaitQueueDoneResponse)
  // Arena-owned instances skip field destruction: the arena reclaims the
  // memory in bulk, so only the metadata is released here.
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
3891 
// No fields to destroy; only the arena DCHECK remains.
inline void WaitQueueDoneResponse::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}
3895 
// Records the byte size computed by ByteSizeLong() so serialization can
// reuse it without recomputing.
void WaitQueueDoneResponse::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
3899 
// Resets the message: with no declared fields, only preserved unknown
// fields need to be dropped.
void WaitQueueDoneResponse::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.WaitQueueDoneResponse)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _internal_metadata_.Clear<std::string>();
}
3908 
// Parser for an empty message: every field encountered is by definition
// unknown and is preserved verbatim.
const char* WaitQueueDoneResponse::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    // Tag 0 or wire type 4 (END_GROUP) ends this message's scope.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
3932 
// Serializer for an empty message: only preserved unknown fields (if any)
// are written.
::uint8_t* WaitQueueDoneResponse::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.WaitQueueDoneResponse)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.WaitQueueDoneResponse)
  return target;
}
3946 
// Size computation for an empty message: only preserved unknown fields
// contribute.
size_t WaitQueueDoneResponse::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.WaitQueueDoneResponse)
  size_t total_size = 0;

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
3958 
// MessageLite entry point: the lite runtime has no reflection, so DownCast
// (DCHECK-verified in debug builds) converts to the concrete type.
void WaitQueueDoneResponse::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const WaitQueueDoneResponse*>(
      &from));
}
3964 
// Merge for an empty message: only unknown fields carry over.
void WaitQueueDoneResponse::MergeFrom(const WaitQueueDoneResponse& from) {
  WaitQueueDoneResponse* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.WaitQueueDoneResponse)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
3974 
CopyFrom(const WaitQueueDoneResponse & from)3975 void WaitQueueDoneResponse::CopyFrom(const WaitQueueDoneResponse& from) {
3976 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.WaitQueueDoneResponse)
3977   if (&from == this) return;
3978   Clear();
3979   MergeFrom(from);
3980 }
3981 
// This message declares no required fields, so every instance is
// trivially initialized.
bool WaitQueueDoneResponse::IsInitialized() const {
  return true;
}
3985 
// With no declared fields, only the unknown-field metadata is exchanged.
void WaitQueueDoneResponse::InternalSwap(WaitQueueDoneResponse* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
}
3990 
// Returns the fully qualified message name as a literal (no descriptor in
// the lite runtime).
std::string WaitQueueDoneResponse::GetTypeName() const {
  return "tensorflow.eager.WaitQueueDoneResponse";
}
3994 
3995 
3996 // ===================================================================
3997 
// Friend-access helper exposing the private operation_ submessage pointer
// to generated serialization code.
class RunComponentFunctionRequest::_Internal {
 public:
  static const ::tensorflow::eager::Operation& operation(const RunComponentFunctionRequest* msg);
};
4002 
// Dereferences the stored operation pointer; callers check
// _internal_has_operation() first, so the pointer is non-null here.
const ::tensorflow::eager::Operation&
RunComponentFunctionRequest::_Internal::operation(const RunComponentFunctionRequest* msg) {
  return *msg->_impl_.operation_;
}
// Arena-aware constructor; all field initialization is delegated to
// SharedCtor.
RunComponentFunctionRequest::RunComponentFunctionRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.RunComponentFunctionRequest)
}
// Copy constructor: deep-copies output_num (resetting its cached packed
// byte size), clones the operation submessage when set, and copies
// context_id. The copy is always heap-owned (no arena).
RunComponentFunctionRequest::RunComponentFunctionRequest(const RunComponentFunctionRequest& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  RunComponentFunctionRequest* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.output_num_){from._impl_.output_num_}
    , /*decltype(_impl_._output_num_cached_byte_size_)*/{0}
    , decltype(_impl_.operation_){nullptr}
    , decltype(_impl_.context_id_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  // Deep-copy the submessage only when present; otherwise keep nullptr.
  if (from._internal_has_operation()) {
    _this->_impl_.operation_ = new ::tensorflow::eager::Operation(*from._impl_.operation_);
  }
  _this->_impl_.context_id_ = from._impl_.context_id_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.RunComponentFunctionRequest)
}
4030 
// Constructs all fields in place: output_num_ arena-bound, operation_
// unset (nullptr), context_id_ at its proto3 default of 0.
inline void RunComponentFunctionRequest::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.output_num_){arena}
    , /*decltype(_impl_._output_num_cached_byte_size_)*/{0}
    , decltype(_impl_.operation_){nullptr}
    , decltype(_impl_.context_id_){::uint64_t{0u}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
4043 
RunComponentFunctionRequest::~RunComponentFunctionRequest() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.RunComponentFunctionRequest)
  // Arena-owned instances skip field destruction: the arena reclaims the
  // memory in bulk, so only the metadata is released here.
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
4052 
// Destroys heap-owned field storage. The default-instance guard prevents
// deleting operation_ through the shared immortal default instance.
inline void RunComponentFunctionRequest::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.output_num_.~RepeatedField();
  if (this != internal_default_instance()) delete _impl_.operation_;
}
4058 
// Records the byte size computed by ByteSizeLong() so serialization can
// reuse it without recomputing.
void RunComponentFunctionRequest::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
4062 
// Resets the message to its default state: empties output_num, releases
// the operation submessage (heap-owned only), zeroes context_id, and drops
// any preserved unknown fields.
void RunComponentFunctionRequest::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.RunComponentFunctionRequest)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.output_num_.Clear();
  // Only delete when heap-owned; arena-owned submessages are reclaimed by
  // the arena.
  if (GetArenaForAllocation() == nullptr && _impl_.operation_ != nullptr) {
    delete _impl_.operation_;
  }
  _impl_.operation_ = nullptr;
  _impl_.context_id_ = ::uint64_t{0u};
  _internal_metadata_.Clear<std::string>();
}
4077 
// Wire-format parser: dispatches on the field number (tag >> 3) and checks
// the wire type via the low-order tag bits. Unknown fields are preserved.
const char* RunComponentFunctionRequest::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // fixed64 context_id = 1;
      case 1:
        // Tag 9 = field 1, wire type FIXED64: read 8 little-endian bytes.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 9)) {
          _impl_.context_id_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::uint64_t>(ptr);
          ptr += sizeof(::uint64_t);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.eager.Operation operation = 2;
      case 2:
        // Tag 18 = field 2, length-delimited submessage.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr = ctx->ParseMessage(_internal_mutable_operation(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated int32 output_num = 3;
      case 3:
        // Tag 26 = packed (length-delimited) encoding; tag 24 = a single
        // unpacked varint element. Both are accepted per proto3 rules.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt32Parser(_internal_mutable_output_num(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<::uint8_t>(tag) == 24) {
          _internal_add_output_num(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or wire type 4 (END_GROUP) ends this message's scope.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    // Unrecognized field: keep its raw bytes in the unknown-field string.
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
4136 
// Serializes fields in field-number order. Packed output_num and the
// operation submessage rely on sizes cached by a prior ByteSizeLong() call.
::uint8_t* RunComponentFunctionRequest::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.RunComponentFunctionRequest)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // fixed64 context_id = 1;
  // Proto3 implicit presence: only emitted when non-zero.
  if (this->_internal_context_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteFixed64ToArray(1, this->_internal_context_id(), target);
  }

  // .tensorflow.eager.Operation operation = 2;
  if (this->_internal_has_operation()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(2, _Internal::operation(this),
        _Internal::operation(this).GetCachedSize(), target, stream);
  }

  // repeated int32 output_num = 3;
  {
    int byte_size = _impl_._output_num_cached_byte_size_.load(std::memory_order_relaxed);
    if (byte_size > 0) {
      target = stream->WriteInt32Packed(
          3, _internal_output_num(), byte_size, target);
    }
  }

  // Unknown fields were captured verbatim during parsing; re-emit them.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.RunComponentFunctionRequest)
  return target;
}
4172 
// Computes the serialized size, caching the packed output_num payload size
// for _InternalSerialize and the total size for GetCachedSize().
size_t RunComponentFunctionRequest::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.RunComponentFunctionRequest)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated int32 output_num = 3;
  {
    size_t data_size = ::_pbi::WireFormatLite::
      Int32Size(this->_impl_.output_num_);
    // A non-empty packed field costs one tag byte plus a varint length
    // prefix in addition to the payload.
    if (data_size > 0) {
      total_size += 1 +
        ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
    }
    int cached_size = ::_pbi::ToCachedSize(data_size);
    _impl_._output_num_cached_byte_size_.store(cached_size,
                                    std::memory_order_relaxed);
    total_size += data_size;
  }

  // .tensorflow.eager.Operation operation = 2;
  if (this->_internal_has_operation()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.operation_);
  }

  // fixed64 context_id = 1;
  // One tag byte plus eight payload bytes when non-zero.
  if (this->_internal_context_id() != 0) {
    total_size += 1 + 8;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
4214 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)4215 void RunComponentFunctionRequest::CheckTypeAndMergeFrom(
4216     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
4217   MergeFrom(*::_pbi::DownCast<const RunComponentFunctionRequest*>(
4218       &from));
4219 }
4220 
// Field-wise merge: appends repeated output_num, recursively merges the
// operation submessage, overwrites context_id only when the source value is
// non-default, and concatenates unknown fields.
void RunComponentFunctionRequest::MergeFrom(const RunComponentFunctionRequest& from) {
  RunComponentFunctionRequest* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.RunComponentFunctionRequest)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.output_num_.MergeFrom(from._impl_.output_num_);
  if (from._internal_has_operation()) {
    // Merges into (and lazily creates) this message's operation submessage.
    _this->_internal_mutable_operation()->::tensorflow::eager::Operation::MergeFrom(
        from._internal_operation());
  }
  if (from._internal_context_id() != 0) {
    _this->_internal_set_context_id(from._internal_context_id());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
4238 
CopyFrom(const RunComponentFunctionRequest & from)4239 void RunComponentFunctionRequest::CopyFrom(const RunComponentFunctionRequest& from) {
4240 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.RunComponentFunctionRequest)
4241   if (&from == this) return;
4242   Clear();
4243   MergeFrom(from);
4244 }
4245 
// Always true: this message declares no required fields, so any state is
// considered initialized.
bool RunComponentFunctionRequest::IsInitialized() const {
  return true;
}
4249 
// O(1) swap of internal state with `other`: metadata and the repeated field
// swap via their own InternalSwap, then the contiguous trivially-swappable
// field span [operation_, context_id_] is exchanged with a raw byte swap
// sized from the two field offsets.
void RunComponentFunctionRequest::InternalSwap(RunComponentFunctionRequest* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.output_num_.InternalSwap(&other->_impl_.output_num_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(RunComponentFunctionRequest, _impl_.context_id_)
      + sizeof(RunComponentFunctionRequest::_impl_.context_id_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(RunComponentFunctionRequest, _impl_.operation_)>(
          reinterpret_cast<char*>(&_impl_.operation_),
          reinterpret_cast<char*>(&other->_impl_.operation_));
}
4261 
GetTypeName() const4262 std::string RunComponentFunctionRequest::GetTypeName() const {
4263   return "tensorflow.eager.RunComponentFunctionRequest";
4264 }
4265 
4266 
4267 // ===================================================================
4268 
// Generated helper class for private accessors; empty because this message
// needs no has-bit or oneof helpers.
class RunComponentFunctionResponse::_Internal {
 public:
};
4272 
// Removes all elements from the repeated `shape` field.
void RunComponentFunctionResponse::clear_shape() {
  _impl_.shape_.Clear();
}
// Removes all elements from the repeated `tensor` field.
void RunComponentFunctionResponse::clear_tensor() {
  _impl_.tensor_.Clear();
}
// Arena-enabled constructor: base-class setup then field construction in
// SharedCtor, so arena and heap construction share one code path.
RunComponentFunctionResponse::RunComponentFunctionResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.RunComponentFunctionResponse)
}
// Copy constructor: placement-news the Impl_ aggregate, copy-constructing
// both repeated fields from the source, then copies unknown-field metadata.
RunComponentFunctionResponse::RunComponentFunctionResponse(const RunComponentFunctionResponse& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  RunComponentFunctionResponse* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.shape_){from._impl_.shape_}
    , decltype(_impl_.tensor_){from._impl_.tensor_}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.RunComponentFunctionResponse)
}
4296 
// Shared construction path: placement-news the Impl_ aggregate with both
// repeated fields bound to the given arena (nullptr means heap allocation).
inline void RunComponentFunctionResponse::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.shape_){arena}
    , decltype(_impl_.tensor_){arena}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
4307 
// Destructor: arena-owned messages skip field destruction entirely (the
// arena reclaims their memory); heap-owned messages run SharedDtor.
RunComponentFunctionResponse::~RunComponentFunctionResponse() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.RunComponentFunctionResponse)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
4316 
// Destroys the non-trivial fields; only ever called for heap-owned messages
// (the DCHECK enforces no arena is attached).
inline void RunComponentFunctionResponse::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.shape_.~RepeatedPtrField();
  _impl_.tensor_.~RepeatedPtrField();
}
4322 
// Stores the byte size computed by ByteSizeLong for later serialization.
void RunComponentFunctionResponse::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
4326 
// Resets the message to its default state: empties both repeated fields and
// drops any unknown fields.
void RunComponentFunctionResponse::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.RunComponentFunctionResponse)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.shape_.Clear();
  _impl_.tensor_.Clear();
  _internal_metadata_.Clear<std::string>();
}
4337 
// Wire-format parser. Dispatches on field number; recognized fields are
// repeated submessages `shape` (field 1) and `tensor` (field 2). Anything
// else (wrong wire type or unknown field number) is preserved as unknown
// fields. Returns nullptr on malformed input.
const char* RunComponentFunctionResponse::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // repeated .tensorflow.TensorShapeProto shape = 1;
      case 1:
        // 10 == (1 << 3) | 2: field 1, length-delimited wire type.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr -= 1;
          // Greedily consume consecutive elements with the same tag to avoid
          // re-entering the switch for each one.
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_shape(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.TensorProto tensor = 2;
      case 2:
        // 18 == (2 << 3) | 2: field 2, length-delimited wire type.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_tensor(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // tag 0 or an end-group tag terminates this message level.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
4394 
// Serializes fields in field-number order into `target`, relying on the
// per-element sizes cached by a prior ByteSizeLong pass, then appends any
// preserved unknown fields verbatim.
::uint8_t* RunComponentFunctionResponse::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.RunComponentFunctionResponse)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // repeated .tensorflow.TensorShapeProto shape = 1;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_shape_size()); i < n; i++) {
    const auto& repfield = this->_internal_shape(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(1, repfield, repfield.GetCachedSize(), target, stream);
  }

  // repeated .tensorflow.TensorProto tensor = 2;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_tensor_size()); i < n; i++) {
    const auto& repfield = this->_internal_tensor(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(2, repfield, repfield.GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.RunComponentFunctionResponse)
  return target;
}
4424 
// Computes the serialized size: one tag byte per repeated element plus each
// element's length-prefixed message size, plus raw unknown-field bytes.
// Caches the total for serialization.
size_t RunComponentFunctionResponse::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.RunComponentFunctionResponse)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.TensorShapeProto shape = 1;
  total_size += 1UL * this->_internal_shape_size();
  for (const auto& msg : this->_impl_.shape_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // repeated .tensorflow.TensorProto tensor = 2;
  total_size += 1UL * this->_internal_tensor_size();
  for (const auto& msg : this->_impl_.tensor_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
4454 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)4455 void RunComponentFunctionResponse::CheckTypeAndMergeFrom(
4456     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
4457   MergeFrom(*::_pbi::DownCast<const RunComponentFunctionResponse*>(
4458       &from));
4459 }
4460 
// Field-wise merge: appends both repeated fields from `from` and
// concatenates unknown fields.
void RunComponentFunctionResponse::MergeFrom(const RunComponentFunctionResponse& from) {
  RunComponentFunctionResponse* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.RunComponentFunctionResponse)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.shape_.MergeFrom(from._impl_.shape_);
  _this->_impl_.tensor_.MergeFrom(from._impl_.tensor_);
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
4472 
CopyFrom(const RunComponentFunctionResponse & from)4473 void RunComponentFunctionResponse::CopyFrom(const RunComponentFunctionResponse& from) {
4474 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.RunComponentFunctionResponse)
4475   if (&from == this) return;
4476   Clear();
4477   MergeFrom(from);
4478 }
4479 
// Always true: this message declares no required fields.
bool RunComponentFunctionResponse::IsInitialized() const {
  return true;
}
4483 
// O(1) swap with `other`: metadata and both repeated fields swap via their
// own InternalSwap (pointer swaps, no element copies).
void RunComponentFunctionResponse::InternalSwap(RunComponentFunctionResponse* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.shape_.InternalSwap(&other->_impl_.shape_);
  _impl_.tensor_.InternalSwap(&other->_impl_.tensor_);
}
4490 
GetTypeName() const4491 std::string RunComponentFunctionResponse::GetTypeName() const {
4492   return "tensorflow.eager.RunComponentFunctionResponse";
4493 }
4494 
4495 
4496 // ===================================================================
4497 
// Generated helper class for private accessors; empty because this message
// needs no has-bit or oneof helpers.
class KeepAliveRequest::_Internal {
 public:
};
4501 
// Arena-enabled constructor: base-class setup then field construction in
// SharedCtor, so arena and heap construction share one code path.
KeepAliveRequest::KeepAliveRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.KeepAliveRequest)
}
// Copy constructor: placement-news the Impl_ aggregate, then copies the
// scalar context_id field and any unknown-field metadata.
KeepAliveRequest::KeepAliveRequest(const KeepAliveRequest& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  KeepAliveRequest* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.context_id_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _this->_impl_.context_id_ = from._impl_.context_id_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.KeepAliveRequest)
}
4519 
// Shared construction path: placement-news the Impl_ aggregate with
// context_id zero-initialized. The arena is unused since the only field is
// a trivial scalar.
inline void KeepAliveRequest::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.context_id_){::uint64_t{0u}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
4529 
// Destructor: arena-owned messages skip field destruction (the arena
// reclaims their memory); heap-owned messages run SharedDtor.
KeepAliveRequest::~KeepAliveRequest() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.KeepAliveRequest)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
4538 
// No non-trivial fields to destroy; only verifies no arena is attached.
inline void KeepAliveRequest::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}
4542 
// Stores the byte size computed by ByteSizeLong for later serialization.
void KeepAliveRequest::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
4546 
// Resets the message to its default state: zeroes context_id and drops any
// unknown fields.
void KeepAliveRequest::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.KeepAliveRequest)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.context_id_ = ::uint64_t{0u};
  _internal_metadata_.Clear<std::string>();
}
4556 
// Wire-format parser. The only recognized field is fixed64 context_id
// (field 1); everything else is preserved as unknown fields. Returns
// nullptr on malformed input.
const char* KeepAliveRequest::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // fixed64 context_id = 1;
      case 1:
        // 9 == (1 << 3) | 1: field 1, 64-bit fixed wire type.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 9)) {
          _impl_.context_id_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::uint64_t>(ptr);
          ptr += sizeof(::uint64_t);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // tag 0 or an end-group tag terminates this message level.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
4594 
// Serializes context_id (skipped when zero, per proto3 default-value
// elision) followed by any preserved unknown fields.
::uint8_t* KeepAliveRequest::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.KeepAliveRequest)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // fixed64 context_id = 1;
  if (this->_internal_context_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteFixed64ToArray(1, this->_internal_context_id(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.KeepAliveRequest)
  return target;
}
4614 
// Computes the serialized size (9 bytes for a non-zero context_id: one tag
// byte plus 8 fixed bytes, plus raw unknown-field bytes) and caches it.
size_t KeepAliveRequest::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.KeepAliveRequest)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // fixed64 context_id = 1;
  if (this->_internal_context_id() != 0) {
    total_size += 1 + 8;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
4635 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)4636 void KeepAliveRequest::CheckTypeAndMergeFrom(
4637     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
4638   MergeFrom(*::_pbi::DownCast<const KeepAliveRequest*>(
4639       &from));
4640 }
4641 
// Field-wise merge: overwrites context_id only when the source value is
// non-default, and concatenates unknown fields.
void KeepAliveRequest::MergeFrom(const KeepAliveRequest& from) {
  KeepAliveRequest* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.KeepAliveRequest)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_context_id() != 0) {
    _this->_internal_set_context_id(from._internal_context_id());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
4654 
CopyFrom(const KeepAliveRequest & from)4655 void KeepAliveRequest::CopyFrom(const KeepAliveRequest& from) {
4656 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.KeepAliveRequest)
4657   if (&from == this) return;
4658   Clear();
4659   MergeFrom(from);
4660 }
4661 
// Always true: this message declares no required fields.
bool KeepAliveRequest::IsInitialized() const {
  return true;
}
4665 
// O(1) swap with `other`: swaps metadata and the scalar context_id.
void KeepAliveRequest::InternalSwap(KeepAliveRequest* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_impl_.context_id_, other->_impl_.context_id_);
}
4671 
GetTypeName() const4672 std::string KeepAliveRequest::GetTypeName() const {
4673   return "tensorflow.eager.KeepAliveRequest";
4674 }
4675 
4676 
4677 // ===================================================================
4678 
// Generated helper class for private accessors; empty because this message
// needs no has-bit or oneof helpers.
class KeepAliveResponse::_Internal {
 public:
};
4682 
// Arena-enabled constructor: base-class setup then field construction in
// SharedCtor, so arena and heap construction share one code path.
KeepAliveResponse::KeepAliveResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.KeepAliveResponse)
}
// Copy constructor: placement-news the Impl_ aggregate, then copies the
// scalar context_view_id field and any unknown-field metadata.
KeepAliveResponse::KeepAliveResponse(const KeepAliveResponse& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  KeepAliveResponse* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.context_view_id_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _this->_impl_.context_view_id_ = from._impl_.context_view_id_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.KeepAliveResponse)
}
4700 
// Shared construction path: placement-news the Impl_ aggregate with
// context_view_id zero-initialized. The arena is unused since the only
// field is a trivial scalar.
inline void KeepAliveResponse::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.context_view_id_){::uint64_t{0u}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
4710 
// Destructor: arena-owned messages skip field destruction (the arena
// reclaims their memory); heap-owned messages run SharedDtor.
KeepAliveResponse::~KeepAliveResponse() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.KeepAliveResponse)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
4719 
// No non-trivial fields to destroy; only verifies no arena is attached.
inline void KeepAliveResponse::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}
4723 
// Stores the byte size computed by ByteSizeLong for later serialization.
void KeepAliveResponse::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
4727 
// Resets the message to its default state: zeroes context_view_id and drops
// any unknown fields.
void KeepAliveResponse::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.KeepAliveResponse)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.context_view_id_ = ::uint64_t{0u};
  _internal_metadata_.Clear<std::string>();
}
4737 
// Wire-format parser. The only recognized field is fixed64 context_view_id
// (field 1); everything else is preserved as unknown fields. Returns
// nullptr on malformed input.
const char* KeepAliveResponse::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // fixed64 context_view_id = 1;
      case 1:
        // 9 == (1 << 3) | 1: field 1, 64-bit fixed wire type.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 9)) {
          _impl_.context_view_id_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::uint64_t>(ptr);
          ptr += sizeof(::uint64_t);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // tag 0 or an end-group tag terminates this message level.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
4775 
// Serializes context_view_id (skipped when zero, per proto3 default-value
// elision) followed by any preserved unknown fields.
::uint8_t* KeepAliveResponse::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.KeepAliveResponse)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // fixed64 context_view_id = 1;
  if (this->_internal_context_view_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteFixed64ToArray(1, this->_internal_context_view_id(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.KeepAliveResponse)
  return target;
}
4795 
// Computes the serialized size (9 bytes for a non-zero context_view_id: one
// tag byte plus 8 fixed bytes, plus raw unknown-field bytes) and caches it.
size_t KeepAliveResponse::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.KeepAliveResponse)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // fixed64 context_view_id = 1;
  if (this->_internal_context_view_id() != 0) {
    total_size += 1 + 8;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
4816 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)4817 void KeepAliveResponse::CheckTypeAndMergeFrom(
4818     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
4819   MergeFrom(*::_pbi::DownCast<const KeepAliveResponse*>(
4820       &from));
4821 }
4822 
// Field-wise merge: overwrites context_view_id only when the source value
// is non-default, and concatenates unknown fields.
void KeepAliveResponse::MergeFrom(const KeepAliveResponse& from) {
  KeepAliveResponse* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.KeepAliveResponse)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_context_view_id() != 0) {
    _this->_internal_set_context_view_id(from._internal_context_view_id());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
4835 
CopyFrom(const KeepAliveResponse & from)4836 void KeepAliveResponse::CopyFrom(const KeepAliveResponse& from) {
4837 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.KeepAliveResponse)
4838   if (&from == this) return;
4839   Clear();
4840   MergeFrom(from);
4841 }
4842 
// Always true: this message declares no required fields.
bool KeepAliveResponse::IsInitialized() const {
  return true;
}
4846 
// O(1) swap with `other`: swaps metadata and the scalar context_view_id.
void KeepAliveResponse::InternalSwap(KeepAliveResponse* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_impl_.context_view_id_, other->_impl_.context_view_id_);
}
4852 
GetTypeName() const4853 std::string KeepAliveResponse::GetTypeName() const {
4854   return "tensorflow.eager.KeepAliveResponse";
4855 }
4856 
4857 
4858 // ===================================================================
4859 
// Generated helper class for private accessors; empty because this message
// needs no has-bit or oneof helpers.
class CloseContextRequest::_Internal {
 public:
};
4863 
// Arena-enabled constructor: base-class setup then field construction in
// SharedCtor, so arena and heap construction share one code path.
CloseContextRequest::CloseContextRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.CloseContextRequest)
}
// Copy constructor: placement-news the Impl_ aggregate, then bulk-copies the
// contiguous trivially-copyable span [context_id_, context_view_id_] with a
// single memcpy, and copies any unknown-field metadata.
CloseContextRequest::CloseContextRequest(const CloseContextRequest& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  CloseContextRequest* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.context_id_){}
    , decltype(_impl_.context_view_id_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  ::memcpy(&_impl_.context_id_, &from._impl_.context_id_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.context_view_id_) -
    reinterpret_cast<char*>(&_impl_.context_id_)) + sizeof(_impl_.context_view_id_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.CloseContextRequest)
}
4884 
// Shared construction path: placement-news the Impl_ aggregate with both
// scalar fields zero-initialized. The arena is unused since all fields are
// trivial scalars.
inline void CloseContextRequest::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.context_id_){::uint64_t{0u}}
    , decltype(_impl_.context_view_id_){::uint64_t{0u}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
4895 
// Destructor: arena-owned messages skip field destruction (the arena
// reclaims their memory); heap-owned messages run SharedDtor.
CloseContextRequest::~CloseContextRequest() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.CloseContextRequest)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
4904 
// No non-trivial fields to destroy; only verifies no arena is attached.
inline void CloseContextRequest::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}
4908 
// Stores the byte size computed by ByteSizeLong for later serialization.
void CloseContextRequest::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
4912 
// Resets the message to its default state: zeroes the contiguous scalar
// span [context_id_, context_view_id_] with one memset and drops any
// unknown fields.
void CloseContextRequest::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.CloseContextRequest)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  ::memset(&_impl_.context_id_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.context_view_id_) -
      reinterpret_cast<char*>(&_impl_.context_id_)) + sizeof(_impl_.context_view_id_));
  _internal_metadata_.Clear<std::string>();
}
4924 
// Wire-format parser. Recognized fields are fixed64 context_id (field 1)
// and fixed64 context_view_id (field 2); everything else is preserved as
// unknown fields. Returns nullptr on malformed input.
const char* CloseContextRequest::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // fixed64 context_id = 1;
      case 1:
        // 9 == (1 << 3) | 1: field 1, 64-bit fixed wire type.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 9)) {
          _impl_.context_id_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::uint64_t>(ptr);
          ptr += sizeof(::uint64_t);
        } else {
          goto handle_unusual;
        }
        continue;
      // fixed64 context_view_id = 2;
      case 2:
        // 17 == (2 << 3) | 1: field 2, 64-bit fixed wire type.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 17)) {
          _impl_.context_view_id_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::uint64_t>(ptr);
          ptr += sizeof(::uint64_t);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // tag 0 or an end-group tag terminates this message level.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
4971 
// Writes non-default fields in field-number order, then appends any unknown
// fields verbatim. Returns the advanced output cursor.
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const4972 ::uint8_t* CloseContextRequest::_InternalSerialize(
4973     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
4974   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.CloseContextRequest)
4975   ::uint32_t cached_has_bits = 0;
4976   (void) cached_has_bits;
4977 
4978   // fixed64 context_id = 1;
4979   if (this->_internal_context_id() != 0) {
4980     target = stream->EnsureSpace(target);
4981     target = ::_pbi::WireFormatLite::WriteFixed64ToArray(1, this->_internal_context_id(), target);
4982   }
4983 
4984   // fixed64 context_view_id = 2;
4985   if (this->_internal_context_view_id() != 0) {
4986     target = stream->EnsureSpace(target);
4987     target = ::_pbi::WireFormatLite::WriteFixed64ToArray(2, this->_internal_context_view_id(), target);
4988   }
4989 
4990   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
4991     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
4992         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
4993   }
4994   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.CloseContextRequest)
4995   return target;
4996 }
4997 
// Computes the serialized size in bytes and caches it for _InternalSerialize.
// Each set fixed64 field costs 1 tag byte + 8 payload bytes.
ByteSizeLong() const4998 size_t CloseContextRequest::ByteSizeLong() const {
4999 // @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.CloseContextRequest)
5000   size_t total_size = 0;
5001 
5002   ::uint32_t cached_has_bits = 0;
5003   // Prevent compiler warnings about cached_has_bits being unused
5004   (void) cached_has_bits;
5005 
5006   // fixed64 context_id = 1;
5007   if (this->_internal_context_id() != 0) {
5008     total_size += 1 + 8;
5009   }
5010 
5011   // fixed64 context_view_id = 2;
5012   if (this->_internal_context_view_id() != 0) {
5013     total_size += 1 + 8;
5014   }
5015 
5016   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
5017     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
5018   }
5019   int cached_size = ::_pbi::ToCachedSize(total_size);
5020   SetCachedSize(cached_size);
5021   return total_size;
5022 }
5023 
// Lite-runtime merge entry point: there is no reflection, so the DownCast
// assumes `from` is the same concrete message type.
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)5024 void CloseContextRequest::CheckTypeAndMergeFrom(
5025     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
5026   MergeFrom(*::_pbi::DownCast<const CloseContextRequest*>(
5027       &from));
5028 }
5029 
// Field-wise merge: proto3 scalars are copied only when non-zero in `from`,
// so default values in `from` never clobber values already set here.
MergeFrom(const CloseContextRequest & from)5030 void CloseContextRequest::MergeFrom(const CloseContextRequest& from) {
5031   CloseContextRequest* const _this = this;
5032   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.CloseContextRequest)
5033   GOOGLE_DCHECK_NE(&from, _this);
5034   ::uint32_t cached_has_bits = 0;
5035   (void) cached_has_bits;
5036 
5037   if (from._internal_context_id() != 0) {
5038     _this->_internal_set_context_id(from._internal_context_id());
5039   }
5040   if (from._internal_context_view_id() != 0) {
5041     _this->_internal_set_context_view_id(from._internal_context_view_id());
5042   }
5043   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
5044 }
5045 
CopyFrom(const CloseContextRequest & from)5046 void CloseContextRequest::CopyFrom(const CloseContextRequest& from) {
5047 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.CloseContextRequest)
5048   if (&from == this) return;
5049   Clear();
5050   MergeFrom(from);
5051 }
5052 
// Proto3 messages have no required fields, so they are always initialized.
IsInitialized() const5053 bool CloseContextRequest::IsInitialized() const {
5054   return true;
5055 }
5056 
// Swaps unknown-field metadata, then byte-swaps the contiguous POD field range
// context_id_ .. context_view_id_ with a single memswap.
InternalSwap(CloseContextRequest * other)5057 void CloseContextRequest::InternalSwap(CloseContextRequest* other) {
5058   using std::swap;
5059   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
5060   ::PROTOBUF_NAMESPACE_ID::internal::memswap<
5061       PROTOBUF_FIELD_OFFSET(CloseContextRequest, _impl_.context_view_id_)
5062       + sizeof(CloseContextRequest::_impl_.context_view_id_)  // NOLINT
5063       - PROTOBUF_FIELD_OFFSET(CloseContextRequest, _impl_.context_id_)>(
5064           reinterpret_cast<char*>(&_impl_.context_id_),
5065           reinterpret_cast<char*>(&other->_impl_.context_id_));
5066 }
5067 
GetTypeName() const5068 std::string CloseContextRequest::GetTypeName() const {
5069   return "tensorflow.eager.CloseContextRequest";
5070 }
5071 
5072 
5073 // ===================================================================
5074 
// Internal accessor helper; empty because this message has no fields that
// need privileged access.
5075 class CloseContextResponse::_Internal {
5076  public:
5077 };
5078 
// Arena constructor: delegates member initialization to SharedCtor.
CloseContextResponse(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)5079 CloseContextResponse::CloseContextResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena,
5080                          bool is_message_owned)
5081   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
5082   SharedCtor(arena, is_message_owned);
5083   // @@protoc_insertion_point(arena_constructor:tensorflow.eager.CloseContextResponse)
5084 }
// Copy constructor: placement-initializes _impl_ and copies unknown fields.
// There are no declared fields to copy.
CloseContextResponse(const CloseContextResponse & from)5085 CloseContextResponse::CloseContextResponse(const CloseContextResponse& from)
5086   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
5087   CloseContextResponse* const _this = this; (void)_this;
5088   new (&_impl_) Impl_{
5089       /*decltype(_impl_._cached_size_)*/{}};
5090 
5091   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
5092   // @@protoc_insertion_point(copy_constructor:tensorflow.eager.CloseContextResponse)
5093 }
5094 
// Shared initialization used by all constructors: placement-new of _impl_.
SharedCtor(::_pb::Arena * arena,bool is_message_owned)5095 inline void CloseContextResponse::SharedCtor(
5096     ::_pb::Arena* arena, bool is_message_owned) {
5097   (void)arena;
5098   (void)is_message_owned;
5099   new (&_impl_) Impl_{
5100       /*decltype(_impl_._cached_size_)*/{}
5101   };
5102 }
5103 
// Destructor: if the metadata reports arena ownership the arena frees all
// memory, so SharedDtor must be skipped.
~CloseContextResponse()5104 CloseContextResponse::~CloseContextResponse() {
5105   // @@protoc_insertion_point(destructor:tensorflow.eager.CloseContextResponse)
5106   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
5107   (void)arena;
5108     return;
5109   }
5110   SharedDtor();
5111 }
5112 
// Releases heap-owned state; only reached for non-arena messages.
SharedDtor()5113 inline void CloseContextResponse::SharedDtor() {
5114   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
5115 }
5116 
// Stores the byte size computed by ByteSizeLong() so serialization can reuse it.
SetCachedSize(int size) const5117 void CloseContextResponse::SetCachedSize(int size) const {
5118   _impl_._cached_size_.Set(size);
5119 }
5120 
// Resets the message; with no declared fields only unknown fields are dropped.
Clear()5121 void CloseContextResponse::Clear() {
5122 // @@protoc_insertion_point(message_clear_start:tensorflow.eager.CloseContextResponse)
5123   ::uint32_t cached_has_bits = 0;
5124   // Prevent compiler warnings about cached_has_bits being unused
5125   (void) cached_has_bits;
5126 
5127   _internal_metadata_.Clear<std::string>();
5128 }
5129 
// Parser for a message with no declared fields: every tag is either an
// end marker (0 / end-group) or is stored as an unknown field.
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)5130 const char* CloseContextResponse::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
5131 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
5132   while (!ctx->Done(&ptr)) {
5133     ::uint32_t tag;
5134     ptr = ::_pbi::ReadTag(ptr, &tag);
5135     if ((tag == 0) || ((tag & 7) == 4)) {
5136       CHK_(ptr);
5137       ctx->SetLastTag(tag);
5138       goto message_done;
5139     }
5140     ptr = UnknownFieldParse(
5141         tag,
5142         _internal_metadata_.mutable_unknown_fields<std::string>(),
5143         ptr, ctx);
5144     CHK_(ptr != nullptr);
5145   }  // while
5146 message_done:
5147   return ptr;
5148 failure:
5149   ptr = nullptr;
5150   goto message_done;
5151 #undef CHK_
5152 }
5153 
// Serialization for a message with no declared fields: only preserved unknown
// fields are written.
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const5154 ::uint8_t* CloseContextResponse::_InternalSerialize(
5155     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
5156   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.CloseContextResponse)
5157   ::uint32_t cached_has_bits = 0;
5158   (void) cached_has_bits;
5159 
5160   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
5161     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
5162         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
5163   }
5164   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.CloseContextResponse)
5165   return target;
5166 }
5167 
// Size is just the preserved unknown-field bytes; result is cached.
ByteSizeLong() const5168 size_t CloseContextResponse::ByteSizeLong() const {
5169 // @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.CloseContextResponse)
5170   size_t total_size = 0;
5171 
5172   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
5173     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
5174   }
5175   int cached_size = ::_pbi::ToCachedSize(total_size);
5176   SetCachedSize(cached_size);
5177   return total_size;
5178 }
5179 
// Lite-runtime merge entry point; the cast assumes `from` is the same type.
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)5180 void CloseContextResponse::CheckTypeAndMergeFrom(
5181     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
5182   MergeFrom(*::_pbi::DownCast<const CloseContextResponse*>(
5183       &from));
5184 }
5185 
// Merge for a fieldless message: only unknown fields are merged.
MergeFrom(const CloseContextResponse & from)5186 void CloseContextResponse::MergeFrom(const CloseContextResponse& from) {
5187   CloseContextResponse* const _this = this;
5188   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.CloseContextResponse)
5189   GOOGLE_DCHECK_NE(&from, _this);
5190   ::uint32_t cached_has_bits = 0;
5191   (void) cached_has_bits;
5192 
5193   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
5194 }
5195 
CopyFrom(const CloseContextResponse & from)5196 void CloseContextResponse::CopyFrom(const CloseContextResponse& from) {
5197 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.CloseContextResponse)
5198   if (&from == this) return;
5199   Clear();
5200   MergeFrom(from);
5201 }
5202 
// Proto3 messages have no required fields, so they are always initialized.
IsInitialized() const5203 bool CloseContextResponse::IsInitialized() const {
5204   return true;
5205 }
5206 
// With no declared fields, swapping the metadata is all that is needed.
InternalSwap(CloseContextResponse * other)5207 void CloseContextResponse::InternalSwap(CloseContextResponse* other) {
5208   using std::swap;
5209   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
5210 }
5211 
GetTypeName() const5212 std::string CloseContextResponse::GetTypeName() const {
5213   return "tensorflow.eager.CloseContextResponse";
5214 }
5215 
5216 
5217 // ===================================================================
5218 
// Internal accessor helper exposing the raw submessage pointers (which are
// null when unset) without going through the public mutable accessors.
5219 class RegisterFunctionOp::_Internal {
5220  public:
5221   static const ::tensorflow::FunctionDef& function_def(const RegisterFunctionOp* msg);
5222   static const ::tensorflow::FunctionDefLibrary& library(const RegisterFunctionOp* msg);
5223 };
5224 
// Dereferences the stored pointer; caller must ensure function_def is set.
5225 const ::tensorflow::FunctionDef&
function_def(const RegisterFunctionOp * msg)5226 RegisterFunctionOp::_Internal::function_def(const RegisterFunctionOp* msg) {
5227   return *msg->_impl_.function_def_;
5228 }
// Dereferences the stored pointer; caller must ensure library is set.
5229 const ::tensorflow::FunctionDefLibrary&
library(const RegisterFunctionOp * msg)5230 RegisterFunctionOp::_Internal::library(const RegisterFunctionOp* msg) {
5231   return *msg->_impl_.library_;
5232 }
// Clears the function_def submessage. The object is deleted only when it is
// heap-owned (no arena); arena-owned submessages are just unlinked.
clear_function_def()5233 void RegisterFunctionOp::clear_function_def() {
5234   if (GetArenaForAllocation() == nullptr && _impl_.function_def_ != nullptr) {
5235     delete _impl_.function_def_;
5236   }
5237   _impl_.function_def_ = nullptr;
5238 }
// Clears the library submessage; deletes it only when heap-owned (no arena).
clear_library()5239 void RegisterFunctionOp::clear_library() {
5240   if (GetArenaForAllocation() == nullptr && _impl_.library_ != nullptr) {
5241     delete _impl_.library_;
5242   }
5243   _impl_.library_ = nullptr;
5244 }
// Arena constructor: delegates member initialization to SharedCtor.
RegisterFunctionOp(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)5245 RegisterFunctionOp::RegisterFunctionOp(::PROTOBUF_NAMESPACE_ID::Arena* arena,
5246                          bool is_message_owned)
5247   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
5248   SharedCtor(arena, is_message_owned);
5249   // @@protoc_insertion_point(arena_constructor:tensorflow.eager.RegisterFunctionOp)
5250 }
// Copy constructor: deep-copies the submessages only when set in `from`
// (unset submessages stay null), then copies the scalar field.
RegisterFunctionOp(const RegisterFunctionOp & from)5251 RegisterFunctionOp::RegisterFunctionOp(const RegisterFunctionOp& from)
5252   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
5253   RegisterFunctionOp* const _this = this; (void)_this;
5254   new (&_impl_) Impl_{
5255       decltype(_impl_.function_def_){nullptr}
5256     , decltype(_impl_.library_){nullptr}
5257     , decltype(_impl_.is_component_function_){}
5258     , /*decltype(_impl_._cached_size_)*/{}};
5259 
5260   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
5261   if (from._internal_has_function_def()) {
5262     _this->_impl_.function_def_ = new ::tensorflow::FunctionDef(*from._impl_.function_def_);
5263   }
5264   if (from._internal_has_library()) {
5265     _this->_impl_.library_ = new ::tensorflow::FunctionDefLibrary(*from._impl_.library_);
5266   }
5267   _this->_impl_.is_component_function_ = from._impl_.is_component_function_;
5268   // @@protoc_insertion_point(copy_constructor:tensorflow.eager.RegisterFunctionOp)
5269 }
5270 
// Shared initialization: null submessage pointers, false bool, empty cache.
SharedCtor(::_pb::Arena * arena,bool is_message_owned)5271 inline void RegisterFunctionOp::SharedCtor(
5272     ::_pb::Arena* arena, bool is_message_owned) {
5273   (void)arena;
5274   (void)is_message_owned;
5275   new (&_impl_) Impl_{
5276       decltype(_impl_.function_def_){nullptr}
5277     , decltype(_impl_.library_){nullptr}
5278     , decltype(_impl_.is_component_function_){false}
5279     , /*decltype(_impl_._cached_size_)*/{}
5280   };
5281 }
5282 
// Destructor: arena-owned messages return early (the arena frees everything);
// otherwise SharedDtor deletes the heap-owned submessages.
~RegisterFunctionOp()5283 RegisterFunctionOp::~RegisterFunctionOp() {
5284   // @@protoc_insertion_point(destructor:tensorflow.eager.RegisterFunctionOp)
5285   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
5286   (void)arena;
5287     return;
5288   }
5289   SharedDtor();
5290 }
5291 
// Deletes owned submessages; the default instance must never delete its
// (shared default) members, hence the self-check.
SharedDtor()5292 inline void RegisterFunctionOp::SharedDtor() {
5293   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
5294   if (this != internal_default_instance()) delete _impl_.function_def_;
5295   if (this != internal_default_instance()) delete _impl_.library_;
5296 }
5297 
// Stores the byte size computed by ByteSizeLong() so serialization can reuse it.
SetCachedSize(int size) const5298 void RegisterFunctionOp::SetCachedSize(int size) const {
5299   _impl_._cached_size_.Set(size);
5300 }
5301 
// Resets all fields: deletes heap-owned submessages (arena-owned ones are
// just unlinked), zeroes the bool, and drops unknown fields.
Clear()5302 void RegisterFunctionOp::Clear() {
5303 // @@protoc_insertion_point(message_clear_start:tensorflow.eager.RegisterFunctionOp)
5304   ::uint32_t cached_has_bits = 0;
5305   // Prevent compiler warnings about cached_has_bits being unused
5306   (void) cached_has_bits;
5307 
5308   if (GetArenaForAllocation() == nullptr && _impl_.function_def_ != nullptr) {
5309     delete _impl_.function_def_;
5310   }
5311   _impl_.function_def_ = nullptr;
5312   if (GetArenaForAllocation() == nullptr && _impl_.library_ != nullptr) {
5313     delete _impl_.library_;
5314   }
5315   _impl_.library_ = nullptr;
5316   _impl_.is_component_function_ = false;
5317   _internal_metadata_.Clear<std::string>();
5318 }
5319 
// Wire-format parser. Recognized tags: 10 (field 1, length-delimited
// FunctionDef), 16 (field 2, varint bool), 26 (field 3, length-delimited
// FunctionDefLibrary); anything else is preserved as an unknown field.
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)5320 const char* RegisterFunctionOp::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
5321 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
5322   while (!ctx->Done(&ptr)) {
5323     ::uint32_t tag;
5324     ptr = ::_pbi::ReadTag(ptr, &tag);
5325     switch (tag >> 3) {
5326       // .tensorflow.FunctionDef function_def = 1;
5327       case 1:
5328         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
5329           ptr = ctx->ParseMessage(_internal_mutable_function_def(), ptr);
5330           CHK_(ptr);
5331         } else {
5332           goto handle_unusual;
5333         }
5334         continue;
5335       // bool is_component_function = 2;
5336       case 2:
5337         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
5338           _impl_.is_component_function_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
5339           CHK_(ptr);
5340         } else {
5341           goto handle_unusual;
5342         }
5343         continue;
5344       // .tensorflow.FunctionDefLibrary library = 3;
5345       case 3:
5346         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
5347           ptr = ctx->ParseMessage(_internal_mutable_library(), ptr);
5348           CHK_(ptr);
5349         } else {
5350           goto handle_unusual;
5351         }
5352         continue;
5353       default:
5354         goto handle_unusual;
5355     }  // switch
  // Tag 0 or an end-group tag terminates the message; everything else is
  // recorded as an unknown field.
5356   handle_unusual:
5357     if ((tag == 0) || ((tag & 7) == 4)) {
5358       CHK_(ptr);
5359       ctx->SetLastTag(tag);
5360       goto message_done;
5361     }
5362     ptr = UnknownFieldParse(
5363         tag,
5364         _internal_metadata_.mutable_unknown_fields<std::string>(),
5365         ptr, ctx);
5366     CHK_(ptr != nullptr);
5367   }  // while
5368 message_done:
5369   return ptr;
5370 failure:
5371   ptr = nullptr;
5372   goto message_done;
5373 #undef CHK_
5374 }
5375 
// Writes set fields in field-number order; submessage lengths come from the
// sizes cached by a preceding ByteSizeLong() call.
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const5376 ::uint8_t* RegisterFunctionOp::_InternalSerialize(
5377     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
5378   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.RegisterFunctionOp)
5379   ::uint32_t cached_has_bits = 0;
5380   (void) cached_has_bits;
5381 
5382   // .tensorflow.FunctionDef function_def = 1;
5383   if (this->_internal_has_function_def()) {
5384     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
5385       InternalWriteMessage(1, _Internal::function_def(this),
5386         _Internal::function_def(this).GetCachedSize(), target, stream);
5387   }
5388 
5389   // bool is_component_function = 2;
5390   if (this->_internal_is_component_function() != 0) {
5391     target = stream->EnsureSpace(target);
5392     target = ::_pbi::WireFormatLite::WriteBoolToArray(2, this->_internal_is_component_function(), target);
5393   }
5394 
5395   // .tensorflow.FunctionDefLibrary library = 3;
5396   if (this->_internal_has_library()) {
5397     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
5398       InternalWriteMessage(3, _Internal::library(this),
5399         _Internal::library(this).GetCachedSize(), target, stream);
5400   }
5401 
5402   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
5403     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
5404         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
5405   }
5406   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.RegisterFunctionOp)
5407   return target;
5408 }
5409 
// Computes serialized size: 1 tag byte plus length-prefixed submessage size
// for each set message field, 1 + 1 bytes for a set bool; caches the result.
ByteSizeLong() const5410 size_t RegisterFunctionOp::ByteSizeLong() const {
5411 // @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.RegisterFunctionOp)
5412   size_t total_size = 0;
5413 
5414   ::uint32_t cached_has_bits = 0;
5415   // Prevent compiler warnings about cached_has_bits being unused
5416   (void) cached_has_bits;
5417 
5418   // .tensorflow.FunctionDef function_def = 1;
5419   if (this->_internal_has_function_def()) {
5420     total_size += 1 +
5421       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
5422         *_impl_.function_def_);
5423   }
5424 
5425   // .tensorflow.FunctionDefLibrary library = 3;
5426   if (this->_internal_has_library()) {
5427     total_size += 1 +
5428       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
5429         *_impl_.library_);
5430   }
5431 
5432   // bool is_component_function = 2;
5433   if (this->_internal_is_component_function() != 0) {
5434     total_size += 1 + 1;
5435   }
5436 
5437   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
5438     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
5439   }
5440   int cached_size = ::_pbi::ToCachedSize(total_size);
5441   SetCachedSize(cached_size);
5442   return total_size;
5443 }
5444 
// Lite-runtime merge entry point; the cast assumes `from` is the same type.
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)5445 void RegisterFunctionOp::CheckTypeAndMergeFrom(
5446     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
5447   MergeFrom(*::_pbi::DownCast<const RegisterFunctionOp*>(
5448       &from));
5449 }
5450 
// Field-wise merge: set submessages are merged recursively (lazily created
// via the mutable accessor); the proto3 bool is copied only when true.
MergeFrom(const RegisterFunctionOp & from)5451 void RegisterFunctionOp::MergeFrom(const RegisterFunctionOp& from) {
5452   RegisterFunctionOp* const _this = this;
5453   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.RegisterFunctionOp)
5454   GOOGLE_DCHECK_NE(&from, _this);
5455   ::uint32_t cached_has_bits = 0;
5456   (void) cached_has_bits;
5457 
5458   if (from._internal_has_function_def()) {
5459     _this->_internal_mutable_function_def()->::tensorflow::FunctionDef::MergeFrom(
5460         from._internal_function_def());
5461   }
5462   if (from._internal_has_library()) {
5463     _this->_internal_mutable_library()->::tensorflow::FunctionDefLibrary::MergeFrom(
5464         from._internal_library());
5465   }
5466   if (from._internal_is_component_function() != 0) {
5467     _this->_internal_set_is_component_function(from._internal_is_component_function());
5468   }
5469   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
5470 }
5471 
CopyFrom(const RegisterFunctionOp & from)5472 void RegisterFunctionOp::CopyFrom(const RegisterFunctionOp& from) {
5473 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.RegisterFunctionOp)
5474   if (&from == this) return;
5475   Clear();
5476   MergeFrom(from);
5477 }
5478 
// Proto3 messages have no required fields, so they are always initialized.
IsInitialized() const5479 bool RegisterFunctionOp::IsInitialized() const {
5480   return true;
5481 }
5482 
// Swaps metadata, then byte-swaps the contiguous field range from
// function_def_ through is_component_function_ (pointers + bool) in one memswap.
InternalSwap(RegisterFunctionOp * other)5483 void RegisterFunctionOp::InternalSwap(RegisterFunctionOp* other) {
5484   using std::swap;
5485   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
5486   ::PROTOBUF_NAMESPACE_ID::internal::memswap<
5487       PROTOBUF_FIELD_OFFSET(RegisterFunctionOp, _impl_.is_component_function_)
5488       + sizeof(RegisterFunctionOp::_impl_.is_component_function_)  // NOLINT
5489       - PROTOBUF_FIELD_OFFSET(RegisterFunctionOp, _impl_.function_def_)>(
5490           reinterpret_cast<char*>(&_impl_.function_def_),
5491           reinterpret_cast<char*>(&other->_impl_.function_def_));
5492 }
5493 
GetTypeName() const5494 std::string RegisterFunctionOp::GetTypeName() const {
5495   return "tensorflow.eager.RegisterFunctionOp";
5496 }
5497 
5498 
5499 // ===================================================================
5500 
// Internal accessor helper; empty because this message needs no privileged access.
5501 class CleanupFunctionOp::_Internal {
5502  public:
5503 };
5504 
// Arena constructor: delegates member initialization to SharedCtor.
CleanupFunctionOp(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)5505 CleanupFunctionOp::CleanupFunctionOp(::PROTOBUF_NAMESPACE_ID::Arena* arena,
5506                          bool is_message_owned)
5507   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
5508   SharedCtor(arena, is_message_owned);
5509   // @@protoc_insertion_point(arena_constructor:tensorflow.eager.CleanupFunctionOp)
5510 }
// Copy constructor: placement-initializes _impl_, copies unknown fields, then
// copies the scalar step_id_.
CleanupFunctionOp(const CleanupFunctionOp & from)5511 CleanupFunctionOp::CleanupFunctionOp(const CleanupFunctionOp& from)
5512   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
5513   CleanupFunctionOp* const _this = this; (void)_this;
5514   new (&_impl_) Impl_{
5515       decltype(_impl_.step_id_){}
5516     , /*decltype(_impl_._cached_size_)*/{}};
5517 
5518   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
5519   _this->_impl_.step_id_ = from._impl_.step_id_;
5520   // @@protoc_insertion_point(copy_constructor:tensorflow.eager.CleanupFunctionOp)
5521 }
5522 
// Shared initialization: zero step_id_, empty cached size.
SharedCtor(::_pb::Arena * arena,bool is_message_owned)5523 inline void CleanupFunctionOp::SharedCtor(
5524     ::_pb::Arena* arena, bool is_message_owned) {
5525   (void)arena;
5526   (void)is_message_owned;
5527   new (&_impl_) Impl_{
5528       decltype(_impl_.step_id_){::int64_t{0}}
5529     , /*decltype(_impl_._cached_size_)*/{}
5530   };
5531 }
5532 
// Destructor: arena-owned messages return early; the arena frees all memory.
~CleanupFunctionOp()5533 CleanupFunctionOp::~CleanupFunctionOp() {
5534   // @@protoc_insertion_point(destructor:tensorflow.eager.CleanupFunctionOp)
5535   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
5536   (void)arena;
5537     return;
5538   }
5539   SharedDtor();
5540 }
5541 
// Releases heap-owned state; only reached for non-arena messages.
SharedDtor()5542 inline void CleanupFunctionOp::SharedDtor() {
5543   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
5544 }
5545 
// Stores the byte size computed by ByteSizeLong() so serialization can reuse it.
SetCachedSize(int size) const5546 void CleanupFunctionOp::SetCachedSize(int size) const {
5547   _impl_._cached_size_.Set(size);
5548 }
5549 
// Resets step_id_ to its proto3 default (0) and drops unknown fields.
Clear()5550 void CleanupFunctionOp::Clear() {
5551 // @@protoc_insertion_point(message_clear_start:tensorflow.eager.CleanupFunctionOp)
5552   ::uint32_t cached_has_bits = 0;
5553   // Prevent compiler warnings about cached_has_bits being unused
5554   (void) cached_has_bits;
5555 
5556   _impl_.step_id_ = ::int64_t{0};
5557   _internal_metadata_.Clear<std::string>();
5558 }
5559 
// Wire-format parser. Recognized tag: 8 (field 1, varint int64 step_id);
// any other tag is preserved as an unknown field.
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)5560 const char* CleanupFunctionOp::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
5561 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
5562   while (!ctx->Done(&ptr)) {
5563     ::uint32_t tag;
5564     ptr = ::_pbi::ReadTag(ptr, &tag);
5565     switch (tag >> 3) {
5566       // int64 step_id = 1;
5567       case 1:
5568         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
5569           _impl_.step_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
5570           CHK_(ptr);
5571         } else {
5572           goto handle_unusual;
5573         }
5574         continue;
5575       default:
5576         goto handle_unusual;
5577     }  // switch
  // Tag 0 or an end-group tag terminates the message; everything else is
  // recorded as an unknown field.
5578   handle_unusual:
5579     if ((tag == 0) || ((tag & 7) == 4)) {
5580       CHK_(ptr);
5581       ctx->SetLastTag(tag);
5582       goto message_done;
5583     }
5584     ptr = UnknownFieldParse(
5585         tag,
5586         _internal_metadata_.mutable_unknown_fields<std::string>(),
5587         ptr, ctx);
5588     CHK_(ptr != nullptr);
5589   }  // while
5590 message_done:
5591   return ptr;
5592 failure:
5593   ptr = nullptr;
5594   goto message_done;
5595 #undef CHK_
5596 }
5597 
// Writes step_id (when non-zero) as a varint, then any unknown fields.
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const5598 ::uint8_t* CleanupFunctionOp::_InternalSerialize(
5599     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
5600   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.CleanupFunctionOp)
5601   ::uint32_t cached_has_bits = 0;
5602   (void) cached_has_bits;
5603 
5604   // int64 step_id = 1;
5605   if (this->_internal_step_id() != 0) {
5606     target = stream->EnsureSpace(target);
5607     target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_step_id(), target);
5608   }
5609 
5610   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
5611     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
5612         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
5613   }
5614   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.CleanupFunctionOp)
5615   return target;
5616 }
5617 
// Computes serialized size (tag byte + varint size of step_id when set) and
// caches it for serialization.
ByteSizeLong() const5618 size_t CleanupFunctionOp::ByteSizeLong() const {
5619 // @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.CleanupFunctionOp)
5620   size_t total_size = 0;
5621 
5622   ::uint32_t cached_has_bits = 0;
5623   // Prevent compiler warnings about cached_has_bits being unused
5624   (void) cached_has_bits;
5625 
5626   // int64 step_id = 1;
5627   if (this->_internal_step_id() != 0) {
5628     total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_step_id());
5629   }
5630 
5631   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
5632     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
5633   }
5634   int cached_size = ::_pbi::ToCachedSize(total_size);
5635   SetCachedSize(cached_size);
5636   return total_size;
5637 }
5638 
// Lite-runtime merge entry point; the cast assumes `from` is the same type.
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)5639 void CleanupFunctionOp::CheckTypeAndMergeFrom(
5640     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
5641   MergeFrom(*::_pbi::DownCast<const CleanupFunctionOp*>(
5642       &from));
5643 }
5644 
// Field-wise merge; the proto3 scalar is copied only when non-zero in `from`.
MergeFrom(const CleanupFunctionOp & from)5645 void CleanupFunctionOp::MergeFrom(const CleanupFunctionOp& from) {
5646   CleanupFunctionOp* const _this = this;
5647   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.CleanupFunctionOp)
5648   GOOGLE_DCHECK_NE(&from, _this);
5649   ::uint32_t cached_has_bits = 0;
5650   (void) cached_has_bits;
5651 
5652   if (from._internal_step_id() != 0) {
5653     _this->_internal_set_step_id(from._internal_step_id());
5654   }
5655   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
5656 }
5657 
CopyFrom(const CleanupFunctionOp & from)5658 void CleanupFunctionOp::CopyFrom(const CleanupFunctionOp& from) {
5659 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.CleanupFunctionOp)
5660   if (&from == this) return;
5661   Clear();
5662   MergeFrom(from);
5663 }
5664 
// Proto3 messages have no required fields, so they are always initialized.
IsInitialized() const5665 bool CleanupFunctionOp::IsInitialized() const {
5666   return true;
5667 }
5668 
// Swaps metadata and the single scalar field.
InternalSwap(CleanupFunctionOp * other)5669 void CleanupFunctionOp::InternalSwap(CleanupFunctionOp* other) {
5670   using std::swap;
5671   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
5672   swap(_impl_.step_id_, other->_impl_.step_id_);
5673 }
5674 
GetTypeName() const5675 std::string CleanupFunctionOp::GetTypeName() const {
5676   return "tensorflow.eager.CleanupFunctionOp";
5677 }
5678 
5679 
5680 // ===================================================================
5681 
// Internal accessor helper; empty because this message has no fields.
5682 class SyncRemoteExecutorForStream::_Internal {
5683  public:
5684 };
5685 
// Arena constructor: delegates member initialization to SharedCtor.
SyncRemoteExecutorForStream(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)5686 SyncRemoteExecutorForStream::SyncRemoteExecutorForStream(::PROTOBUF_NAMESPACE_ID::Arena* arena,
5687                          bool is_message_owned)
5688   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
5689   SharedCtor(arena, is_message_owned);
5690   // @@protoc_insertion_point(arena_constructor:tensorflow.eager.SyncRemoteExecutorForStream)
5691 }
// Copy constructor: the copy is heap-owned (no arena). Only the
// unknown-field metadata needs copying; the message has no fields.
SyncRemoteExecutorForStream::SyncRemoteExecutorForStream(const SyncRemoteExecutorForStream& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  SyncRemoteExecutorForStream* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.SyncRemoteExecutorForStream)
}
5701 
// Shared construction path: placement-news the Impl_ aggregate in place.
inline void SyncRemoteExecutorForStream::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      /*decltype(_impl_._cached_size_)*/{}
  };
}
5710 
SyncRemoteExecutorForStream::~SyncRemoteExecutorForStream() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.SyncRemoteExecutorForStream)
  // Arena-owned instances skip SharedDtor(): the arena reclaims memory in bulk.
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
5719 
// Frees heap-owned resources; must only run for non-arena instances.
inline void SyncRemoteExecutorForStream::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}
5723 
// Caches the byte size computed by ByteSizeLong() for later serialization.
void SyncRemoteExecutorForStream::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
5727 
// Resets the message to its default state (only unknown fields to clear here).
void SyncRemoteExecutorForStream::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.SyncRemoteExecutorForStream)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _internal_metadata_.Clear<std::string>();
}
5736 
// Wire-format parser. This message declares no fields, so every tag is either
// a terminator or is preserved verbatim as an unknown field.
const char* SyncRemoteExecutorForStream::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    // tag == 0: end of stream; wire type 4: end of an enclosing group.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    // Keep unrecognized data so re-serialization does not lose it.
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;   // nullptr signals a parse error to the caller.
  goto message_done;
#undef CHK_
}
5760 
// Serializes to the wire format. With no declared fields, only previously
// captured unknown fields are written back out.
::uint8_t* SyncRemoteExecutorForStream::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.SyncRemoteExecutorForStream)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.SyncRemoteExecutorForStream)
  return target;
}
5774 
// Computes the serialized size (unknown fields only) and caches it for
// the subsequent serialization pass.
size_t SyncRemoteExecutorForStream::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.SyncRemoteExecutorForStream)
  size_t total_size = 0;

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
5786 
// MessageLite entry point: the runtime guarantees |from| is really a
// SyncRemoteExecutorForStream, so an unchecked down-cast is used.
void SyncRemoteExecutorForStream::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const SyncRemoteExecutorForStream*>(
      &from));
}
5792 
// Merges |from| into this message; with no declared fields, only the
// unknown-field metadata is merged.
void SyncRemoteExecutorForStream::MergeFrom(const SyncRemoteExecutorForStream& from) {
  SyncRemoteExecutorForStream* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.SyncRemoteExecutorForStream)
  GOOGLE_DCHECK_NE(&from, _this);  // Self-merge is a caller bug (debug-only check).
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
5802 
CopyFrom(const SyncRemoteExecutorForStream & from)5803 void SyncRemoteExecutorForStream::CopyFrom(const SyncRemoteExecutorForStream& from) {
5804 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.SyncRemoteExecutorForStream)
5805   if (&from == this) return;
5806   Clear();
5807   MergeFrom(from);
5808 }
5809 
// Always true: this message has no required fields to validate.
bool SyncRemoteExecutorForStream::IsInitialized() const {
  return true;
}
5813 
// Swaps contents with |other|; only the unknown-field metadata exists to swap.
void SyncRemoteExecutorForStream::InternalSwap(SyncRemoteExecutorForStream* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
}
5818 
// Returns the fully-qualified protobuf type name of this message.
std::string SyncRemoteExecutorForStream::GetTypeName() const {
  return "tensorflow.eager.SyncRemoteExecutorForStream";
}
5822 
5823 
5824 // ===================================================================
5825 
// Generated helper class giving internal code access to private members;
// empty because SendTensorOp needs no internal accessors.
class SendTensorOp::_Internal {
 public:
};
5829 
// Removes all elements of the repeated `tensors` field.
void SendTensorOp::clear_tensors() {
  _impl_.tensors_.Clear();
}
// Arena-enabled constructor; field construction is delegated to SharedCtor().
SendTensorOp::SendTensorOp(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.SendTensorOp)
}
// Copy constructor: deep-copies every field from |from|; the copy is
// heap-owned (never arena-allocated).
SendTensorOp::SendTensorOp(const SendTensorOp& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  SendTensorOp* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.tensors_){from._impl_.tensors_}  // repeated field: deep copy
    , decltype(_impl_.device_name_){}
    , decltype(_impl_.op_id_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.device_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.device_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // Copy the string only when non-empty; empty keeps the shared default.
  if (!from._internal_device_name().empty()) {
    _this->_impl_.device_name_.Set(from._internal_device_name(),
      _this->GetArenaForAllocation());
  }
  _this->_impl_.op_id_ = from._impl_.op_id_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.SendTensorOp)
}
5860 
// Shared construction path: placement-news Impl_ with default field values
// (empty repeated tensors, default string, op_id = 0).
inline void SendTensorOp::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.tensors_){arena}
    , decltype(_impl_.device_name_){}
    , decltype(_impl_.op_id_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.device_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.device_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}
5876 
SendTensorOp::~SendTensorOp() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.SendTensorOp)
  // Arena-owned instances skip SharedDtor(): the arena reclaims memory in bulk.
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
5885 
// Frees heap-owned field storage; must only run for non-arena instances.
inline void SendTensorOp::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.tensors_.~RepeatedPtrField();
  _impl_.device_name_.Destroy();
}
5891 
// Caches the byte size computed by ByteSizeLong() for later serialization.
void SendTensorOp::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
5895 
// Resets every field to its default value and drops unknown fields.
void SendTensorOp::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.SendTensorOp)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.tensors_.Clear();
  _impl_.device_name_.ClearToEmpty();
  _impl_.op_id_ = ::int64_t{0};
  _internal_metadata_.Clear<std::string>();
}
5907 
// Wire-format parser for SendTensorOp. Dispatches on the field number in
// each tag; unrecognized tags are preserved as unknown fields.
const char* SendTensorOp::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int64 op_id = 1;
      case 1:
        // Tag byte 8 = field 1, varint wire type.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.op_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.TensorProto tensors = 2;
      case 2:
        // Tag byte 18 = field 2, length-delimited; loop while consecutive
        // elements of the repeated field follow back-to-back.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_tensors(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // string device_name = 3;
      case 3:
        // Tag byte 26 = field 3, length-delimited; proto3 strings must be UTF-8.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          auto str = _internal_mutable_device_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // tag == 0: end of stream; wire type 4: end of an enclosing group.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    // Keep unrecognized data so re-serialization does not lose it.
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;   // nullptr signals a parse error to the caller.
  goto message_done;
#undef CHK_
}
5970 
// Serializes set fields in field-number order, then any unknown fields.
// Proto3 defaults (op_id == 0, empty device_name) are omitted from the wire.
::uint8_t* SendTensorOp::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.SendTensorOp)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 op_id = 1;
  if (this->_internal_op_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_op_id(), target);
  }

  // repeated .tensorflow.TensorProto tensors = 2;
  // Uses the size cached by a prior ByteSizeLong() pass for each element.
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_tensors_size()); i < n; i++) {
    const auto& repfield = this->_internal_tensors(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(2, repfield, repfield.GetCachedSize(), target, stream);
  }

  // string device_name = 3;
  if (!this->_internal_device_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_device_name().data(), static_cast<int>(this->_internal_device_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.eager.SendTensorOp.device_name");
    target = stream->WriteStringMaybeAliased(
        3, this->_internal_device_name(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.SendTensorOp)
  return target;
}
6008 
// Computes the serialized size of this message and caches it for the
// serialization pass (which relies on GetCachedSize of submessages).
size_t SendTensorOp::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.SendTensorOp)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.TensorProto tensors = 2;
  // One byte of tag per element (field number 2 fits in a single tag byte).
  total_size += 1UL * this->_internal_tensors_size();
  for (const auto& msg : this->_impl_.tensors_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // string device_name = 3;
  if (!this->_internal_device_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_device_name());
  }

  // int64 op_id = 1;
  if (this->_internal_op_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_op_id());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
6043 
// MessageLite entry point: the runtime guarantees |from| is really a
// SendTensorOp, so an unchecked down-cast is used.
void SendTensorOp::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const SendTensorOp*>(
      &from));
}
6049 
// Field-wise merge of |from| into this message: repeated tensors are
// appended, non-default scalars/strings overwrite, unknown fields append.
void SendTensorOp::MergeFrom(const SendTensorOp& from) {
  SendTensorOp* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.SendTensorOp)
  GOOGLE_DCHECK_NE(&from, _this);  // Self-merge is a caller bug (debug-only check).
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.tensors_.MergeFrom(from._impl_.tensors_);
  // Proto3: empty string / zero int64 mean "unset" and are skipped.
  if (!from._internal_device_name().empty()) {
    _this->_internal_set_device_name(from._internal_device_name());
  }
  if (from._internal_op_id() != 0) {
    _this->_internal_set_op_id(from._internal_op_id());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
6066 
CopyFrom(const SendTensorOp & from)6067 void SendTensorOp::CopyFrom(const SendTensorOp& from) {
6068 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.SendTensorOp)
6069   if (&from == this) return;
6070   Clear();
6071   MergeFrom(from);
6072 }
6073 
// Always true: this message has no required fields to validate.
bool SendTensorOp::IsInitialized() const {
  return true;
}
6077 
// Swaps every field with |other|; the string swap is arena-aware so the
// bytes land in storage owned by the correct arena.
void SendTensorOp::InternalSwap(SendTensorOp* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.tensors_.InternalSwap(&other->_impl_.tensors_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.device_name_, lhs_arena,
      &other->_impl_.device_name_, rhs_arena
  );
  swap(_impl_.op_id_, other->_impl_.op_id_);
}
6090 
// Returns the fully-qualified protobuf type name of this message.
std::string SendTensorOp::GetTypeName() const {
  return "tensorflow.eager.SendTensorOp";
}
6094 
6095 
6096 // ===================================================================
6097 
// Generated helper granting internal code access to the private tensor_
// pointer without going through the public accessors.
class SendPackedHandleOp_LocalTensorHandle::_Internal {
 public:
  static const ::tensorflow::TensorProto& tensor(const SendPackedHandleOp_LocalTensorHandle* msg);
};

// Unchecked dereference: callers must first verify the field is present.
const ::tensorflow::TensorProto&
SendPackedHandleOp_LocalTensorHandle::_Internal::tensor(const SendPackedHandleOp_LocalTensorHandle* msg) {
  return *msg->_impl_.tensor_;
}
// Clears the `tensor` submessage; deletes it only when heap-owned
// (arena-owned submessages are reclaimed with the arena).
void SendPackedHandleOp_LocalTensorHandle::clear_tensor() {
  if (GetArenaForAllocation() == nullptr && _impl_.tensor_ != nullptr) {
    delete _impl_.tensor_;
  }
  _impl_.tensor_ = nullptr;
}
// Arena-enabled constructor; field construction is delegated to SharedCtor().
SendPackedHandleOp_LocalTensorHandle::SendPackedHandleOp_LocalTensorHandle(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.SendPackedHandleOp.LocalTensorHandle)
}
// Copy constructor: deep-copies the device string and, when present, the
// tensor submessage; the copy is heap-owned (never arena-allocated).
SendPackedHandleOp_LocalTensorHandle::SendPackedHandleOp_LocalTensorHandle(const SendPackedHandleOp_LocalTensorHandle& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  SendPackedHandleOp_LocalTensorHandle* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.device_){}
    , decltype(_impl_.tensor_){nullptr}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.device_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.device_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // Copy the string only when non-empty; empty keeps the shared default.
  if (!from._internal_device().empty()) {
    _this->_impl_.device_.Set(from._internal_device(),
      _this->GetArenaForAllocation());
  }
  // Deep-copy the tensor submessage only when |from| actually has one.
  if (from._internal_has_tensor()) {
    _this->_impl_.tensor_ = new ::tensorflow::TensorProto(*from._impl_.tensor_);
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.SendPackedHandleOp.LocalTensorHandle)
}
6141 
// Shared construction path: placement-news Impl_ with default field values
// (default string, null tensor pointer).
inline void SendPackedHandleOp_LocalTensorHandle::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.device_){}
    , decltype(_impl_.tensor_){nullptr}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.device_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.device_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}
6156 
SendPackedHandleOp_LocalTensorHandle::~SendPackedHandleOp_LocalTensorHandle() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.SendPackedHandleOp.LocalTensorHandle)
  // Arena-owned instances skip SharedDtor(): the arena reclaims memory in bulk.
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
6165 
// Frees heap-owned field storage; must only run for non-arena instances.
inline void SendPackedHandleOp_LocalTensorHandle::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.device_.Destroy();
  // The default instance must never delete its (shared) submessage pointer.
  if (this != internal_default_instance()) delete _impl_.tensor_;
}
6171 
// Caches the byte size computed by ByteSizeLong() for later serialization.
void SendPackedHandleOp_LocalTensorHandle::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
6175 
// Resets every field to its default value and drops unknown fields.
// The tensor submessage is deleted only when heap-owned.
void SendPackedHandleOp_LocalTensorHandle::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.SendPackedHandleOp.LocalTensorHandle)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.device_.ClearToEmpty();
  if (GetArenaForAllocation() == nullptr && _impl_.tensor_ != nullptr) {
    delete _impl_.tensor_;
  }
  _impl_.tensor_ = nullptr;
  _internal_metadata_.Clear<std::string>();
}
6189 
// Wire-format parser. Dispatches on the field number in each tag;
// unrecognized tags are preserved as unknown fields.
const char* SendPackedHandleOp_LocalTensorHandle::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .tensorflow.TensorProto tensor = 1;
      case 1:
        // Tag byte 10 = field 1, length-delimited (submessage).
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr = ctx->ParseMessage(_internal_mutable_tensor(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string device = 2;
      case 2:
        // Tag byte 18 = field 2, length-delimited; proto3 strings must be UTF-8.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_device();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // tag == 0: end of stream; wire type 4: end of an enclosing group.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    // Keep unrecognized data so re-serialization does not lose it.
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;   // nullptr signals a parse error to the caller.
  goto message_done;
#undef CHK_
}
6238 
// Serializes set fields in field-number order, then any unknown fields.
// The tensor submessage and an empty device string are omitted when unset.
::uint8_t* SendPackedHandleOp_LocalTensorHandle::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.SendPackedHandleOp.LocalTensorHandle)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.TensorProto tensor = 1;
  // Uses the size cached by a prior ByteSizeLong() pass.
  if (this->_internal_has_tensor()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(1, _Internal::tensor(this),
        _Internal::tensor(this).GetCachedSize(), target, stream);
  }

  // string device = 2;
  if (!this->_internal_device().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_device().data(), static_cast<int>(this->_internal_device().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.eager.SendPackedHandleOp.LocalTensorHandle.device");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_device(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.SendPackedHandleOp.LocalTensorHandle)
  return target;
}
6269 
// Computes the serialized size of this message and caches it for the
// serialization pass.
size_t SendPackedHandleOp_LocalTensorHandle::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.SendPackedHandleOp.LocalTensorHandle)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string device = 2;
  if (!this->_internal_device().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_device());
  }

  // .tensorflow.TensorProto tensor = 1;
  if (this->_internal_has_tensor()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.tensor_);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
6299 
// MessageLite entry point: the runtime guarantees |from| is really a
// SendPackedHandleOp_LocalTensorHandle, so an unchecked down-cast is used.
void SendPackedHandleOp_LocalTensorHandle::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const SendPackedHandleOp_LocalTensorHandle*>(
      &from));
}
6305 
// Field-wise merge of |from|: a non-empty device overwrites; a present
// tensor submessage is recursively merged; unknown fields are appended.
void SendPackedHandleOp_LocalTensorHandle::MergeFrom(const SendPackedHandleOp_LocalTensorHandle& from) {
  SendPackedHandleOp_LocalTensorHandle* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.SendPackedHandleOp.LocalTensorHandle)
  GOOGLE_DCHECK_NE(&from, _this);  // Self-merge is a caller bug (debug-only check).
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_device().empty()) {
    _this->_internal_set_device(from._internal_device());
  }
  if (from._internal_has_tensor()) {
    _this->_internal_mutable_tensor()->::tensorflow::TensorProto::MergeFrom(
        from._internal_tensor());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
6322 
CopyFrom(const SendPackedHandleOp_LocalTensorHandle & from)6323 void SendPackedHandleOp_LocalTensorHandle::CopyFrom(const SendPackedHandleOp_LocalTensorHandle& from) {
6324 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.SendPackedHandleOp.LocalTensorHandle)
6325   if (&from == this) return;
6326   Clear();
6327   MergeFrom(from);
6328 }
6329 
// Always true: this message has no required fields to validate.
bool SendPackedHandleOp_LocalTensorHandle::IsInitialized() const {
  return true;
}
6333 
// Swaps every field with |other|; the string swap is arena-aware, and the
// tensor pointers are exchanged directly.
void SendPackedHandleOp_LocalTensorHandle::InternalSwap(SendPackedHandleOp_LocalTensorHandle* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.device_, lhs_arena,
      &other->_impl_.device_, rhs_arena
  );
  swap(_impl_.tensor_, other->_impl_.tensor_);
}
6345 
// Returns the fully-qualified protobuf type name of this message.
std::string SendPackedHandleOp_LocalTensorHandle::GetTypeName() const {
  return "tensorflow.eager.SendPackedHandleOp.LocalTensorHandle";
}
6349 
6350 
6351 // ===================================================================
6352 
// Generated helper granting internal code access to the private oneof
// members without going through the public accessors.
class SendPackedHandleOp_Handle::_Internal {
 public:
  static const ::tensorflow::eager::SendPackedHandleOp_LocalTensorHandle& local_handle(const SendPackedHandleOp_Handle* msg);
  static const ::tensorflow::eager::RemoteTensorHandle& remote_handle(const SendPackedHandleOp_Handle* msg);
};

// Unchecked oneof access: callers must first verify the active member.
const ::tensorflow::eager::SendPackedHandleOp_LocalTensorHandle&
SendPackedHandleOp_Handle::_Internal::local_handle(const SendPackedHandleOp_Handle* msg) {
  return *msg->_impl_.item_.local_handle_;
}
const ::tensorflow::eager::RemoteTensorHandle&
SendPackedHandleOp_Handle::_Internal::remote_handle(const SendPackedHandleOp_Handle* msg) {
  return *msg->_impl_.item_.remote_handle_;
}
// Takes ownership of |local_handle| as the active `item` oneof member.
// Passing nullptr just clears the oneof.
void SendPackedHandleOp_Handle::set_allocated_local_handle(::tensorflow::eager::SendPackedHandleOp_LocalTensorHandle* local_handle) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  clear_item();  // Release whichever oneof member was previously active.
  if (local_handle) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
      ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(local_handle);
    // If the submessage lives on a different arena (or heap), copy/move it
    // into this message's arena so ownership stays consistent.
    if (message_arena != submessage_arena) {
      local_handle = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, local_handle, submessage_arena);
    }
    set_has_local_handle();
    _impl_.item_.local_handle_ = local_handle;
  }
  // @@protoc_insertion_point(field_set_allocated:tensorflow.eager.SendPackedHandleOp.Handle.local_handle)
}
// Takes ownership of |remote_handle| as the active `item` oneof member.
// Passing nullptr just clears the oneof.
void SendPackedHandleOp_Handle::set_allocated_remote_handle(::tensorflow::eager::RemoteTensorHandle* remote_handle) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  clear_item();  // Release whichever oneof member was previously active.
  if (remote_handle) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
                reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(remote_handle));
    // If the submessage lives on a different arena (or heap), copy/move it
    // into this message's arena so ownership stays consistent.
    if (message_arena != submessage_arena) {
      remote_handle = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, remote_handle, submessage_arena);
    }
    set_has_remote_handle();
    _impl_.item_.remote_handle_ = remote_handle;
  }
  // @@protoc_insertion_point(field_set_allocated:tensorflow.eager.SendPackedHandleOp.Handle.remote_handle)
}
// Clears `remote_handle` if it is the active oneof member; the submessage
// is deleted only when heap-owned (arenas reclaim their own memory).
void SendPackedHandleOp_Handle::clear_remote_handle() {
  if (_internal_has_remote_handle()) {
    if (GetArenaForAllocation() == nullptr) {
      delete _impl_.item_.remote_handle_;
    }
    clear_has_item();
  }
}
// Arena-enabled constructor; field construction is delegated to SharedCtor().
SendPackedHandleOp_Handle::SendPackedHandleOp_Handle(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned) 
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.SendPackedHandleOp.Handle)
}
// Copy constructor.  Deep-copies whichever member of the `item` oneof is set
// in |from| (or leaves the oneof unset) plus any preserved unknown fields.
// The new message is heap-owned (no arena).
SendPackedHandleOp_Handle::SendPackedHandleOp_Handle(const SendPackedHandleOp_Handle& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  SendPackedHandleOp_Handle* const _this = this; (void)_this;
  // Placement-new the aggregate impl so every member starts default/zero
  // initialized before the oneof copy below.
  new (&_impl_) Impl_{
      decltype(_impl_.item_){}
    , /*decltype(_impl_._cached_size_)*/{}
    , /*decltype(_impl_._oneof_case_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  clear_has_item();
  // Copy only the active oneof member; merging into a freshly-created
  // submessage is equivalent to a copy here.
  switch (from.item_case()) {
    case kLocalHandle: {
      _this->_internal_mutable_local_handle()->::tensorflow::eager::SendPackedHandleOp_LocalTensorHandle::MergeFrom(
          from._internal_local_handle());
      break;
    }
    case kRemoteHandle: {
      _this->_internal_mutable_remote_handle()->::tensorflow::eager::RemoteTensorHandle::MergeFrom(
          from._internal_remote_handle());
      break;
    }
    case ITEM_NOT_SET: {
      break;
    }
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.SendPackedHandleOp.Handle)
}
6439 
// Common constructor body: value-initializes the impl aggregate and marks the
// `item` oneof as unset.
inline void SendPackedHandleOp_Handle::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.item_){}
    , /*decltype(_impl_._cached_size_)*/{}
    , /*decltype(_impl_._oneof_case_)*/{}
  };
  clear_has_item();
}
6451 
// Destructor.  For arena-allocated messages only the metadata is released
// (the arena reclaims everything else in bulk); otherwise SharedDtor() frees
// any heap-owned oneof submessage.
SendPackedHandleOp_Handle::~SendPackedHandleOp_Handle() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.SendPackedHandleOp.Handle)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
6460 
SharedDtor()6461 inline void SendPackedHandleOp_Handle::SharedDtor() {
6462   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
6463   if (has_item()) {
6464     clear_item();
6465   }
6466 }
6467 
// Stores the byte size computed by ByteSizeLong() so serialization can reuse
// it without recomputing.
void SendPackedHandleOp_Handle::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
6471 
// Resets the `item` oneof to the unset state, heap-deleting the currently
// active member (if any) when this message is not arena-allocated.
void SendPackedHandleOp_Handle::clear_item() {
// @@protoc_insertion_point(one_of_clear_start:tensorflow.eager.SendPackedHandleOp.Handle)
  switch (item_case()) {
    case kLocalHandle: {
      if (GetArenaForAllocation() == nullptr) {
        delete _impl_.item_.local_handle_;
      }
      break;
    }
    case kRemoteHandle: {
      if (GetArenaForAllocation() == nullptr) {
        delete _impl_.item_.remote_handle_;
      }
      break;
    }
    case ITEM_NOT_SET: {
      // Nothing to release.
      break;
    }
  }
  _impl_._oneof_case_[0] = ITEM_NOT_SET;
}
6493 
6494 
// Resets the message to its default state: clears the `item` oneof and drops
// any preserved unknown fields.
void SendPackedHandleOp_Handle::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.SendPackedHandleOp.Handle)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  clear_item();
  _internal_metadata_.Clear<std::string>();
}
6504 
// Wire-format parser.  Field 1 (local_handle) and field 2 (remote_handle)
// select the corresponding member of the `item` oneof; unrecognized tags are
// preserved as unknown fields.  Returns the advanced pointer on success, or
// nullptr on a malformed payload.
const char* SendPackedHandleOp_Handle::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .tensorflow.eager.SendPackedHandleOp.LocalTensorHandle local_handle = 1;
      case 1:
        // 10 == (1 << 3 | 2): field 1, length-delimited.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr = ctx->ParseMessage(_internal_mutable_local_handle(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.eager.RemoteTensorHandle remote_handle = 2;
      case 2:
        // 18 == (2 << 3 | 2): field 2, length-delimited.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr = ctx->ParseMessage(_internal_mutable_remote_handle(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or an end-group tag (wire type 4) terminates the enclosing scope.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    // Anything else is kept verbatim in the unknown-field set.
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
6551 
// Serializes the message into |target| using the eps-copy output stream.
// Only the active `item` member is written; unknown fields are appended raw.
// Relies on cached submessage sizes, so ByteSizeLong() must have run first.
::uint8_t* SendPackedHandleOp_Handle::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.SendPackedHandleOp.Handle)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.eager.SendPackedHandleOp.LocalTensorHandle local_handle = 1;
  if (_internal_has_local_handle()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(1, _Internal::local_handle(this),
        _Internal::local_handle(this).GetCachedSize(), target, stream);
  }

  // .tensorflow.eager.RemoteTensorHandle remote_handle = 2;
  if (_internal_has_remote_handle()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(2, _Internal::remote_handle(this),
        _Internal::remote_handle(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.SendPackedHandleOp.Handle)
  return target;
}
6579 
// Computes the serialized size in bytes: a 1-byte tag plus the submessage
// size for whichever `item` member is set, plus raw unknown-field bytes.
// The result is cached (via SetCachedSize) for _InternalSerialize().
size_t SendPackedHandleOp_Handle::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.SendPackedHandleOp.Handle)
  size_t total_size = 0;

  switch (item_case()) {
    // .tensorflow.eager.SendPackedHandleOp.LocalTensorHandle local_handle = 1;
    case kLocalHandle: {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          *_impl_.item_.local_handle_);
      break;
    }
    // .tensorflow.eager.RemoteTensorHandle remote_handle = 2;
    case kRemoteHandle: {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          *_impl_.item_.remote_handle_);
      break;
    }
    case ITEM_NOT_SET: {
      break;
    }
  }
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
6610 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)6611 void SendPackedHandleOp_Handle::CheckTypeAndMergeFrom(
6612     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
6613   MergeFrom(*::_pbi::DownCast<const SendPackedHandleOp_Handle*>(
6614       &from));
6615 }
6616 
// Merges |from| into this message.  The active member of |from|'s `item`
// oneof is merged into this message's corresponding member (activating it
// via _internal_mutable_*() first); unknown fields from |from| are appended.
// Merging a message into itself is a programming error (see the DCHECK).
void SendPackedHandleOp_Handle::MergeFrom(const SendPackedHandleOp_Handle& from) {
  SendPackedHandleOp_Handle* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.SendPackedHandleOp.Handle)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  switch (from.item_case()) {
    case kLocalHandle: {
      _this->_internal_mutable_local_handle()->::tensorflow::eager::SendPackedHandleOp_LocalTensorHandle::MergeFrom(
          from._internal_local_handle());
      break;
    }
    case kRemoteHandle: {
      _this->_internal_mutable_remote_handle()->::tensorflow::eager::RemoteTensorHandle::MergeFrom(
          from._internal_remote_handle());
      break;
    }
    case ITEM_NOT_SET: {
      // Source oneof unset: nothing to merge.
      break;
    }
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
6641 
CopyFrom(const SendPackedHandleOp_Handle & from)6642 void SendPackedHandleOp_Handle::CopyFrom(const SendPackedHandleOp_Handle& from) {
6643 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.SendPackedHandleOp.Handle)
6644   if (&from == this) return;
6645   Clear();
6646   MergeFrom(from);
6647 }
6648 
// Always true: this message declares no required fields to validate.
bool SendPackedHandleOp_Handle::IsInitialized() const {
  return true;
}
6652 
// Constant-time state swap with |other|: metadata, the raw oneof storage,
// and the oneof case tag.
// NOTE(review): raw pointer swap assumes both messages share an arena (or
// neither has one) — confirm against the generated Swap() caller.
void SendPackedHandleOp_Handle::InternalSwap(SendPackedHandleOp_Handle* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_impl_.item_, other->_impl_.item_);
  swap(_impl_._oneof_case_[0], other->_impl_._oneof_case_[0]);
}
6659 
GetTypeName() const6660 std::string SendPackedHandleOp_Handle::GetTypeName() const {
6661   return "tensorflow.eager.SendPackedHandleOp.Handle";
6662 }
6663 
6664 
6665 // ===================================================================
6666 
// Generated helper for privileged field access; SendPackedHandleOp needs no
// such accessors, so the class is intentionally empty.
class SendPackedHandleOp::_Internal {
 public:
};
6670 
// Arena-enabled constructor.  Field initialization is delegated to
// SharedCtor(), which is shared with the other constructor paths.
SendPackedHandleOp::SendPackedHandleOp(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.eager.SendPackedHandleOp)
}
// Copy constructor.  Deep-copies the repeated `handles` field (via its copy
// constructor in the Impl_ initializer), the `device_name` string, and the
// `op_id` scalar, plus any preserved unknown fields.  Heap-owned (no arena).
SendPackedHandleOp::SendPackedHandleOp(const SendPackedHandleOp& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  SendPackedHandleOp* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.handles_){from._impl_.handles_}
    , decltype(_impl_.device_name_){}
    , decltype(_impl_.op_id_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.device_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.device_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // Only copy the string payload when non-empty; empty keeps the shared
  // default representation.
  if (!from._internal_device_name().empty()) {
    _this->_impl_.device_name_.Set(from._internal_device_name(),
      _this->GetArenaForAllocation());
  }
  _this->_impl_.op_id_ = from._impl_.op_id_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.eager.SendPackedHandleOp)
}
6698 
// Common constructor body: ties the repeated `handles` field to |arena|,
// zero-initializes `op_id`, and sets `device_name` to its shared default.
inline void SendPackedHandleOp::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.handles_){arena}
    , decltype(_impl_.device_name_){}
    , decltype(_impl_.op_id_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.device_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.device_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}
6714 
// Destructor.  For arena-allocated messages only the metadata is released
// (the arena reclaims everything else in bulk); otherwise SharedDtor()
// destroys the owned fields.
SendPackedHandleOp::~SendPackedHandleOp() {
  // @@protoc_insertion_point(destructor:tensorflow.eager.SendPackedHandleOp)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
6723 
// Releases heap-owned fields: the repeated `handles` container and the
// `device_name` string.  Only reached for non-arena messages (see DCHECK).
inline void SendPackedHandleOp::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.handles_.~RepeatedPtrField();
  _impl_.device_name_.Destroy();
}
6729 
// Stores the byte size computed by ByteSizeLong() so serialization can reuse
// it without recomputing.
void SendPackedHandleOp::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
6733 
// Resets every field to its default: empties `handles`, clears
// `device_name` (keeping the default representation), zeroes `op_id`, and
// drops preserved unknown fields.
void SendPackedHandleOp::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.eager.SendPackedHandleOp)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.handles_.Clear();
  _impl_.device_name_.ClearToEmpty();
  _impl_.op_id_ = ::int64_t{0};
  _internal_metadata_.Clear<std::string>();
}
6745 
// Wire-format parser.  Field 1 is the varint `op_id`, field 2 the repeated
// `handles` submessages, field 3 the UTF-8-validated `device_name` string.
// Unrecognized tags go to the unknown-field set.  Returns the advanced
// pointer on success, or nullptr on a malformed payload.
const char* SendPackedHandleOp::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int64 op_id = 1;
      case 1:
        // 8 == (1 << 3 | 0): field 1, varint.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.op_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.eager.SendPackedHandleOp.Handle handles = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          // Fast path: consume consecutive elements carrying the same tag
          // without re-entering the dispatch loop.
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_handles(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // string device_name = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          auto str = _internal_mutable_device_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          // proto3 strings must be valid UTF-8.
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or an end-group tag (wire type 4) terminates the enclosing scope.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
6808 
// Serializes the message in field-number order (op_id, handles,
// device_name), skipping fields at their default values; unknown fields are
// appended raw.  Relies on cached submessage sizes from ByteSizeLong().
::uint8_t* SendPackedHandleOp::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.eager.SendPackedHandleOp)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 op_id = 1;
  if (this->_internal_op_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_op_id(), target);
  }

  // repeated .tensorflow.eager.SendPackedHandleOp.Handle handles = 2;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_handles_size()); i < n; i++) {
    const auto& repfield = this->_internal_handles(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(2, repfield, repfield.GetCachedSize(), target, stream);
  }

  // string device_name = 3;
  if (!this->_internal_device_name().empty()) {
    // proto3 strings must be valid UTF-8; this logs/errors on violation.
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_device_name().data(), static_cast<int>(this->_internal_device_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.eager.SendPackedHandleOp.device_name");
    target = stream->WriteStringMaybeAliased(
        3, this->_internal_device_name(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.eager.SendPackedHandleOp)
  return target;
}
6846 
// Computes the serialized size in bytes: tag + payload for each non-default
// field plus raw unknown-field bytes.  The result is cached (via
// SetCachedSize) for _InternalSerialize().
size_t SendPackedHandleOp::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.eager.SendPackedHandleOp)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.eager.SendPackedHandleOp.Handle handles = 2;
  // One tag byte per element, plus each element's length-delimited size.
  total_size += 1UL * this->_internal_handles_size();
  for (const auto& msg : this->_impl_.handles_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // string device_name = 3;
  if (!this->_internal_device_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_device_name());
  }

  // int64 op_id = 1;
  if (this->_internal_op_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_op_id());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
6881 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)6882 void SendPackedHandleOp::CheckTypeAndMergeFrom(
6883     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
6884   MergeFrom(*::_pbi::DownCast<const SendPackedHandleOp*>(
6885       &from));
6886 }
6887 
// Merges |from| into this message: appends all `handles` elements, then
// overwrites `device_name` and `op_id` only when |from| carries non-default
// values.  Unknown fields from |from| are appended.  Merging a message into
// itself is a programming error (see the DCHECK).
void SendPackedHandleOp::MergeFrom(const SendPackedHandleOp& from) {
  SendPackedHandleOp* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.eager.SendPackedHandleOp)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.handles_.MergeFrom(from._impl_.handles_);
  if (!from._internal_device_name().empty()) {
    _this->_internal_set_device_name(from._internal_device_name());
  }
  if (from._internal_op_id() != 0) {
    _this->_internal_set_op_id(from._internal_op_id());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
6904 
CopyFrom(const SendPackedHandleOp & from)6905 void SendPackedHandleOp::CopyFrom(const SendPackedHandleOp& from) {
6906 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.eager.SendPackedHandleOp)
6907   if (&from == this) return;
6908   Clear();
6909   MergeFrom(from);
6910 }
6911 
// Always true: this message declares no required fields to validate.
bool SendPackedHandleOp::IsInitialized() const {
  return true;
}
6915 
// Constant-time state swap with |other|: metadata, the repeated `handles`
// container, the arena-aware `device_name` string (which needs both arenas
// to swap correctly), and the `op_id` scalar.
void SendPackedHandleOp::InternalSwap(SendPackedHandleOp* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.handles_.InternalSwap(&other->_impl_.handles_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.device_name_, lhs_arena,
      &other->_impl_.device_name_, rhs_arena
  );
  swap(_impl_.op_id_, other->_impl_.op_id_);
}
6928 
GetTypeName() const6929 std::string SendPackedHandleOp::GetTypeName() const {
6930   return "tensorflow.eager.SendPackedHandleOp";
6931 }
6932 
6933 
6934 // @@protoc_insertion_point(namespace_scope)
6935 }  // namespace eager
6936 }  // namespace tensorflow
6937 PROTOBUF_NAMESPACE_OPEN
// Arena::CreateMaybeMessage<T> specializations for every generated message
// type in this file: each routes arena-aware construction through
// Arena::CreateMessageInternal so the message is placed on |arena| when one
// is provided (heap-allocated when |arena| is null).
template<> PROTOBUF_NOINLINE ::tensorflow::eager::Operation_Input*
Arena::CreateMaybeMessage< ::tensorflow::eager::Operation_Input >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::Operation_Input >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::Operation_AttrsEntry_DoNotUse*
Arena::CreateMaybeMessage< ::tensorflow::eager::Operation_AttrsEntry_DoNotUse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::Operation_AttrsEntry_DoNotUse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::Operation*
Arena::CreateMaybeMessage< ::tensorflow::eager::Operation >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::Operation >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::QueueItem*
Arena::CreateMaybeMessage< ::tensorflow::eager::QueueItem >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::QueueItem >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::QueueResponse*
Arena::CreateMaybeMessage< ::tensorflow::eager::QueueResponse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::QueueResponse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::CreateContextRequest*
Arena::CreateMaybeMessage< ::tensorflow::eager::CreateContextRequest >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::CreateContextRequest >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::CreateContextResponse*
Arena::CreateMaybeMessage< ::tensorflow::eager::CreateContextResponse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::CreateContextResponse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::UpdateContextRequest*
Arena::CreateMaybeMessage< ::tensorflow::eager::UpdateContextRequest >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::UpdateContextRequest >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::UpdateContextResponse*
Arena::CreateMaybeMessage< ::tensorflow::eager::UpdateContextResponse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::UpdateContextResponse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::EnqueueRequest*
Arena::CreateMaybeMessage< ::tensorflow::eager::EnqueueRequest >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::EnqueueRequest >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::EnqueueResponse*
Arena::CreateMaybeMessage< ::tensorflow::eager::EnqueueResponse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::EnqueueResponse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::WaitQueueDoneRequest*
Arena::CreateMaybeMessage< ::tensorflow::eager::WaitQueueDoneRequest >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::WaitQueueDoneRequest >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::WaitQueueDoneResponse*
Arena::CreateMaybeMessage< ::tensorflow::eager::WaitQueueDoneResponse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::WaitQueueDoneResponse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::RunComponentFunctionRequest*
Arena::CreateMaybeMessage< ::tensorflow::eager::RunComponentFunctionRequest >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::RunComponentFunctionRequest >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::RunComponentFunctionResponse*
Arena::CreateMaybeMessage< ::tensorflow::eager::RunComponentFunctionResponse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::RunComponentFunctionResponse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::KeepAliveRequest*
Arena::CreateMaybeMessage< ::tensorflow::eager::KeepAliveRequest >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::KeepAliveRequest >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::KeepAliveResponse*
Arena::CreateMaybeMessage< ::tensorflow::eager::KeepAliveResponse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::KeepAliveResponse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::CloseContextRequest*
Arena::CreateMaybeMessage< ::tensorflow::eager::CloseContextRequest >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::CloseContextRequest >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::CloseContextResponse*
Arena::CreateMaybeMessage< ::tensorflow::eager::CloseContextResponse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::CloseContextResponse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::RegisterFunctionOp*
Arena::CreateMaybeMessage< ::tensorflow::eager::RegisterFunctionOp >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::RegisterFunctionOp >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::CleanupFunctionOp*
Arena::CreateMaybeMessage< ::tensorflow::eager::CleanupFunctionOp >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::CleanupFunctionOp >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::SyncRemoteExecutorForStream*
Arena::CreateMaybeMessage< ::tensorflow::eager::SyncRemoteExecutorForStream >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::SyncRemoteExecutorForStream >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::SendTensorOp*
Arena::CreateMaybeMessage< ::tensorflow::eager::SendTensorOp >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::SendTensorOp >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::SendPackedHandleOp_LocalTensorHandle*
Arena::CreateMaybeMessage< ::tensorflow::eager::SendPackedHandleOp_LocalTensorHandle >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::SendPackedHandleOp_LocalTensorHandle >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::SendPackedHandleOp_Handle*
Arena::CreateMaybeMessage< ::tensorflow::eager::SendPackedHandleOp_Handle >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::SendPackedHandleOp_Handle >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::eager::SendPackedHandleOp*
Arena::CreateMaybeMessage< ::tensorflow::eager::SendPackedHandleOp >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::eager::SendPackedHandleOp >(arena);
}
7042 PROTOBUF_NAMESPACE_CLOSE
7043 
7044 // @@protoc_insertion_point(global_scope)
7045 #include <google/protobuf/port_undef.inc>
7046