// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto

#include "tensorflow/core/protobuf/config.pb.h"

#include <algorithm>
#include <cstdint>

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/wire_format_lite.h>
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>

PROTOBUF_PRAGMA_INIT_SEG

namespace _pb = ::PROTOBUF_NAMESPACE_ID;
namespace _pbi = _pb::internal;

namespace tensorflow {
PROTOBUF_CONSTEXPR GPUOptions_Experimental_VirtualDevices::GPUOptions_Experimental_VirtualDevices(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.memory_limit_mb_)*/{}
  , /*decltype(_impl_.priority_)*/{}
  , /*decltype(_impl_._priority_cached_byte_size_)*/{0}
  , /*decltype(_impl_.device_ordinal_)*/{}
  , /*decltype(_impl_._device_ordinal_cached_byte_size_)*/{0}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct GPUOptions_Experimental_VirtualDevicesDefaultTypeInternal {
  PROTOBUF_CONSTEXPR GPUOptions_Experimental_VirtualDevicesDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~GPUOptions_Experimental_VirtualDevicesDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    GPUOptions_Experimental_VirtualDevices _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 GPUOptions_Experimental_VirtualDevicesDefaultTypeInternal _GPUOptions_Experimental_VirtualDevices_default_instance_;
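
// Note: every message in this file uses the same default-instance pattern
// seen above. The PROTOBUF_CONSTEXPR constructor constant-initializes _impl_,
// the *DefaultTypeInternal struct stores the default instance inside a union
// so no destructor is registered, and PROTOBUF_ATTRIBUTE_NO_DESTROY together
// with PROTOBUF_CONSTINIT keeps the object constant-initialized and alive for
// the lifetime of the program, avoiding dynamic-initialization order issues.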
PROTOBUF_CONSTEXPR GPUOptions_Experimental::GPUOptions_Experimental(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.virtual_devices_)*/{}
  , /*decltype(_impl_.collective_ring_order_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.num_dev_to_dev_copy_streams_)*/0
  , /*decltype(_impl_.kernel_tracker_max_interval_)*/0
  , /*decltype(_impl_.use_unified_memory_)*/false
  , /*decltype(_impl_.timestamped_allocator_)*/false
  , /*decltype(_impl_.use_cuda_malloc_async_)*/false
  , /*decltype(_impl_.disallow_retry_on_allocation_failure_)*/false
  , /*decltype(_impl_.kernel_tracker_max_bytes_)*/0
  , /*decltype(_impl_.internal_fragmentation_fraction_)*/0
  , /*decltype(_impl_.kernel_tracker_max_pending_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct GPUOptions_ExperimentalDefaultTypeInternal {
  PROTOBUF_CONSTEXPR GPUOptions_ExperimentalDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~GPUOptions_ExperimentalDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    GPUOptions_Experimental _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 GPUOptions_ExperimentalDefaultTypeInternal _GPUOptions_Experimental_default_instance_;
PROTOBUF_CONSTEXPR GPUOptions::GPUOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.allocator_type_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.visible_device_list_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.experimental_)*/nullptr
  , /*decltype(_impl_.per_process_gpu_memory_fraction_)*/0
  , /*decltype(_impl_.deferred_deletion_bytes_)*/::int64_t{0}
  , /*decltype(_impl_.polling_active_delay_usecs_)*/0
  , /*decltype(_impl_.allow_growth_)*/false
  , /*decltype(_impl_.force_gpu_compatible_)*/false
  , /*decltype(_impl_.polling_inactive_delay_msecs_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct GPUOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR GPUOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~GPUOptionsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    GPUOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 GPUOptionsDefaultTypeInternal _GPUOptions_default_instance_;
PROTOBUF_CONSTEXPR OptimizerOptions::OptimizerOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.opt_level_)*/0
  , /*decltype(_impl_.do_common_subexpression_elimination_)*/false
  , /*decltype(_impl_.do_constant_folding_)*/false
  , /*decltype(_impl_.do_function_inlining_)*/false
  , /*decltype(_impl_.cpu_global_jit_)*/false
  , /*decltype(_impl_.max_folded_constant_in_bytes_)*/::int64_t{0}
  , /*decltype(_impl_.global_jit_level_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct OptimizerOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR OptimizerOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~OptimizerOptionsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    OptimizerOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 OptimizerOptionsDefaultTypeInternal _OptimizerOptions_default_instance_;
PROTOBUF_CONSTEXPR GraphOptions::GraphOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.optimizer_options_)*/nullptr
  , /*decltype(_impl_.rewrite_options_)*/nullptr
  , /*decltype(_impl_.build_cost_model_)*/::int64_t{0}
  , /*decltype(_impl_.enable_recv_scheduling_)*/false
  , /*decltype(_impl_.infer_shapes_)*/false
  , /*decltype(_impl_.place_pruned_graph_)*/false
  , /*decltype(_impl_.enable_bfloat16_sendrecv_)*/false
  , /*decltype(_impl_.timeline_step_)*/0
  , /*decltype(_impl_.build_cost_model_after_)*/::int64_t{0}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct GraphOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR GraphOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~GraphOptionsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    GraphOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 GraphOptionsDefaultTypeInternal _GraphOptions_default_instance_;
PROTOBUF_CONSTEXPR ThreadPoolOptionProto::ThreadPoolOptionProto(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.global_name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.num_threads_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct ThreadPoolOptionProtoDefaultTypeInternal {
  PROTOBUF_CONSTEXPR ThreadPoolOptionProtoDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~ThreadPoolOptionProtoDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    ThreadPoolOptionProto _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ThreadPoolOptionProtoDefaultTypeInternal _ThreadPoolOptionProto_default_instance_;
PROTOBUF_CONSTEXPR RPCOptions::RPCOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.compression_algorithm_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.compression_level_)*/0
  , /*decltype(_impl_.use_rpc_for_inprocess_master_)*/false
  , /*decltype(_impl_.cache_rpc_response_)*/false
  , /*decltype(_impl_.disable_session_connection_sharing_)*/false
  , /*decltype(_impl_.num_channels_per_target_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct RPCOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR RPCOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~RPCOptionsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    RPCOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RPCOptionsDefaultTypeInternal _RPCOptions_default_instance_;
PROTOBUF_CONSTEXPR SessionMetadata::SessionMetadata(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.version_)*/::int64_t{0}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct SessionMetadataDefaultTypeInternal {
  PROTOBUF_CONSTEXPR SessionMetadataDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~SessionMetadataDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    SessionMetadata _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 SessionMetadataDefaultTypeInternal _SessionMetadata_default_instance_;
PROTOBUF_CONSTEXPR ConfigProto_DeviceCountEntry_DoNotUse::ConfigProto_DeviceCountEntry_DoNotUse(
    ::_pbi::ConstantInitialized) {}
struct ConfigProto_DeviceCountEntry_DoNotUseDefaultTypeInternal {
  PROTOBUF_CONSTEXPR ConfigProto_DeviceCountEntry_DoNotUseDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~ConfigProto_DeviceCountEntry_DoNotUseDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    ConfigProto_DeviceCountEntry_DoNotUse _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ConfigProto_DeviceCountEntry_DoNotUseDefaultTypeInternal _ConfigProto_DeviceCountEntry_DoNotUse_default_instance_;
PROTOBUF_CONSTEXPR ConfigProto_Experimental::ConfigProto_Experimental(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.collective_group_leader_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.executor_type_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.session_metadata_)*/nullptr
  , /*decltype(_impl_.coordination_config_)*/nullptr
  , /*decltype(_impl_.recv_buf_max_chunk_)*/0
  , /*decltype(_impl_.use_numa_affinity_)*/false
  , /*decltype(_impl_.collective_deterministic_sequential_execution_)*/false
  , /*decltype(_impl_.collective_nccl_)*/false
  , /*decltype(_impl_.share_session_state_in_clusterspec_propagation_)*/false
  , /*decltype(_impl_.disable_thread_spinning_)*/false
  , /*decltype(_impl_.share_cluster_devices_in_session_)*/false
  , /*decltype(_impl_.optimize_for_static_graph_)*/false
  , /*decltype(_impl_.enable_mlir_bridge_)*/false
  , /*decltype(_impl_.mlir_bridge_rollout_)*/0
  , /*decltype(_impl_.xla_fusion_autotuner_thresh_)*/::int64_t{0}
  , /*decltype(_impl_.enable_mlir_graph_optimization_)*/false
  , /*decltype(_impl_.disable_output_partition_graphs_)*/false
  , /*decltype(_impl_.use_tfrt_)*/false
  , /*decltype(_impl_.disable_functional_ops_lowering_)*/false
  , /*decltype(_impl_.xla_prefer_single_graph_cluster_)*/false
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct ConfigProto_ExperimentalDefaultTypeInternal {
  PROTOBUF_CONSTEXPR ConfigProto_ExperimentalDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~ConfigProto_ExperimentalDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    ConfigProto_Experimental _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ConfigProto_ExperimentalDefaultTypeInternal _ConfigProto_Experimental_default_instance_;
PROTOBUF_CONSTEXPR ConfigProto::ConfigProto(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.device_count_)*/{}
  , /*decltype(_impl_.device_filters_)*/{}
  , /*decltype(_impl_.session_inter_op_thread_pool_)*/{}
  , /*decltype(_impl_.gpu_options_)*/nullptr
  , /*decltype(_impl_.graph_options_)*/nullptr
  , /*decltype(_impl_.rpc_options_)*/nullptr
  , /*decltype(_impl_.cluster_def_)*/nullptr
  , /*decltype(_impl_.experimental_)*/nullptr
  , /*decltype(_impl_.intra_op_parallelism_threads_)*/0
  , /*decltype(_impl_.placement_period_)*/0
  , /*decltype(_impl_.inter_op_parallelism_threads_)*/0
  , /*decltype(_impl_.use_per_session_threads_)*/false
  , /*decltype(_impl_.allow_soft_placement_)*/false
  , /*decltype(_impl_.log_device_placement_)*/false
  , /*decltype(_impl_.isolate_session_state_)*/false
  , /*decltype(_impl_.operation_timeout_in_ms_)*/::int64_t{0}
  , /*decltype(_impl_.share_cluster_devices_in_session_)*/false
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct ConfigProtoDefaultTypeInternal {
  PROTOBUF_CONSTEXPR ConfigProtoDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~ConfigProtoDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    ConfigProto _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ConfigProtoDefaultTypeInternal _ConfigProto_default_instance_;
PROTOBUF_CONSTEXPR RunOptions_Experimental_RunHandlerPoolOptions::RunOptions_Experimental_RunHandlerPoolOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.priority_)*/::int64_t{0}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct RunOptions_Experimental_RunHandlerPoolOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR RunOptions_Experimental_RunHandlerPoolOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~RunOptions_Experimental_RunHandlerPoolOptionsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    RunOptions_Experimental_RunHandlerPoolOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RunOptions_Experimental_RunHandlerPoolOptionsDefaultTypeInternal _RunOptions_Experimental_RunHandlerPoolOptions_default_instance_;
PROTOBUF_CONSTEXPR RunOptions_Experimental::RunOptions_Experimental(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.run_handler_pool_options_)*/nullptr
  , /*decltype(_impl_.collective_graph_key_)*/::int64_t{0}
  , /*decltype(_impl_.use_run_handler_pool_)*/false
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct RunOptions_ExperimentalDefaultTypeInternal {
  PROTOBUF_CONSTEXPR RunOptions_ExperimentalDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~RunOptions_ExperimentalDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    RunOptions_Experimental _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RunOptions_ExperimentalDefaultTypeInternal _RunOptions_Experimental_default_instance_;
PROTOBUF_CONSTEXPR RunOptions::RunOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.debug_options_)*/nullptr
  , /*decltype(_impl_.experimental_)*/nullptr
  , /*decltype(_impl_.timeout_in_ms_)*/::int64_t{0}
  , /*decltype(_impl_.trace_level_)*/0
  , /*decltype(_impl_.inter_op_thread_pool_)*/0
  , /*decltype(_impl_.output_partition_graphs_)*/false
  , /*decltype(_impl_.report_tensor_allocations_upon_oom_)*/false
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct RunOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR RunOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~RunOptionsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    RunOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RunOptionsDefaultTypeInternal _RunOptions_default_instance_;
PROTOBUF_CONSTEXPR RunMetadata_FunctionGraphs::RunMetadata_FunctionGraphs(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.partition_graphs_)*/{}
  , /*decltype(_impl_.pre_optimization_graph_)*/nullptr
  , /*decltype(_impl_.post_optimization_graph_)*/nullptr
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct RunMetadata_FunctionGraphsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR RunMetadata_FunctionGraphsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~RunMetadata_FunctionGraphsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    RunMetadata_FunctionGraphs _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RunMetadata_FunctionGraphsDefaultTypeInternal _RunMetadata_FunctionGraphs_default_instance_;
PROTOBUF_CONSTEXPR RunMetadata::RunMetadata(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.partition_graphs_)*/{}
  , /*decltype(_impl_.function_graphs_)*/{}
  , /*decltype(_impl_.step_stats_)*/nullptr
  , /*decltype(_impl_.cost_graph_)*/nullptr
  , /*decltype(_impl_.session_metadata_)*/nullptr
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct RunMetadataDefaultTypeInternal {
  PROTOBUF_CONSTEXPR RunMetadataDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~RunMetadataDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    RunMetadata _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RunMetadataDefaultTypeInternal _RunMetadata_default_instance_;
PROTOBUF_CONSTEXPR TensorConnection::TensorConnection(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.from_tensor_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.to_tensor_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct TensorConnectionDefaultTypeInternal {
  PROTOBUF_CONSTEXPR TensorConnectionDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~TensorConnectionDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    TensorConnection _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 TensorConnectionDefaultTypeInternal _TensorConnection_default_instance_;
PROTOBUF_CONSTEXPR CallableOptions_FeedDevicesEntry_DoNotUse::CallableOptions_FeedDevicesEntry_DoNotUse(
    ::_pbi::ConstantInitialized) {}
struct CallableOptions_FeedDevicesEntry_DoNotUseDefaultTypeInternal {
  PROTOBUF_CONSTEXPR CallableOptions_FeedDevicesEntry_DoNotUseDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~CallableOptions_FeedDevicesEntry_DoNotUseDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    CallableOptions_FeedDevicesEntry_DoNotUse _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 CallableOptions_FeedDevicesEntry_DoNotUseDefaultTypeInternal _CallableOptions_FeedDevicesEntry_DoNotUse_default_instance_;
PROTOBUF_CONSTEXPR CallableOptions_FetchDevicesEntry_DoNotUse::CallableOptions_FetchDevicesEntry_DoNotUse(
    ::_pbi::ConstantInitialized) {}
struct CallableOptions_FetchDevicesEntry_DoNotUseDefaultTypeInternal {
  PROTOBUF_CONSTEXPR CallableOptions_FetchDevicesEntry_DoNotUseDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~CallableOptions_FetchDevicesEntry_DoNotUseDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    CallableOptions_FetchDevicesEntry_DoNotUse _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 CallableOptions_FetchDevicesEntry_DoNotUseDefaultTypeInternal _CallableOptions_FetchDevicesEntry_DoNotUse_default_instance_;
PROTOBUF_CONSTEXPR CallableOptions::CallableOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.feed_)*/{}
  , /*decltype(_impl_.fetch_)*/{}
  , /*decltype(_impl_.target_)*/{}
  , /*decltype(_impl_.tensor_connection_)*/{}
  , /*decltype(_impl_.feed_devices_)*/{}
  , /*decltype(_impl_.fetch_devices_)*/{}
  , /*decltype(_impl_.run_options_)*/nullptr
  , /*decltype(_impl_.fetch_skip_sync_)*/false
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct CallableOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR CallableOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~CallableOptionsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    CallableOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 CallableOptionsDefaultTypeInternal _CallableOptions_default_instance_;
}  // namespace tensorflow
namespace tensorflow {
bool OptimizerOptions_Level_IsValid(int value) {
  switch (value) {
    case -1:
    case 0:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> OptimizerOptions_Level_strings[2] = {};

static const char OptimizerOptions_Level_names[] =
  "L0"
  "L1";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry OptimizerOptions_Level_entries[] = {
  { {OptimizerOptions_Level_names + 0, 2}, -1 },
  { {OptimizerOptions_Level_names + 2, 2}, 0 },
};

static const int OptimizerOptions_Level_entries_by_number[] = {
  0, // -1 -> L0
  1, // 0 -> L1
};

const std::string& OptimizerOptions_Level_Name(
    OptimizerOptions_Level value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          OptimizerOptions_Level_entries,
          OptimizerOptions_Level_entries_by_number,
          2, OptimizerOptions_Level_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      OptimizerOptions_Level_entries,
      OptimizerOptions_Level_entries_by_number,
      2, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     OptimizerOptions_Level_strings[idx].get();
}
bool OptimizerOptions_Level_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, OptimizerOptions_Level* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      OptimizerOptions_Level_entries, 2, name, &int_value);
  if (success) {
    *value = static_cast<OptimizerOptions_Level>(int_value);
  }
  return success;
}
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr OptimizerOptions_Level OptimizerOptions::L1;
constexpr OptimizerOptions_Level OptimizerOptions::L0;
constexpr OptimizerOptions_Level OptimizerOptions::Level_MIN;
constexpr OptimizerOptions_Level OptimizerOptions::Level_MAX;
constexpr int OptimizerOptions::Level_ARRAYSIZE;
#endif  // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
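
// All of the enum helper blocks in this file share one table layout:
// *_entries is sorted by enum value name, *_entries_by_number holds indices
// into *_entries in ascending numeric order, and *_strings is a string cache
// filled once by InitializeEnumStrings() on the first *_Name() call.
// Illustrative usage only (the calling code below is not part of the
// generated file):
//
//   tensorflow::OptimizerOptions_Level level;
//   if (tensorflow::OptimizerOptions_Level_Parse("L1", &level)) {
//     // level == tensorflow::OptimizerOptions::L1 (numeric value 0)
//   }
//   // OptimizerOptions_Level_Name(tensorflow::OptimizerOptions::L0) == "L0"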
bool OptimizerOptions_GlobalJitLevel_IsValid(int value) {
  switch (value) {
    case -1:
    case 0:
    case 1:
    case 2:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> OptimizerOptions_GlobalJitLevel_strings[4] = {};

static const char OptimizerOptions_GlobalJitLevel_names[] =
  "DEFAULT"
  "OFF"
  "ON_1"
  "ON_2";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry OptimizerOptions_GlobalJitLevel_entries[] = {
  { {OptimizerOptions_GlobalJitLevel_names + 0, 7}, 0 },
  { {OptimizerOptions_GlobalJitLevel_names + 7, 3}, -1 },
  { {OptimizerOptions_GlobalJitLevel_names + 10, 4}, 1 },
  { {OptimizerOptions_GlobalJitLevel_names + 14, 4}, 2 },
};

static const int OptimizerOptions_GlobalJitLevel_entries_by_number[] = {
  1, // -1 -> OFF
  0, // 0 -> DEFAULT
  2, // 1 -> ON_1
  3, // 2 -> ON_2
};

const std::string& OptimizerOptions_GlobalJitLevel_Name(
    OptimizerOptions_GlobalJitLevel value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          OptimizerOptions_GlobalJitLevel_entries,
          OptimizerOptions_GlobalJitLevel_entries_by_number,
          4, OptimizerOptions_GlobalJitLevel_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      OptimizerOptions_GlobalJitLevel_entries,
      OptimizerOptions_GlobalJitLevel_entries_by_number,
      4, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     OptimizerOptions_GlobalJitLevel_strings[idx].get();
}
bool OptimizerOptions_GlobalJitLevel_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, OptimizerOptions_GlobalJitLevel* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      OptimizerOptions_GlobalJitLevel_entries, 4, name, &int_value);
  if (success) {
    *value = static_cast<OptimizerOptions_GlobalJitLevel>(int_value);
  }
  return success;
}
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions::DEFAULT;
constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions::OFF;
constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions::ON_1;
constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions::ON_2;
constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions::GlobalJitLevel_MIN;
constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions::GlobalJitLevel_MAX;
constexpr int OptimizerOptions::GlobalJitLevel_ARRAYSIZE;
#endif  // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
bool ConfigProto_Experimental_MlirBridgeRollout_IsValid(int value) {
  switch (value) {
    case 0:
    case 1:
    case 2:
    case 3:
    case 4:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> ConfigProto_Experimental_MlirBridgeRollout_strings[5] = {};

static const char ConfigProto_Experimental_MlirBridgeRollout_names[] =
  "MLIR_BRIDGE_ROLLOUT_DISABLED"
  "MLIR_BRIDGE_ROLLOUT_ENABLED"
  "MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED"
  "MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED"
  "MLIR_BRIDGE_ROLLOUT_UNSPECIFIED";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry ConfigProto_Experimental_MlirBridgeRollout_entries[] = {
  { {ConfigProto_Experimental_MlirBridgeRollout_names + 0, 28}, 2 },
  { {ConfigProto_Experimental_MlirBridgeRollout_names + 28, 27}, 1 },
  { {ConfigProto_Experimental_MlirBridgeRollout_names + 55, 37}, 3 },
  { {ConfigProto_Experimental_MlirBridgeRollout_names + 92, 46}, 4 },
  { {ConfigProto_Experimental_MlirBridgeRollout_names + 138, 31}, 0 },
};

static const int ConfigProto_Experimental_MlirBridgeRollout_entries_by_number[] = {
  4, // 0 -> MLIR_BRIDGE_ROLLOUT_UNSPECIFIED
  1, // 1 -> MLIR_BRIDGE_ROLLOUT_ENABLED
  0, // 2 -> MLIR_BRIDGE_ROLLOUT_DISABLED
  2, // 3 -> MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED
  3, // 4 -> MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED
};

const std::string& ConfigProto_Experimental_MlirBridgeRollout_Name(
    ConfigProto_Experimental_MlirBridgeRollout value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          ConfigProto_Experimental_MlirBridgeRollout_entries,
          ConfigProto_Experimental_MlirBridgeRollout_entries_by_number,
          5, ConfigProto_Experimental_MlirBridgeRollout_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      ConfigProto_Experimental_MlirBridgeRollout_entries,
      ConfigProto_Experimental_MlirBridgeRollout_entries_by_number,
      5, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     ConfigProto_Experimental_MlirBridgeRollout_strings[idx].get();
}
bool ConfigProto_Experimental_MlirBridgeRollout_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ConfigProto_Experimental_MlirBridgeRollout* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      ConfigProto_Experimental_MlirBridgeRollout_entries, 5, name, &int_value);
  if (success) {
    *value = static_cast<ConfigProto_Experimental_MlirBridgeRollout>(int_value);
  }
  return success;
}
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED;
constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MLIR_BRIDGE_ROLLOUT_ENABLED;
constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MLIR_BRIDGE_ROLLOUT_DISABLED;
constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED;
constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED;
constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MlirBridgeRollout_MIN;
constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MlirBridgeRollout_MAX;
constexpr int ConfigProto_Experimental::MlirBridgeRollout_ARRAYSIZE;
#endif  // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
bool RunOptions_TraceLevel_IsValid(int value) {
  switch (value) {
    case 0:
    case 1:
    case 2:
    case 3:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> RunOptions_TraceLevel_strings[4] = {};

static const char RunOptions_TraceLevel_names[] =
  "FULL_TRACE"
  "HARDWARE_TRACE"
  "NO_TRACE"
  "SOFTWARE_TRACE";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry RunOptions_TraceLevel_entries[] = {
  { {RunOptions_TraceLevel_names + 0, 10}, 3 },
  { {RunOptions_TraceLevel_names + 10, 14}, 2 },
  { {RunOptions_TraceLevel_names + 24, 8}, 0 },
  { {RunOptions_TraceLevel_names + 32, 14}, 1 },
};

static const int RunOptions_TraceLevel_entries_by_number[] = {
  2, // 0 -> NO_TRACE
  3, // 1 -> SOFTWARE_TRACE
  1, // 2 -> HARDWARE_TRACE
  0, // 3 -> FULL_TRACE
};

const std::string& RunOptions_TraceLevel_Name(
    RunOptions_TraceLevel value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          RunOptions_TraceLevel_entries,
          RunOptions_TraceLevel_entries_by_number,
          4, RunOptions_TraceLevel_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      RunOptions_TraceLevel_entries,
      RunOptions_TraceLevel_entries_by_number,
      4, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     RunOptions_TraceLevel_strings[idx].get();
}
bool RunOptions_TraceLevel_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, RunOptions_TraceLevel* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      RunOptions_TraceLevel_entries, 4, name, &int_value);
  if (success) {
    *value = static_cast<RunOptions_TraceLevel>(int_value);
  }
  return success;
}
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr RunOptions_TraceLevel RunOptions::NO_TRACE;
constexpr RunOptions_TraceLevel RunOptions::SOFTWARE_TRACE;
constexpr RunOptions_TraceLevel RunOptions::HARDWARE_TRACE;
constexpr RunOptions_TraceLevel RunOptions::FULL_TRACE;
constexpr RunOptions_TraceLevel RunOptions::TraceLevel_MIN;
constexpr RunOptions_TraceLevel RunOptions::TraceLevel_MAX;
constexpr int RunOptions::TraceLevel_ARRAYSIZE;
#endif  // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))

// ===================================================================

class GPUOptions_Experimental_VirtualDevices::_Internal {
 public:
};

GPUOptions_Experimental_VirtualDevices::GPUOptions_Experimental_VirtualDevices(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.GPUOptions.Experimental.VirtualDevices)
}
GPUOptions_Experimental_VirtualDevices::GPUOptions_Experimental_VirtualDevices(const GPUOptions_Experimental_VirtualDevices& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  GPUOptions_Experimental_VirtualDevices* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.memory_limit_mb_){from._impl_.memory_limit_mb_}
    , decltype(_impl_.priority_){from._impl_.priority_}
    , /*decltype(_impl_._priority_cached_byte_size_)*/{0}
    , decltype(_impl_.device_ordinal_){from._impl_.device_ordinal_}
    , /*decltype(_impl_._device_ordinal_cached_byte_size_)*/{0}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  // @@protoc_insertion_point(copy_constructor:tensorflow.GPUOptions.Experimental.VirtualDevices)
}

inline void GPUOptions_Experimental_VirtualDevices::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.memory_limit_mb_){arena}
    , decltype(_impl_.priority_){arena}
    , /*decltype(_impl_._priority_cached_byte_size_)*/{0}
    , decltype(_impl_.device_ordinal_){arena}
    , /*decltype(_impl_._device_ordinal_cached_byte_size_)*/{0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

GPUOptions_Experimental_VirtualDevices::~GPUOptions_Experimental_VirtualDevices() {
  // @@protoc_insertion_point(destructor:tensorflow.GPUOptions.Experimental.VirtualDevices)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void GPUOptions_Experimental_VirtualDevices::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.memory_limit_mb_.~RepeatedField();
  _impl_.priority_.~RepeatedField();
  _impl_.device_ordinal_.~RepeatedField();
}

void GPUOptions_Experimental_VirtualDevices::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void GPUOptions_Experimental_VirtualDevices::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.memory_limit_mb_.Clear();
  _impl_.priority_.Clear();
  _impl_.device_ordinal_.Clear();
  _internal_metadata_.Clear<std::string>();
}

const char* GPUOptions_Experimental_VirtualDevices::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // repeated float memory_limit_mb = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedFloatParser(_internal_mutable_memory_limit_mb(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<::uint8_t>(tag) == 13) {
          _internal_add_memory_limit_mb(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<float>(ptr));
          ptr += sizeof(float);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated int32 priority = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt32Parser(_internal_mutable_priority(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<::uint8_t>(tag) == 16) {
          _internal_add_priority(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated int32 device_ordinal = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt32Parser(_internal_mutable_device_ordinal(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<::uint8_t>(tag) == 24) {
          _internal_add_device_ordinal(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
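
// Wire-format note: each tag is (field_number << 3) | wire_type, so the
// switch above dispatches on tag >> 3 and then checks the low byte. For
// memory_limit_mb (field 1), tag 10 = (1 << 3) | 2 is the packed,
// length-delimited form and tag 13 = (1 << 3) | 5 is a single fixed 32-bit
// float; a zero tag or a wire type of 4 (group end) terminates the message,
// and anything else is preserved as an unknown field.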

::uint8_t* GPUOptions_Experimental_VirtualDevices::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // repeated float memory_limit_mb = 1;
  if (this->_internal_memory_limit_mb_size() > 0) {
    target = stream->WriteFixedPacked(1, _internal_memory_limit_mb(), target);
  }

  // repeated int32 priority = 2;
  {
    int byte_size = _impl_._priority_cached_byte_size_.load(std::memory_order_relaxed);
    if (byte_size > 0) {
      target = stream->WriteInt32Packed(
          2, _internal_priority(), byte_size, target);
    }
  }

  // repeated int32 device_ordinal = 3;
  {
    int byte_size = _impl_._device_ordinal_cached_byte_size_.load(std::memory_order_relaxed);
    if (byte_size > 0) {
      target = stream->WriteInt32Packed(
          3, _internal_device_ordinal(), byte_size, target);
    }
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.GPUOptions.Experimental.VirtualDevices)
  return target;
}

size_t GPUOptions_Experimental_VirtualDevices::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated float memory_limit_mb = 1;
  {
    unsigned int count = static_cast<unsigned int>(this->_internal_memory_limit_mb_size());
    size_t data_size = 4UL * count;
    if (data_size > 0) {
      total_size += 1 +
        ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
    }
    total_size += data_size;
  }

  // repeated int32 priority = 2;
  {
    size_t data_size = ::_pbi::WireFormatLite::
      Int32Size(this->_impl_.priority_);
    if (data_size > 0) {
      total_size += 1 +
        ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
    }
    int cached_size = ::_pbi::ToCachedSize(data_size);
    _impl_._priority_cached_byte_size_.store(cached_size,
                                    std::memory_order_relaxed);
    total_size += data_size;
  }

  // repeated int32 device_ordinal = 3;
  {
    size_t data_size = ::_pbi::WireFormatLite::
      Int32Size(this->_impl_.device_ordinal_);
    if (data_size > 0) {
      total_size += 1 +
        ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
    }
    int cached_size = ::_pbi::ToCachedSize(data_size);
    _impl_._device_ordinal_cached_byte_size_.store(cached_size,
                                    std::memory_order_relaxed);
    total_size += data_size;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
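
// ByteSizeLong() caches the packed payload sizes it computes
// (_priority_cached_byte_size_, _device_ordinal_cached_byte_size_) and the
// total message size (via SetCachedSize) so that _InternalSerialize() above
// can write length prefixes without recomputing them; the protobuf runtime
// runs a size pass before serialization.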

void GPUOptions_Experimental_VirtualDevices::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const GPUOptions_Experimental_VirtualDevices*>(
      &from));
}

void GPUOptions_Experimental_VirtualDevices::MergeFrom(const GPUOptions_Experimental_VirtualDevices& from) {
  GPUOptions_Experimental_VirtualDevices* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.memory_limit_mb_.MergeFrom(from._impl_.memory_limit_mb_);
  _this->_impl_.priority_.MergeFrom(from._impl_.priority_);
  _this->_impl_.device_ordinal_.MergeFrom(from._impl_.device_ordinal_);
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void GPUOptions_Experimental_VirtualDevices::CopyFrom(const GPUOptions_Experimental_VirtualDevices& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool GPUOptions_Experimental_VirtualDevices::IsInitialized() const {
  return true;
}

void GPUOptions_Experimental_VirtualDevices::InternalSwap(GPUOptions_Experimental_VirtualDevices* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.memory_limit_mb_.InternalSwap(&other->_impl_.memory_limit_mb_);
  _impl_.priority_.InternalSwap(&other->_impl_.priority_);
  _impl_.device_ordinal_.InternalSwap(&other->_impl_.device_ordinal_);
}

std::string GPUOptions_Experimental_VirtualDevices::GetTypeName() const {
  return "tensorflow.GPUOptions.Experimental.VirtualDevices";
}


// ===================================================================

class GPUOptions_Experimental::_Internal {
 public:
};

GPUOptions_Experimental::GPUOptions_Experimental(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.GPUOptions.Experimental)
}
GPUOptions_Experimental::GPUOptions_Experimental(const GPUOptions_Experimental& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  GPUOptions_Experimental* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.virtual_devices_){from._impl_.virtual_devices_}
    , decltype(_impl_.collective_ring_order_){}
    , decltype(_impl_.num_dev_to_dev_copy_streams_){}
    , decltype(_impl_.kernel_tracker_max_interval_){}
    , decltype(_impl_.use_unified_memory_){}
    , decltype(_impl_.timestamped_allocator_){}
    , decltype(_impl_.use_cuda_malloc_async_){}
    , decltype(_impl_.disallow_retry_on_allocation_failure_){}
    , decltype(_impl_.kernel_tracker_max_bytes_){}
    , decltype(_impl_.internal_fragmentation_fraction_){}
    , decltype(_impl_.kernel_tracker_max_pending_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.collective_ring_order_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.collective_ring_order_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_collective_ring_order().empty()) {
    _this->_impl_.collective_ring_order_.Set(from._internal_collective_ring_order(),
      _this->GetArenaForAllocation());
  }
  ::memcpy(&_impl_.num_dev_to_dev_copy_streams_, &from._impl_.num_dev_to_dev_copy_streams_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.kernel_tracker_max_pending_) -
    reinterpret_cast<char*>(&_impl_.num_dev_to_dev_copy_streams_)) + sizeof(_impl_.kernel_tracker_max_pending_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.GPUOptions.Experimental)
}
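
// Note: the single ::memcpy above copies every scalar field declared between
// num_dev_to_dev_copy_streams_ and kernel_tracker_max_pending_ in Impl_ in
// one shot, relying on their contiguous layout; Clear() below zeroes the
// same span with ::memset.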

inline void GPUOptions_Experimental::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.virtual_devices_){arena}
    , decltype(_impl_.collective_ring_order_){}
    , decltype(_impl_.num_dev_to_dev_copy_streams_){0}
    , decltype(_impl_.kernel_tracker_max_interval_){0}
    , decltype(_impl_.use_unified_memory_){false}
    , decltype(_impl_.timestamped_allocator_){false}
    , decltype(_impl_.use_cuda_malloc_async_){false}
    , decltype(_impl_.disallow_retry_on_allocation_failure_){false}
    , decltype(_impl_.kernel_tracker_max_bytes_){0}
    , decltype(_impl_.internal_fragmentation_fraction_){0}
    , decltype(_impl_.kernel_tracker_max_pending_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.collective_ring_order_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.collective_ring_order_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

GPUOptions_Experimental::~GPUOptions_Experimental() {
  // @@protoc_insertion_point(destructor:tensorflow.GPUOptions.Experimental)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void GPUOptions_Experimental::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.virtual_devices_.~RepeatedPtrField();
  _impl_.collective_ring_order_.Destroy();
}

void GPUOptions_Experimental::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void GPUOptions_Experimental::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.GPUOptions.Experimental)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.virtual_devices_.Clear();
  _impl_.collective_ring_order_.ClearToEmpty();
  ::memset(&_impl_.num_dev_to_dev_copy_streams_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.kernel_tracker_max_pending_) -
      reinterpret_cast<char*>(&_impl_.num_dev_to_dev_copy_streams_)) + sizeof(_impl_.kernel_tracker_max_pending_));
  _internal_metadata_.Clear<std::string>();
}
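
// The parser below follows the same shape as
// GPUOptions_Experimental_VirtualDevices::_InternalParse above: one case per
// field number, bools read as varints, strings parsed with
// InlineGreedyStringParser and UTF-8-verified, and unrecognized tags routed
// to handle_unusual.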

const char* GPUOptions_Experimental::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_virtual_devices(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // bool use_unified_memory = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.use_unified_memory_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 num_dev_to_dev_copy_streams = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.num_dev_to_dev_copy_streams_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string collective_ring_order = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
          auto str = _internal_mutable_collective_ring_order();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // bool timestamped_allocator = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          _impl_.timestamped_allocator_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 kernel_tracker_max_interval = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) {
          _impl_.kernel_tracker_max_interval_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 kernel_tracker_max_bytes = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 64)) {
          _impl_.kernel_tracker_max_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 kernel_tracker_max_pending = 9;
      case 9:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 72)) {
          _impl_.kernel_tracker_max_pending_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // double internal_fragmentation_fraction = 10;
      case 10:
1107         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 81)) {
1108           _impl_.internal_fragmentation_fraction_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
1109           ptr += sizeof(double);
1110         } else {
1111           goto handle_unusual;
1112         }
1113         continue;
1114       // bool use_cuda_malloc_async = 11;
1115       case 11:
1116         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 88)) {
1117           _impl_.use_cuda_malloc_async_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
1118           CHK_(ptr);
1119         } else {
1120           goto handle_unusual;
1121         }
1122         continue;
1123       // bool disallow_retry_on_allocation_failure = 12;
1124       case 12:
1125         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 96)) {
1126           _impl_.disallow_retry_on_allocation_failure_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
1127           CHK_(ptr);
1128         } else {
1129           goto handle_unusual;
1130         }
1131         continue;
1132       default:
1133         goto handle_unusual;
1134     }  // switch
1135   handle_unusual:
1136     if ((tag == 0) || ((tag & 7) == 4)) {
1137       CHK_(ptr);
1138       ctx->SetLastTag(tag);
1139       goto message_done;
1140     }
1141     ptr = UnknownFieldParse(
1142         tag,
1143         _internal_metadata_.mutable_unknown_fields<std::string>(),
1144         ptr, ctx);
1145     CHK_(ptr != nullptr);
1146   }  // while
1147 message_done:
1148   return ptr;
1149 failure:
1150   ptr = nullptr;
1151   goto message_done;
1152 #undef CHK_
1153 }
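
// Note (hand-written sketch, not protoc output): each case above dispatches on
// the wire-format tag, computed as (field_number << 3) | wire_type. For
// example, virtual_devices is field 1 with wire type 2 (length-delimited), so
// its tag byte is (1 << 3) | 2 == 10, the value the ExpectTag<10> fast path
// checks; internal_fragmentation_fraction is field 10 with wire type 1
// (64-bit), giving (10 << 3) | 1 == 81. In handle_unusual, (tag & 7) == 4
// detects an END_GROUP marker, which terminates the enclosing message.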

::uint8_t* GPUOptions_Experimental::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.GPUOptions.Experimental)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_virtual_devices_size()); i < n; i++) {
    const auto& repfield = this->_internal_virtual_devices(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(1, repfield, repfield.GetCachedSize(), target, stream);
  }

  // bool use_unified_memory = 2;
  if (this->_internal_use_unified_memory() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(2, this->_internal_use_unified_memory(), target);
  }

  // int32 num_dev_to_dev_copy_streams = 3;
  if (this->_internal_num_dev_to_dev_copy_streams() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_num_dev_to_dev_copy_streams(), target);
  }

  // string collective_ring_order = 4;
  if (!this->_internal_collective_ring_order().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_collective_ring_order().data(), static_cast<int>(this->_internal_collective_ring_order().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.GPUOptions.Experimental.collective_ring_order");
    target = stream->WriteStringMaybeAliased(
        4, this->_internal_collective_ring_order(), target);
  }

  // bool timestamped_allocator = 5;
  if (this->_internal_timestamped_allocator() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(5, this->_internal_timestamped_allocator(), target);
  }

  // int32 kernel_tracker_max_interval = 7;
  if (this->_internal_kernel_tracker_max_interval() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(7, this->_internal_kernel_tracker_max_interval(), target);
  }

  // int32 kernel_tracker_max_bytes = 8;
  if (this->_internal_kernel_tracker_max_bytes() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(8, this->_internal_kernel_tracker_max_bytes(), target);
  }

  // int32 kernel_tracker_max_pending = 9;
  if (this->_internal_kernel_tracker_max_pending() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(9, this->_internal_kernel_tracker_max_pending(), target);
  }

  // double internal_fragmentation_fraction = 10;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_internal_fragmentation_fraction = this->_internal_internal_fragmentation_fraction();
  ::uint64_t raw_internal_fragmentation_fraction;
  memcpy(&raw_internal_fragmentation_fraction, &tmp_internal_fragmentation_fraction, sizeof(tmp_internal_fragmentation_fraction));
  if (raw_internal_fragmentation_fraction != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(10, this->_internal_internal_fragmentation_fraction(), target);
  }

  // bool use_cuda_malloc_async = 11;
  if (this->_internal_use_cuda_malloc_async() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(11, this->_internal_use_cuda_malloc_async(), target);
  }

  // bool disallow_retry_on_allocation_failure = 12;
  if (this->_internal_disallow_retry_on_allocation_failure() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(12, this->_internal_disallow_retry_on_allocation_failure(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.GPUOptions.Experimental)
  return target;
}
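
// Note (hand-written sketch, not protoc output): the double field is elided
// only when its raw *bit pattern* is zero, not when it merely compares equal
// to 0.0. That keeps -0.0 on the wire:
//
//   double d = -0.0;
//   ::uint64_t raw;
//   memcpy(&raw, &d, sizeof(d));  // raw == 0x8000000000000000, nonzero
//
// so a round-trip preserves the sign of zero even though d == 0.0 is true.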

size_t GPUOptions_Experimental::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.GPUOptions.Experimental)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
  total_size += 1UL * this->_internal_virtual_devices_size();
  for (const auto& msg : this->_impl_.virtual_devices_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // string collective_ring_order = 4;
  if (!this->_internal_collective_ring_order().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_collective_ring_order());
  }

  // int32 num_dev_to_dev_copy_streams = 3;
  if (this->_internal_num_dev_to_dev_copy_streams() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_num_dev_to_dev_copy_streams());
  }

  // int32 kernel_tracker_max_interval = 7;
  if (this->_internal_kernel_tracker_max_interval() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_kernel_tracker_max_interval());
  }

  // bool use_unified_memory = 2;
  if (this->_internal_use_unified_memory() != 0) {
    total_size += 1 + 1;
  }

  // bool timestamped_allocator = 5;
  if (this->_internal_timestamped_allocator() != 0) {
    total_size += 1 + 1;
  }

  // bool use_cuda_malloc_async = 11;
  if (this->_internal_use_cuda_malloc_async() != 0) {
    total_size += 1 + 1;
  }

  // bool disallow_retry_on_allocation_failure = 12;
  if (this->_internal_disallow_retry_on_allocation_failure() != 0) {
    total_size += 1 + 1;
  }

  // int32 kernel_tracker_max_bytes = 8;
  if (this->_internal_kernel_tracker_max_bytes() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_kernel_tracker_max_bytes());
  }

  // double internal_fragmentation_fraction = 10;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_internal_fragmentation_fraction = this->_internal_internal_fragmentation_fraction();
  ::uint64_t raw_internal_fragmentation_fraction;
  memcpy(&raw_internal_fragmentation_fraction, &tmp_internal_fragmentation_fraction, sizeof(tmp_internal_fragmentation_fraction));
  if (raw_internal_fragmentation_fraction != 0) {
    total_size += 1 + 8;
  }

  // int32 kernel_tracker_max_pending = 9;
  if (this->_internal_kernel_tracker_max_pending() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_kernel_tracker_max_pending());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
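
// Note (hand-written sketch, not protoc output): Int32SizePlusOne folds the
// one-byte tag into the varint length of the value. For example, with
// num_dev_to_dev_copy_streams = 300 the varint needs 2 bytes (2^7 <= 300 <
// 2^14), so the field contributes 1 + 2 = 3 bytes; any negative int32 is
// sign-extended to 64 bits and always costs 1 + 10 bytes on the wire.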

void GPUOptions_Experimental::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const GPUOptions_Experimental*>(
      &from));
}

void GPUOptions_Experimental::MergeFrom(const GPUOptions_Experimental& from) {
  GPUOptions_Experimental* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.GPUOptions.Experimental)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.virtual_devices_.MergeFrom(from._impl_.virtual_devices_);
  if (!from._internal_collective_ring_order().empty()) {
    _this->_internal_set_collective_ring_order(from._internal_collective_ring_order());
  }
  if (from._internal_num_dev_to_dev_copy_streams() != 0) {
    _this->_internal_set_num_dev_to_dev_copy_streams(from._internal_num_dev_to_dev_copy_streams());
  }
  if (from._internal_kernel_tracker_max_interval() != 0) {
    _this->_internal_set_kernel_tracker_max_interval(from._internal_kernel_tracker_max_interval());
  }
  if (from._internal_use_unified_memory() != 0) {
    _this->_internal_set_use_unified_memory(from._internal_use_unified_memory());
  }
  if (from._internal_timestamped_allocator() != 0) {
    _this->_internal_set_timestamped_allocator(from._internal_timestamped_allocator());
  }
  if (from._internal_use_cuda_malloc_async() != 0) {
    _this->_internal_set_use_cuda_malloc_async(from._internal_use_cuda_malloc_async());
  }
  if (from._internal_disallow_retry_on_allocation_failure() != 0) {
    _this->_internal_set_disallow_retry_on_allocation_failure(from._internal_disallow_retry_on_allocation_failure());
  }
  if (from._internal_kernel_tracker_max_bytes() != 0) {
    _this->_internal_set_kernel_tracker_max_bytes(from._internal_kernel_tracker_max_bytes());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_internal_fragmentation_fraction = from._internal_internal_fragmentation_fraction();
  ::uint64_t raw_internal_fragmentation_fraction;
  memcpy(&raw_internal_fragmentation_fraction, &tmp_internal_fragmentation_fraction, sizeof(tmp_internal_fragmentation_fraction));
  if (raw_internal_fragmentation_fraction != 0) {
    _this->_internal_set_internal_fragmentation_fraction(from._internal_internal_fragmentation_fraction());
  }
  if (from._internal_kernel_tracker_max_pending() != 0) {
    _this->_internal_set_kernel_tracker_max_pending(from._internal_kernel_tracker_max_pending());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
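
// Usage sketch (hand-written, not protoc output; uses the standard generated
// public setters for these fields): MergeFrom copies a scalar only when `from`
// holds a non-default value, so defaults never clobber data on `this`, while
// the repeated virtual_devices field is appended:
//
//   tensorflow::GPUOptions_Experimental a, b;
//   a.set_num_dev_to_dev_copy_streams(4);
//   b.set_use_unified_memory(true);  // b's stream count stays default 0
//   a.MergeFrom(b);                  // a keeps 4 and gains the bool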

void GPUOptions_Experimental::CopyFrom(const GPUOptions_Experimental& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.GPUOptions.Experimental)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool GPUOptions_Experimental::IsInitialized() const {
  return true;
}

void GPUOptions_Experimental::InternalSwap(GPUOptions_Experimental* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.virtual_devices_.InternalSwap(&other->_impl_.virtual_devices_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.collective_ring_order_, lhs_arena,
      &other->_impl_.collective_ring_order_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(GPUOptions_Experimental, _impl_.kernel_tracker_max_pending_)
      + sizeof(GPUOptions_Experimental::_impl_.kernel_tracker_max_pending_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(GPUOptions_Experimental, _impl_.num_dev_to_dev_copy_streams_)>(
          reinterpret_cast<char*>(&_impl_.num_dev_to_dev_copy_streams_),
          reinterpret_cast<char*>(&other->_impl_.num_dev_to_dev_copy_streams_));
}
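
// Note (hand-written sketch, not protoc output): the memswap above relies on
// protoc laying out num_dev_to_dev_copy_streams_ through
// kernel_tracker_max_pending_ as one contiguous trivially-copyable block in
// _impl_, so a single byte-wise swap of
//
//   offset(last) + sizeof(last) - offset(first)
//
// bytes exchanges every scalar field at once. The copy constructor's memcpy
// and Clear()'s memset bound their ranges with the same first/last pair.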

std::string GPUOptions_Experimental::GetTypeName() const {
  return "tensorflow.GPUOptions.Experimental";
}


// ===================================================================

class GPUOptions::_Internal {
 public:
  static const ::tensorflow::GPUOptions_Experimental& experimental(const GPUOptions* msg);
};

const ::tensorflow::GPUOptions_Experimental&
GPUOptions::_Internal::experimental(const GPUOptions* msg) {
  return *msg->_impl_.experimental_;
}
GPUOptions::GPUOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.GPUOptions)
}
GPUOptions::GPUOptions(const GPUOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  GPUOptions* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.allocator_type_){}
    , decltype(_impl_.visible_device_list_){}
    , decltype(_impl_.experimental_){nullptr}
    , decltype(_impl_.per_process_gpu_memory_fraction_){}
    , decltype(_impl_.deferred_deletion_bytes_){}
    , decltype(_impl_.polling_active_delay_usecs_){}
    , decltype(_impl_.allow_growth_){}
    , decltype(_impl_.force_gpu_compatible_){}
    , decltype(_impl_.polling_inactive_delay_msecs_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.allocator_type_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.allocator_type_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_allocator_type().empty()) {
    _this->_impl_.allocator_type_.Set(from._internal_allocator_type(),
      _this->GetArenaForAllocation());
  }
  _impl_.visible_device_list_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.visible_device_list_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_visible_device_list().empty()) {
    _this->_impl_.visible_device_list_.Set(from._internal_visible_device_list(),
      _this->GetArenaForAllocation());
  }
  if (from._internal_has_experimental()) {
    _this->_impl_.experimental_ = new ::tensorflow::GPUOptions_Experimental(*from._impl_.experimental_);
  }
  ::memcpy(&_impl_.per_process_gpu_memory_fraction_, &from._impl_.per_process_gpu_memory_fraction_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.polling_inactive_delay_msecs_) -
    reinterpret_cast<char*>(&_impl_.per_process_gpu_memory_fraction_)) + sizeof(_impl_.polling_inactive_delay_msecs_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.GPUOptions)
}

inline void GPUOptions::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.allocator_type_){}
    , decltype(_impl_.visible_device_list_){}
    , decltype(_impl_.experimental_){nullptr}
    , decltype(_impl_.per_process_gpu_memory_fraction_){0}
    , decltype(_impl_.deferred_deletion_bytes_){::int64_t{0}}
    , decltype(_impl_.polling_active_delay_usecs_){0}
    , decltype(_impl_.allow_growth_){false}
    , decltype(_impl_.force_gpu_compatible_){false}
    , decltype(_impl_.polling_inactive_delay_msecs_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.allocator_type_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.allocator_type_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  _impl_.visible_device_list_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.visible_device_list_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

GPUOptions::~GPUOptions() {
  // @@protoc_insertion_point(destructor:tensorflow.GPUOptions)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void GPUOptions::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.allocator_type_.Destroy();
  _impl_.visible_device_list_.Destroy();
  if (this != internal_default_instance()) delete _impl_.experimental_;
}

void GPUOptions::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void GPUOptions::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.GPUOptions)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.allocator_type_.ClearToEmpty();
  _impl_.visible_device_list_.ClearToEmpty();
  if (GetArenaForAllocation() == nullptr && _impl_.experimental_ != nullptr) {
    delete _impl_.experimental_;
  }
  _impl_.experimental_ = nullptr;
  ::memset(&_impl_.per_process_gpu_memory_fraction_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.polling_inactive_delay_msecs_) -
      reinterpret_cast<char*>(&_impl_.per_process_gpu_memory_fraction_)) + sizeof(_impl_.polling_inactive_delay_msecs_));
  _internal_metadata_.Clear<std::string>();
}

const char* GPUOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // double per_process_gpu_memory_fraction = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 9)) {
          _impl_.per_process_gpu_memory_fraction_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      // string allocator_type = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_allocator_type();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 deferred_deletion_bytes = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.deferred_deletion_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool allow_growth = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          _impl_.allow_growth_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string visible_device_list = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
          auto str = _internal_mutable_visible_device_list();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 polling_active_delay_usecs = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 48)) {
          _impl_.polling_active_delay_usecs_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 polling_inactive_delay_msecs = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) {
          _impl_.polling_inactive_delay_msecs_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool force_gpu_compatible = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 64)) {
          _impl_.force_gpu_compatible_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.GPUOptions.Experimental experimental = 9;
      case 9:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 74)) {
          ptr = ctx->ParseMessage(_internal_mutable_experimental(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
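
// Note (hand-written sketch, not protoc output): experimental is a nested
// message (field 9, wire type 2, so its tag byte is (9 << 3) | 2 == 74), so
// case 9 defers to ctx->ParseMessage(), which consumes the length prefix,
// pushes a depth/size limit, and recursively runs
// GPUOptions_Experimental::_InternalParse over the delimited payload.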

::uint8_t* GPUOptions::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.GPUOptions)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // double per_process_gpu_memory_fraction = 1;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_per_process_gpu_memory_fraction = this->_internal_per_process_gpu_memory_fraction();
  ::uint64_t raw_per_process_gpu_memory_fraction;
  memcpy(&raw_per_process_gpu_memory_fraction, &tmp_per_process_gpu_memory_fraction, sizeof(tmp_per_process_gpu_memory_fraction));
  if (raw_per_process_gpu_memory_fraction != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(1, this->_internal_per_process_gpu_memory_fraction(), target);
  }

  // string allocator_type = 2;
  if (!this->_internal_allocator_type().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_allocator_type().data(), static_cast<int>(this->_internal_allocator_type().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.GPUOptions.allocator_type");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_allocator_type(), target);
  }

  // int64 deferred_deletion_bytes = 3;
  if (this->_internal_deferred_deletion_bytes() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_deferred_deletion_bytes(), target);
  }

  // bool allow_growth = 4;
  if (this->_internal_allow_growth() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(4, this->_internal_allow_growth(), target);
  }

  // string visible_device_list = 5;
  if (!this->_internal_visible_device_list().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_visible_device_list().data(), static_cast<int>(this->_internal_visible_device_list().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.GPUOptions.visible_device_list");
    target = stream->WriteStringMaybeAliased(
        5, this->_internal_visible_device_list(), target);
  }

  // int32 polling_active_delay_usecs = 6;
  if (this->_internal_polling_active_delay_usecs() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(6, this->_internal_polling_active_delay_usecs(), target);
  }

  // int32 polling_inactive_delay_msecs = 7;
  if (this->_internal_polling_inactive_delay_msecs() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(7, this->_internal_polling_inactive_delay_msecs(), target);
  }

  // bool force_gpu_compatible = 8;
  if (this->_internal_force_gpu_compatible() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(8, this->_internal_force_gpu_compatible(), target);
  }

  // .tensorflow.GPUOptions.Experimental experimental = 9;
  if (this->_internal_has_experimental()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(9, _Internal::experimental(this),
        _Internal::experimental(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.GPUOptions)
  return target;
}

size_t GPUOptions::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.GPUOptions)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string allocator_type = 2;
  if (!this->_internal_allocator_type().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_allocator_type());
  }

  // string visible_device_list = 5;
  if (!this->_internal_visible_device_list().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_visible_device_list());
  }

  // .tensorflow.GPUOptions.Experimental experimental = 9;
  if (this->_internal_has_experimental()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.experimental_);
  }

  // double per_process_gpu_memory_fraction = 1;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_per_process_gpu_memory_fraction = this->_internal_per_process_gpu_memory_fraction();
  ::uint64_t raw_per_process_gpu_memory_fraction;
  memcpy(&raw_per_process_gpu_memory_fraction, &tmp_per_process_gpu_memory_fraction, sizeof(tmp_per_process_gpu_memory_fraction));
  if (raw_per_process_gpu_memory_fraction != 0) {
    total_size += 1 + 8;
  }

  // int64 deferred_deletion_bytes = 3;
  if (this->_internal_deferred_deletion_bytes() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_deferred_deletion_bytes());
  }

  // int32 polling_active_delay_usecs = 6;
  if (this->_internal_polling_active_delay_usecs() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_polling_active_delay_usecs());
  }

  // bool allow_growth = 4;
  if (this->_internal_allow_growth() != 0) {
    total_size += 1 + 1;
  }

  // bool force_gpu_compatible = 8;
  if (this->_internal_force_gpu_compatible() != 0) {
    total_size += 1 + 1;
  }

  // int32 polling_inactive_delay_msecs = 7;
  if (this->_internal_polling_inactive_delay_msecs() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_polling_inactive_delay_msecs());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void GPUOptions::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const GPUOptions*>(
      &from));
}

void GPUOptions::MergeFrom(const GPUOptions& from) {
  GPUOptions* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.GPUOptions)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_allocator_type().empty()) {
    _this->_internal_set_allocator_type(from._internal_allocator_type());
  }
  if (!from._internal_visible_device_list().empty()) {
    _this->_internal_set_visible_device_list(from._internal_visible_device_list());
  }
  if (from._internal_has_experimental()) {
    _this->_internal_mutable_experimental()->::tensorflow::GPUOptions_Experimental::MergeFrom(
        from._internal_experimental());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_per_process_gpu_memory_fraction = from._internal_per_process_gpu_memory_fraction();
  ::uint64_t raw_per_process_gpu_memory_fraction;
  memcpy(&raw_per_process_gpu_memory_fraction, &tmp_per_process_gpu_memory_fraction, sizeof(tmp_per_process_gpu_memory_fraction));
  if (raw_per_process_gpu_memory_fraction != 0) {
    _this->_internal_set_per_process_gpu_memory_fraction(from._internal_per_process_gpu_memory_fraction());
  }
  if (from._internal_deferred_deletion_bytes() != 0) {
    _this->_internal_set_deferred_deletion_bytes(from._internal_deferred_deletion_bytes());
  }
  if (from._internal_polling_active_delay_usecs() != 0) {
    _this->_internal_set_polling_active_delay_usecs(from._internal_polling_active_delay_usecs());
  }
  if (from._internal_allow_growth() != 0) {
    _this->_internal_set_allow_growth(from._internal_allow_growth());
  }
  if (from._internal_force_gpu_compatible() != 0) {
    _this->_internal_set_force_gpu_compatible(from._internal_force_gpu_compatible());
  }
  if (from._internal_polling_inactive_delay_msecs() != 0) {
    _this->_internal_set_polling_inactive_delay_msecs(from._internal_polling_inactive_delay_msecs());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void GPUOptions::CopyFrom(const GPUOptions& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.GPUOptions)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool GPUOptions::IsInitialized() const {
  return true;
}

void GPUOptions::InternalSwap(GPUOptions* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.allocator_type_, lhs_arena,
      &other->_impl_.allocator_type_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.visible_device_list_, lhs_arena,
      &other->_impl_.visible_device_list_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(GPUOptions, _impl_.polling_inactive_delay_msecs_)
      + sizeof(GPUOptions::_impl_.polling_inactive_delay_msecs_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(GPUOptions, _impl_.experimental_)>(
          reinterpret_cast<char*>(&_impl_.experimental_),
          reinterpret_cast<char*>(&other->_impl_.experimental_));
}

std::string GPUOptions::GetTypeName() const {
  return "tensorflow.GPUOptions";
}
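
// Usage sketch (hand-written, not protoc output; accessors follow the
// standard generated naming for the fields above):
//
//   tensorflow::GPUOptions opts;
//   opts.set_allow_growth(true);                    // grow pools on demand
//   opts.set_per_process_gpu_memory_fraction(0.5);
//   opts.set_visible_device_list("0,1");
//   opts.mutable_experimental()->set_use_cuda_malloc_async(true);
//   std::string wire = opts.SerializeAsString();    // MessageLite API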


// ===================================================================

class OptimizerOptions::_Internal {
 public:
};

OptimizerOptions::OptimizerOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.OptimizerOptions)
}
OptimizerOptions::OptimizerOptions(const OptimizerOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  OptimizerOptions* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.opt_level_){}
    , decltype(_impl_.do_common_subexpression_elimination_){}
    , decltype(_impl_.do_constant_folding_){}
    , decltype(_impl_.do_function_inlining_){}
    , decltype(_impl_.cpu_global_jit_){}
    , decltype(_impl_.max_folded_constant_in_bytes_){}
    , decltype(_impl_.global_jit_level_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  ::memcpy(&_impl_.opt_level_, &from._impl_.opt_level_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.global_jit_level_) -
    reinterpret_cast<char*>(&_impl_.opt_level_)) + sizeof(_impl_.global_jit_level_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.OptimizerOptions)
}

inline void OptimizerOptions::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.opt_level_){0}
    , decltype(_impl_.do_common_subexpression_elimination_){false}
    , decltype(_impl_.do_constant_folding_){false}
    , decltype(_impl_.do_function_inlining_){false}
    , decltype(_impl_.cpu_global_jit_){false}
    , decltype(_impl_.max_folded_constant_in_bytes_){::int64_t{0}}
    , decltype(_impl_.global_jit_level_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

OptimizerOptions::~OptimizerOptions() {
  // @@protoc_insertion_point(destructor:tensorflow.OptimizerOptions)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void OptimizerOptions::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}

void OptimizerOptions::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void OptimizerOptions::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.OptimizerOptions)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  ::memset(&_impl_.opt_level_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.global_jit_level_) -
      reinterpret_cast<char*>(&_impl_.opt_level_)) + sizeof(_impl_.global_jit_level_));
  _internal_metadata_.Clear<std::string>();
}

const char* OptimizerOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // bool do_common_subexpression_elimination = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.do_common_subexpression_elimination_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool do_constant_folding = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.do_constant_folding_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.OptimizerOptions.Level opt_level = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_opt_level(static_cast<::tensorflow::OptimizerOptions_Level>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // bool do_function_inlining = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          _impl_.do_function_inlining_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_global_jit_level(static_cast<::tensorflow::OptimizerOptions_GlobalJitLevel>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 max_folded_constant_in_bytes = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 48)) {
          _impl_.max_folded_constant_in_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool cpu_global_jit = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) {
          _impl_.cpu_global_jit_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
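
// Note (hand-written sketch, not protoc output): proto3 enums are open, so
// cases 3 and 5 cast the raw varint straight into the enum type without a
// validity check; unrecognized numeric values are kept as-is rather than
// routed to unknown fields. In config.proto, OptimizerOptions.Level defines
// L0 = -1, and a negative enum value -- like any negative varint -- takes
// 10 bytes on the wire.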

::uint8_t* OptimizerOptions::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.OptimizerOptions)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // bool do_common_subexpression_elimination = 1;
  if (this->_internal_do_common_subexpression_elimination() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(1, this->_internal_do_common_subexpression_elimination(), target);
  }

  // bool do_constant_folding = 2;
  if (this->_internal_do_constant_folding() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(2, this->_internal_do_constant_folding(), target);
  }

  // .tensorflow.OptimizerOptions.Level opt_level = 3;
  if (this->_internal_opt_level() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      3, this->_internal_opt_level(), target);
  }

  // bool do_function_inlining = 4;
  if (this->_internal_do_function_inlining() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(4, this->_internal_do_function_inlining(), target);
  }

  // .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
  if (this->_internal_global_jit_level() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      5, this->_internal_global_jit_level(), target);
  }

  // int64 max_folded_constant_in_bytes = 6;
  if (this->_internal_max_folded_constant_in_bytes() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(6, this->_internal_max_folded_constant_in_bytes(), target);
  }

  // bool cpu_global_jit = 7;
  if (this->_internal_cpu_global_jit() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(7, this->_internal_cpu_global_jit(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.OptimizerOptions)
  return target;
}

size_t OptimizerOptions::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.OptimizerOptions)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // .tensorflow.OptimizerOptions.Level opt_level = 3;
  if (this->_internal_opt_level() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_opt_level());
  }

  // bool do_common_subexpression_elimination = 1;
  if (this->_internal_do_common_subexpression_elimination() != 0) {
    total_size += 1 + 1;
  }

  // bool do_constant_folding = 2;
  if (this->_internal_do_constant_folding() != 0) {
    total_size += 1 + 1;
  }

  // bool do_function_inlining = 4;
  if (this->_internal_do_function_inlining() != 0) {
    total_size += 1 + 1;
  }

  // bool cpu_global_jit = 7;
  if (this->_internal_cpu_global_jit() != 0) {
    total_size += 1 + 1;
  }

  // int64 max_folded_constant_in_bytes = 6;
  if (this->_internal_max_folded_constant_in_bytes() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_max_folded_constant_in_bytes());
  }

  // .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
  if (this->_internal_global_jit_level() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_global_jit_level());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void OptimizerOptions::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const OptimizerOptions*>(
      &from));
}

void OptimizerOptions::MergeFrom(const OptimizerOptions& from) {
  OptimizerOptions* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.OptimizerOptions)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_opt_level() != 0) {
    _this->_internal_set_opt_level(from._internal_opt_level());
  }
  if (from._internal_do_common_subexpression_elimination() != 0) {
    _this->_internal_set_do_common_subexpression_elimination(from._internal_do_common_subexpression_elimination());
  }
  if (from._internal_do_constant_folding() != 0) {
    _this->_internal_set_do_constant_folding(from._internal_do_constant_folding());
  }
  if (from._internal_do_function_inlining() != 0) {
    _this->_internal_set_do_function_inlining(from._internal_do_function_inlining());
  }
  if (from._internal_cpu_global_jit() != 0) {
    _this->_internal_set_cpu_global_jit(from._internal_cpu_global_jit());
  }
  if (from._internal_max_folded_constant_in_bytes() != 0) {
    _this->_internal_set_max_folded_constant_in_bytes(from._internal_max_folded_constant_in_bytes());
  }
  if (from._internal_global_jit_level() != 0) {
    _this->_internal_set_global_jit_level(from._internal_global_jit_level());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void OptimizerOptions::CopyFrom(const OptimizerOptions& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.OptimizerOptions)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool OptimizerOptions::IsInitialized() const {
  return true;
}

void OptimizerOptions::InternalSwap(OptimizerOptions* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(OptimizerOptions, _impl_.global_jit_level_)
      + sizeof(OptimizerOptions::_impl_.global_jit_level_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(OptimizerOptions, _impl_.opt_level_)>(
          reinterpret_cast<char*>(&_impl_.opt_level_),
          reinterpret_cast<char*>(&other->_impl_.opt_level_));
}

std::string OptimizerOptions::GetTypeName() const {
  return "tensorflow.OptimizerOptions";
}
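
// Usage sketch (hand-written, not protoc output; enum constants use the
// standard generated class-scoped aliases):
//
//   tensorflow::OptimizerOptions opt;
//   opt.set_do_constant_folding(true);
//   opt.set_global_jit_level(tensorflow::OptimizerOptions::ON_1);
//   std::string bytes = opt.SerializeAsString();  // lite wire format
//   tensorflow::OptimizerOptions back;
//   bool ok = back.ParseFromString(bytes);        // round-trips losslessly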


// ===================================================================

class GraphOptions::_Internal {
 public:
  static const ::tensorflow::OptimizerOptions& optimizer_options(const GraphOptions* msg);
  static const ::tensorflow::RewriterConfig& rewrite_options(const GraphOptions* msg);
};

const ::tensorflow::OptimizerOptions&
GraphOptions::_Internal::optimizer_options(const GraphOptions* msg) {
  return *msg->_impl_.optimizer_options_;
}
const ::tensorflow::RewriterConfig&
GraphOptions::_Internal::rewrite_options(const GraphOptions* msg) {
  return *msg->_impl_.rewrite_options_;
}
void GraphOptions::clear_rewrite_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.rewrite_options_ != nullptr) {
    delete _impl_.rewrite_options_;
  }
  _impl_.rewrite_options_ = nullptr;
}
GraphOptions::GraphOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.GraphOptions)
}
GraphOptions::GraphOptions(const GraphOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  GraphOptions* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.optimizer_options_){nullptr}
    , decltype(_impl_.rewrite_options_){nullptr}
    , decltype(_impl_.build_cost_model_){}
    , decltype(_impl_.enable_recv_scheduling_){}
    , decltype(_impl_.infer_shapes_){}
    , decltype(_impl_.place_pruned_graph_){}
    , decltype(_impl_.enable_bfloat16_sendrecv_){}
    , decltype(_impl_.timeline_step_){}
    , decltype(_impl_.build_cost_model_after_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  if (from._internal_has_optimizer_options()) {
    _this->_impl_.optimizer_options_ = new ::tensorflow::OptimizerOptions(*from._impl_.optimizer_options_);
  }
  if (from._internal_has_rewrite_options()) {
    _this->_impl_.rewrite_options_ = new ::tensorflow::RewriterConfig(*from._impl_.rewrite_options_);
  }
  ::memcpy(&_impl_.build_cost_model_, &from._impl_.build_cost_model_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.build_cost_model_after_) -
    reinterpret_cast<char*>(&_impl_.build_cost_model_)) + sizeof(_impl_.build_cost_model_after_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.GraphOptions)
}

inline void GraphOptions::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.optimizer_options_){nullptr}
    , decltype(_impl_.rewrite_options_){nullptr}
    , decltype(_impl_.build_cost_model_){::int64_t{0}}
    , decltype(_impl_.enable_recv_scheduling_){false}
    , decltype(_impl_.infer_shapes_){false}
    , decltype(_impl_.place_pruned_graph_){false}
    , decltype(_impl_.enable_bfloat16_sendrecv_){false}
    , decltype(_impl_.timeline_step_){0}
    , decltype(_impl_.build_cost_model_after_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

GraphOptions::~GraphOptions() {
  // @@protoc_insertion_point(destructor:tensorflow.GraphOptions)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void GraphOptions::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  if (this != internal_default_instance()) delete _impl_.optimizer_options_;
  if (this != internal_default_instance()) delete _impl_.rewrite_options_;
}

void GraphOptions::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void GraphOptions::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.GraphOptions)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  if (GetArenaForAllocation() == nullptr && _impl_.optimizer_options_ != nullptr) {
    delete _impl_.optimizer_options_;
  }
  _impl_.optimizer_options_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.rewrite_options_ != nullptr) {
    delete _impl_.rewrite_options_;
  }
  _impl_.rewrite_options_ = nullptr;
2335   ::memset(&_impl_.build_cost_model_, 0, static_cast<size_t>(
2336       reinterpret_cast<char*>(&_impl_.build_cost_model_after_) -
2337       reinterpret_cast<char*>(&_impl_.build_cost_model_)) + sizeof(_impl_.build_cost_model_after_));
2338   _internal_metadata_.Clear<std::string>();
2339 }
2340 
const char* GraphOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // bool enable_recv_scheduling = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.enable_recv_scheduling_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.OptimizerOptions optimizer_options = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          ptr = ctx->ParseMessage(_internal_mutable_optimizer_options(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 build_cost_model = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          _impl_.build_cost_model_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool infer_shapes = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          _impl_.infer_shapes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool place_pruned_graph = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 48)) {
          _impl_.place_pruned_graph_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool enable_bfloat16_sendrecv = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) {
          _impl_.enable_bfloat16_sendrecv_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 timeline_step = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 64)) {
          _impl_.timeline_step_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 build_cost_model_after = 9;
      case 9:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 72)) {
          _impl_.build_cost_model_after_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig rewrite_options = 10;
      case 10:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 82)) {
          ptr = ctx->ParseMessage(_internal_mutable_rewrite_options(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
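// Editor's note: the switch above dispatches on the field number (tag >> 3);
// the low three bits of each tag carry the wire type, which is what the
// single-byte comparisons check. Worked examples for two cases above:
//   optimizer_options: field 3, wire type 2 (length-delimited)
//     -> tag = (3 << 3) | 2 = 26
//   enable_recv_scheduling: field 2, wire type 0 (varint)
//     -> tag = (2 << 3) | 0 = 16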

::uint8_t* GraphOptions::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.GraphOptions)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // bool enable_recv_scheduling = 2;
  if (this->_internal_enable_recv_scheduling() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(2, this->_internal_enable_recv_scheduling(), target);
  }

  // .tensorflow.OptimizerOptions optimizer_options = 3;
  if (this->_internal_has_optimizer_options()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(3, _Internal::optimizer_options(this),
        _Internal::optimizer_options(this).GetCachedSize(), target, stream);
  }

  // int64 build_cost_model = 4;
  if (this->_internal_build_cost_model() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_build_cost_model(), target);
  }

  // bool infer_shapes = 5;
  if (this->_internal_infer_shapes() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(5, this->_internal_infer_shapes(), target);
  }

  // bool place_pruned_graph = 6;
  if (this->_internal_place_pruned_graph() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(6, this->_internal_place_pruned_graph(), target);
  }

  // bool enable_bfloat16_sendrecv = 7;
  if (this->_internal_enable_bfloat16_sendrecv() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(7, this->_internal_enable_bfloat16_sendrecv(), target);
  }

  // int32 timeline_step = 8;
  if (this->_internal_timeline_step() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(8, this->_internal_timeline_step(), target);
  }

  // int64 build_cost_model_after = 9;
  if (this->_internal_build_cost_model_after() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(9, this->_internal_build_cost_model_after(), target);
  }

  // .tensorflow.RewriterConfig rewrite_options = 10;
  if (this->_internal_has_rewrite_options()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(10, _Internal::rewrite_options(this),
        _Internal::rewrite_options(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.GraphOptions)
  return target;
}
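// Editor's note: EpsCopyOutputStream keeps a small slop region past `target`,
// so EnsureSpace() only occasionally has to flush and most fixed-size writes
// above proceed without per-write bounds checks. Nested messages are emitted
// with the length cached by an earlier ByteSizeLong() pass (GetCachedSize),
// which is why the library always computes sizes before serializing.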

size_t GraphOptions::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.GraphOptions)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // .tensorflow.OptimizerOptions optimizer_options = 3;
  if (this->_internal_has_optimizer_options()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.optimizer_options_);
  }

  // .tensorflow.RewriterConfig rewrite_options = 10;
  if (this->_internal_has_rewrite_options()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.rewrite_options_);
  }

  // int64 build_cost_model = 4;
  if (this->_internal_build_cost_model() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_build_cost_model());
  }

  // bool enable_recv_scheduling = 2;
  if (this->_internal_enable_recv_scheduling() != 0) {
    total_size += 1 + 1;
  }

  // bool infer_shapes = 5;
  if (this->_internal_infer_shapes() != 0) {
    total_size += 1 + 1;
  }

  // bool place_pruned_graph = 6;
  if (this->_internal_place_pruned_graph() != 0) {
    total_size += 1 + 1;
  }

  // bool enable_bfloat16_sendrecv = 7;
  if (this->_internal_enable_bfloat16_sendrecv() != 0) {
    total_size += 1 + 1;
  }

  // int32 timeline_step = 8;
  if (this->_internal_timeline_step() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_timeline_step());
  }

  // int64 build_cost_model_after = 9;
  if (this->_internal_build_cost_model_after() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_build_cost_model_after());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
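// Editor's note: ByteSizeLong() stores its result through SetCachedSize() so
// the subsequent _InternalSerialize() pass can emit length prefixes for
// nested messages without walking them a second time; ToCachedSize() narrows
// the size_t total to the int slot used for that cache.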

void GraphOptions::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const GraphOptions*>(
      &from));
}

void GraphOptions::MergeFrom(const GraphOptions& from) {
  GraphOptions* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.GraphOptions)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_has_optimizer_options()) {
    _this->_internal_mutable_optimizer_options()->::tensorflow::OptimizerOptions::MergeFrom(
        from._internal_optimizer_options());
  }
  if (from._internal_has_rewrite_options()) {
    _this->_internal_mutable_rewrite_options()->::tensorflow::RewriterConfig::MergeFrom(
        from._internal_rewrite_options());
  }
  if (from._internal_build_cost_model() != 0) {
    _this->_internal_set_build_cost_model(from._internal_build_cost_model());
  }
  if (from._internal_enable_recv_scheduling() != 0) {
    _this->_internal_set_enable_recv_scheduling(from._internal_enable_recv_scheduling());
  }
  if (from._internal_infer_shapes() != 0) {
    _this->_internal_set_infer_shapes(from._internal_infer_shapes());
  }
  if (from._internal_place_pruned_graph() != 0) {
    _this->_internal_set_place_pruned_graph(from._internal_place_pruned_graph());
  }
  if (from._internal_enable_bfloat16_sendrecv() != 0) {
    _this->_internal_set_enable_bfloat16_sendrecv(from._internal_enable_bfloat16_sendrecv());
  }
  if (from._internal_timeline_step() != 0) {
    _this->_internal_set_timeline_step(from._internal_timeline_step());
  }
  if (from._internal_build_cost_model_after() != 0) {
    _this->_internal_set_build_cost_model_after(from._internal_build_cost_model_after());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void GraphOptions::CopyFrom(const GraphOptions& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.GraphOptions)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool GraphOptions::IsInitialized() const {
  return true;
}

void GraphOptions::InternalSwap(GraphOptions* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(GraphOptions, _impl_.build_cost_model_after_)
      + sizeof(GraphOptions::_impl_.build_cost_model_after_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(GraphOptions, _impl_.optimizer_options_)>(
          reinterpret_cast<char*>(&_impl_.optimizer_options_),
          reinterpret_cast<char*>(&other->_impl_.optimizer_options_));
}

std::string GraphOptions::GetTypeName() const {
  return "tensorflow.GraphOptions";
}
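// Editor's note: a minimal usage sketch, illustrative only and not part of
// the generated file. It assumes the standard generated accessors declared
// in config.pb.h (set_infer_shapes, mutable_optimizer_options,
// set_do_constant_folding) and SerializeAsString() from MessageLite:
//
//   tensorflow::GraphOptions opts;
//   opts.set_infer_shapes(true);
//   opts.mutable_optimizer_options()->set_do_constant_folding(true);
//   std::string wire = opts.SerializeAsString();  // size pass, then serialize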


// ===================================================================

class ThreadPoolOptionProto::_Internal {
 public:
};

ThreadPoolOptionProto::ThreadPoolOptionProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.ThreadPoolOptionProto)
}
ThreadPoolOptionProto::ThreadPoolOptionProto(const ThreadPoolOptionProto& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  ThreadPoolOptionProto* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.global_name_){}
    , decltype(_impl_.num_threads_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.global_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.global_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_global_name().empty()) {
    _this->_impl_.global_name_.Set(from._internal_global_name(),
      _this->GetArenaForAllocation());
  }
  _this->_impl_.num_threads_ = from._impl_.num_threads_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.ThreadPoolOptionProto)
}

inline void ThreadPoolOptionProto::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.global_name_){}
    , decltype(_impl_.num_threads_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.global_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.global_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

ThreadPoolOptionProto::~ThreadPoolOptionProto() {
  // @@protoc_insertion_point(destructor:tensorflow.ThreadPoolOptionProto)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void ThreadPoolOptionProto::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.global_name_.Destroy();
}

void ThreadPoolOptionProto::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void ThreadPoolOptionProto::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.ThreadPoolOptionProto)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.global_name_.ClearToEmpty();
  _impl_.num_threads_ = 0;
  _internal_metadata_.Clear<std::string>();
}

const char* ThreadPoolOptionProto::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int32 num_threads = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.num_threads_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string global_name = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_global_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
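// Editor's note: because global_name is a proto3 `string` (not `bytes`), the
// parser validates UTF-8 via VerifyUTF8 after reading the payload; a failed
// check routes through CHK_ to the `failure` label and aborts the parse.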

::uint8_t* ThreadPoolOptionProto::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.ThreadPoolOptionProto)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int32 num_threads = 1;
  if (this->_internal_num_threads() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_num_threads(), target);
  }

  // string global_name = 2;
  if (!this->_internal_global_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_global_name().data(), static_cast<int>(this->_internal_global_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.ThreadPoolOptionProto.global_name");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_global_name(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.ThreadPoolOptionProto)
  return target;
}

size_t ThreadPoolOptionProto::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.ThreadPoolOptionProto)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string global_name = 2;
  if (!this->_internal_global_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_global_name());
  }

  // int32 num_threads = 1;
  if (this->_internal_num_threads() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_num_threads());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void ThreadPoolOptionProto::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const ThreadPoolOptionProto*>(
      &from));
}

void ThreadPoolOptionProto::MergeFrom(const ThreadPoolOptionProto& from) {
  ThreadPoolOptionProto* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.ThreadPoolOptionProto)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_global_name().empty()) {
    _this->_internal_set_global_name(from._internal_global_name());
  }
  if (from._internal_num_threads() != 0) {
    _this->_internal_set_num_threads(from._internal_num_threads());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void ThreadPoolOptionProto::CopyFrom(const ThreadPoolOptionProto& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.ThreadPoolOptionProto)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool ThreadPoolOptionProto::IsInitialized() const {
  return true;
}

void ThreadPoolOptionProto::InternalSwap(ThreadPoolOptionProto* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.global_name_, lhs_arena,
      &other->_impl_.global_name_, rhs_arena
  );
  swap(_impl_.num_threads_, other->_impl_.num_threads_);
}

std::string ThreadPoolOptionProto::GetTypeName() const {
  return "tensorflow.ThreadPoolOptionProto";
}
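// Editor's note: illustrative round-trip sketch, not generated code. It
// assumes the standard generated accessors from config.pb.h; the pool name
// "inter_op" is a made-up example value:
//
//   tensorflow::ThreadPoolOptionProto pool;
//   pool.set_num_threads(8);
//   pool.set_global_name("inter_op");
//   std::string bytes = pool.SerializeAsString();
//
//   tensorflow::ThreadPoolOptionProto parsed;
//   bool ok = parsed.ParseFromString(bytes);  // drives _InternalParse above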


// ===================================================================

class RPCOptions::_Internal {
 public:
};

RPCOptions::RPCOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.RPCOptions)
}
RPCOptions::RPCOptions(const RPCOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  RPCOptions* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.compression_algorithm_){}
    , decltype(_impl_.compression_level_){}
    , decltype(_impl_.use_rpc_for_inprocess_master_){}
    , decltype(_impl_.cache_rpc_response_){}
    , decltype(_impl_.disable_session_connection_sharing_){}
    , decltype(_impl_.num_channels_per_target_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.compression_algorithm_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.compression_algorithm_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_compression_algorithm().empty()) {
    _this->_impl_.compression_algorithm_.Set(from._internal_compression_algorithm(),
      _this->GetArenaForAllocation());
  }
  ::memcpy(&_impl_.compression_level_, &from._impl_.compression_level_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.num_channels_per_target_) -
    reinterpret_cast<char*>(&_impl_.compression_level_)) + sizeof(_impl_.num_channels_per_target_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.RPCOptions)
}

inline void RPCOptions::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.compression_algorithm_){}
    , decltype(_impl_.compression_level_){0}
    , decltype(_impl_.use_rpc_for_inprocess_master_){false}
    , decltype(_impl_.cache_rpc_response_){false}
    , decltype(_impl_.disable_session_connection_sharing_){false}
    , decltype(_impl_.num_channels_per_target_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.compression_algorithm_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.compression_algorithm_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

RPCOptions::~RPCOptions() {
  // @@protoc_insertion_point(destructor:tensorflow.RPCOptions)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void RPCOptions::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.compression_algorithm_.Destroy();
}

void RPCOptions::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void RPCOptions::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.RPCOptions)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.compression_algorithm_.ClearToEmpty();
  ::memset(&_impl_.compression_level_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.num_channels_per_target_) -
      reinterpret_cast<char*>(&_impl_.compression_level_)) + sizeof(_impl_.num_channels_per_target_));
  _internal_metadata_.Clear<std::string>();
}

const char* RPCOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // bool use_rpc_for_inprocess_master = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.use_rpc_for_inprocess_master_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string compression_algorithm = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_compression_algorithm();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 compression_level = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.compression_level_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool cache_rpc_response = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          _impl_.cache_rpc_response_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool disable_session_connection_sharing = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          _impl_.disable_session_connection_sharing_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 num_channels_per_target = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 48)) {
          _impl_.num_channels_per_target_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
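// Editor's note: bool fields are plain varints on the wire, so the parser
// reads them with ReadVarint64 and lets the implicit conversion to bool map
// any non-zero varint to true; int32 fields use ReadVarint32, which keeps
// the low 32 bits of the (up to 64-bit) varint, as the format specifies.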

::uint8_t* RPCOptions::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RPCOptions)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // bool use_rpc_for_inprocess_master = 1;
  if (this->_internal_use_rpc_for_inprocess_master() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(1, this->_internal_use_rpc_for_inprocess_master(), target);
  }

  // string compression_algorithm = 2;
  if (!this->_internal_compression_algorithm().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_compression_algorithm().data(), static_cast<int>(this->_internal_compression_algorithm().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.RPCOptions.compression_algorithm");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_compression_algorithm(), target);
  }

  // int32 compression_level = 3;
  if (this->_internal_compression_level() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_compression_level(), target);
  }

  // bool cache_rpc_response = 4;
  if (this->_internal_cache_rpc_response() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(4, this->_internal_cache_rpc_response(), target);
  }

  // bool disable_session_connection_sharing = 5;
  if (this->_internal_disable_session_connection_sharing() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(5, this->_internal_disable_session_connection_sharing(), target);
  }

  // int32 num_channels_per_target = 6;
  if (this->_internal_num_channels_per_target() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(6, this->_internal_num_channels_per_target(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RPCOptions)
  return target;
}

size_t RPCOptions::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.RPCOptions)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string compression_algorithm = 2;
  if (!this->_internal_compression_algorithm().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_compression_algorithm());
  }

  // int32 compression_level = 3;
  if (this->_internal_compression_level() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_compression_level());
  }

  // bool use_rpc_for_inprocess_master = 1;
  if (this->_internal_use_rpc_for_inprocess_master() != 0) {
    total_size += 1 + 1;
  }

  // bool cache_rpc_response = 4;
  if (this->_internal_cache_rpc_response() != 0) {
    total_size += 1 + 1;
  }

  // bool disable_session_connection_sharing = 5;
  if (this->_internal_disable_session_connection_sharing() != 0) {
    total_size += 1 + 1;
  }

  // int32 num_channels_per_target = 6;
  if (this->_internal_num_channels_per_target() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_num_channels_per_target());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
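// Editor's note: Int32SizePlusOne(v) is the varint size of v plus one byte
// for the field tag (every tag in this message fits in a single byte).
// Worked example: compression_level = 300 encodes as a 2-byte varint
// (0xAC 0x02), so field 3 costs 1 (tag) + 2 (payload) = 3 bytes; each set
// bool costs a flat 1 + 1.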

void RPCOptions::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const RPCOptions*>(
      &from));
}

void RPCOptions::MergeFrom(const RPCOptions& from) {
  RPCOptions* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RPCOptions)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_compression_algorithm().empty()) {
    _this->_internal_set_compression_algorithm(from._internal_compression_algorithm());
  }
  if (from._internal_compression_level() != 0) {
    _this->_internal_set_compression_level(from._internal_compression_level());
  }
  if (from._internal_use_rpc_for_inprocess_master() != 0) {
    _this->_internal_set_use_rpc_for_inprocess_master(from._internal_use_rpc_for_inprocess_master());
  }
  if (from._internal_cache_rpc_response() != 0) {
    _this->_internal_set_cache_rpc_response(from._internal_cache_rpc_response());
  }
  if (from._internal_disable_session_connection_sharing() != 0) {
    _this->_internal_set_disable_session_connection_sharing(from._internal_disable_session_connection_sharing());
  }
  if (from._internal_num_channels_per_target() != 0) {
    _this->_internal_set_num_channels_per_target(from._internal_num_channels_per_target());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void RPCOptions::CopyFrom(const RPCOptions& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RPCOptions)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool RPCOptions::IsInitialized() const {
  return true;
}

void RPCOptions::InternalSwap(RPCOptions* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.compression_algorithm_, lhs_arena,
      &other->_impl_.compression_algorithm_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(RPCOptions, _impl_.num_channels_per_target_)
      + sizeof(RPCOptions::_impl_.num_channels_per_target_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(RPCOptions, _impl_.compression_level_)>(
          reinterpret_cast<char*>(&_impl_.compression_level_),
          reinterpret_cast<char*>(&other->_impl_.compression_level_));
}

std::string RPCOptions::GetTypeName() const {
  return "tensorflow.RPCOptions";
}


// ===================================================================

class SessionMetadata::_Internal {
 public:
};

SessionMetadata::SessionMetadata(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.SessionMetadata)
}
SessionMetadata::SessionMetadata(const SessionMetadata& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  SessionMetadata* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.name_){}
    , decltype(_impl_.version_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_name().empty()) {
    _this->_impl_.name_.Set(from._internal_name(),
      _this->GetArenaForAllocation());
  }
  _this->_impl_.version_ = from._impl_.version_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.SessionMetadata)
}

inline void SessionMetadata::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.name_){}
    , decltype(_impl_.version_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

SessionMetadata::~SessionMetadata() {
  // @@protoc_insertion_point(destructor:tensorflow.SessionMetadata)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void SessionMetadata::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.name_.Destroy();
}

void SessionMetadata::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void SessionMetadata::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.SessionMetadata)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.name_.ClearToEmpty();
  _impl_.version_ = ::int64_t{0};
  _internal_metadata_.Clear<std::string>();
}

const char* SessionMetadata::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // string name = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          auto str = _internal_mutable_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 version = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.version_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* SessionMetadata::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.SessionMetadata)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // string name = 1;
  if (!this->_internal_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_name().data(), static_cast<int>(this->_internal_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.SessionMetadata.name");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_name(), target);
  }

  // int64 version = 2;
  if (this->_internal_version() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_version(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.SessionMetadata)
  return target;
}

size_t SessionMetadata::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.SessionMetadata)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string name = 1;
  if (!this->_internal_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_name());
  }

  // int64 version = 2;
  if (this->_internal_version() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_version());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void SessionMetadata::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const SessionMetadata*>(
      &from));
}

void SessionMetadata::MergeFrom(const SessionMetadata& from) {
  SessionMetadata* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.SessionMetadata)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_name().empty()) {
    _this->_internal_set_name(from._internal_name());
  }
  if (from._internal_version() != 0) {
    _this->_internal_set_version(from._internal_version());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void SessionMetadata::CopyFrom(const SessionMetadata& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.SessionMetadata)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool SessionMetadata::IsInitialized() const {
  return true;
}

void SessionMetadata::InternalSwap(SessionMetadata* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.name_, lhs_arena,
      &other->_impl_.name_, rhs_arena
  );
  swap(_impl_.version_, other->_impl_.version_);
}

std::string SessionMetadata::GetTypeName() const {
  return "tensorflow.SessionMetadata";
}
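// Editor's note: this file is generated against the lite runtime
// (MessageLite), so there is no descriptor or reflection support.
// CheckTypeAndMergeFrom simply DownCasts, and GetTypeName() returns the
// hard-coded full name "tensorflow.SessionMetadata" rather than consulting
// a descriptor pool.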


// ===================================================================

ConfigProto_DeviceCountEntry_DoNotUse::ConfigProto_DeviceCountEntry_DoNotUse() {}
ConfigProto_DeviceCountEntry_DoNotUse::ConfigProto_DeviceCountEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
    : SuperType(arena) {}
void ConfigProto_DeviceCountEntry_DoNotUse::MergeFrom(const ConfigProto_DeviceCountEntry_DoNotUse& other) {
  MergeFromInternal(other);
}
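// Editor's note: ConfigProto.device_count is a proto3 map field; protoc
// models it as this synthesized "DoNotUse" entry message (key = field 1,
// value = field 2 on the wire) and delegates MergeFrom to the MapEntry
// SuperType rather than emitting a full message implementation.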
3477 
3478 // ===================================================================
3479 
3480 class ConfigProto_Experimental::_Internal {
3481  public:
3482   static const ::tensorflow::SessionMetadata& session_metadata(const ConfigProto_Experimental* msg);
3483   static const ::tensorflow::CoordinationServiceConfig& coordination_config(const ConfigProto_Experimental* msg);
3484 };
3485 
3486 const ::tensorflow::SessionMetadata&
session_metadata(const ConfigProto_Experimental * msg)3487 ConfigProto_Experimental::_Internal::session_metadata(const ConfigProto_Experimental* msg) {
3488   return *msg->_impl_.session_metadata_;
3489 }
3490 const ::tensorflow::CoordinationServiceConfig&
coordination_config(const ConfigProto_Experimental * msg)3491 ConfigProto_Experimental::_Internal::coordination_config(const ConfigProto_Experimental* msg) {
3492   return *msg->_impl_.coordination_config_;
3493 }
clear_coordination_config()3494 void ConfigProto_Experimental::clear_coordination_config() {
3495   if (GetArenaForAllocation() == nullptr && _impl_.coordination_config_ != nullptr) {
3496     delete _impl_.coordination_config_;
3497   }
3498   _impl_.coordination_config_ = nullptr;
3499 }
ConfigProto_Experimental::ConfigProto_Experimental(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.ConfigProto.Experimental)
}
ConfigProto_Experimental::ConfigProto_Experimental(const ConfigProto_Experimental& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  ConfigProto_Experimental* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.collective_group_leader_){}
    , decltype(_impl_.executor_type_){}
    , decltype(_impl_.session_metadata_){nullptr}
    , decltype(_impl_.coordination_config_){nullptr}
    , decltype(_impl_.recv_buf_max_chunk_){}
    , decltype(_impl_.use_numa_affinity_){}
    , decltype(_impl_.collective_deterministic_sequential_execution_){}
    , decltype(_impl_.collective_nccl_){}
    , decltype(_impl_.share_session_state_in_clusterspec_propagation_){}
    , decltype(_impl_.disable_thread_spinning_){}
    , decltype(_impl_.share_cluster_devices_in_session_){}
    , decltype(_impl_.optimize_for_static_graph_){}
    , decltype(_impl_.enable_mlir_bridge_){}
    , decltype(_impl_.mlir_bridge_rollout_){}
    , decltype(_impl_.xla_fusion_autotuner_thresh_){}
    , decltype(_impl_.enable_mlir_graph_optimization_){}
    , decltype(_impl_.disable_output_partition_graphs_){}
    , decltype(_impl_.use_tfrt_){}
    , decltype(_impl_.disable_functional_ops_lowering_){}
    , decltype(_impl_.xla_prefer_single_graph_cluster_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.collective_group_leader_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.collective_group_leader_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_collective_group_leader().empty()) {
    _this->_impl_.collective_group_leader_.Set(from._internal_collective_group_leader(),
      _this->GetArenaForAllocation());
  }
  _impl_.executor_type_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.executor_type_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_executor_type().empty()) {
    _this->_impl_.executor_type_.Set(from._internal_executor_type(),
      _this->GetArenaForAllocation());
  }
  if (from._internal_has_session_metadata()) {
    _this->_impl_.session_metadata_ = new ::tensorflow::SessionMetadata(*from._impl_.session_metadata_);
  }
  if (from._internal_has_coordination_config()) {
    _this->_impl_.coordination_config_ = new ::tensorflow::CoordinationServiceConfig(*from._impl_.coordination_config_);
  }
  ::memcpy(&_impl_.recv_buf_max_chunk_, &from._impl_.recv_buf_max_chunk_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.xla_prefer_single_graph_cluster_) -
    reinterpret_cast<char*>(&_impl_.recv_buf_max_chunk_)) + sizeof(_impl_.xla_prefer_single_graph_cluster_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.ConfigProto.Experimental)
}
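// Editor's note (not protoc output): the copy constructor above copies all trailing
// scalar fields with a single ::memcpy. The fields from recv_buf_max_chunk_ through
// xla_prefer_single_graph_cluster_ are declared contiguously in Impl_, so
// (&last - &first) + sizeof(last) spans the whole trivially copyable block.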

inline void ConfigProto_Experimental::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.collective_group_leader_){}
    , decltype(_impl_.executor_type_){}
    , decltype(_impl_.session_metadata_){nullptr}
    , decltype(_impl_.coordination_config_){nullptr}
    , decltype(_impl_.recv_buf_max_chunk_){0}
    , decltype(_impl_.use_numa_affinity_){false}
    , decltype(_impl_.collective_deterministic_sequential_execution_){false}
    , decltype(_impl_.collective_nccl_){false}
    , decltype(_impl_.share_session_state_in_clusterspec_propagation_){false}
    , decltype(_impl_.disable_thread_spinning_){false}
    , decltype(_impl_.share_cluster_devices_in_session_){false}
    , decltype(_impl_.optimize_for_static_graph_){false}
    , decltype(_impl_.enable_mlir_bridge_){false}
    , decltype(_impl_.mlir_bridge_rollout_){0}
    , decltype(_impl_.xla_fusion_autotuner_thresh_){::int64_t{0}}
    , decltype(_impl_.enable_mlir_graph_optimization_){false}
    , decltype(_impl_.disable_output_partition_graphs_){false}
    , decltype(_impl_.use_tfrt_){false}
    , decltype(_impl_.disable_functional_ops_lowering_){false}
    , decltype(_impl_.xla_prefer_single_graph_cluster_){false}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.collective_group_leader_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.collective_group_leader_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  _impl_.executor_type_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.executor_type_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

ConfigProto_Experimental::~ConfigProto_Experimental() {
  // @@protoc_insertion_point(destructor:tensorflow.ConfigProto.Experimental)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void ConfigProto_Experimental::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.collective_group_leader_.Destroy();
  _impl_.executor_type_.Destroy();
  if (this != internal_default_instance()) delete _impl_.session_metadata_;
  if (this != internal_default_instance()) delete _impl_.coordination_config_;
}
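// Editor's note (not protoc output): SharedDtor() runs only for heap-allocated
// messages (the DCHECK documents that arena-owned messages never reach it). The
// internal_default_instance() comparisons are a defensive guard so the process-wide
// default instance, which is never destroyed, cannot delete its submessage pointers.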

void ConfigProto_Experimental::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void ConfigProto_Experimental::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.ConfigProto.Experimental)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.collective_group_leader_.ClearToEmpty();
  _impl_.executor_type_.ClearToEmpty();
  if (GetArenaForAllocation() == nullptr && _impl_.session_metadata_ != nullptr) {
    delete _impl_.session_metadata_;
  }
  _impl_.session_metadata_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.coordination_config_ != nullptr) {
    delete _impl_.coordination_config_;
  }
  _impl_.coordination_config_ = nullptr;
  ::memset(&_impl_.recv_buf_max_chunk_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.xla_prefer_single_graph_cluster_) -
      reinterpret_cast<char*>(&_impl_.recv_buf_max_chunk_)) + sizeof(_impl_.xla_prefer_single_graph_cluster_));
  _internal_metadata_.Clear<std::string>();
}

const char* ConfigProto_Experimental::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // string collective_group_leader = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          auto str = _internal_mutable_collective_group_leader();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // string executor_type = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          auto str = _internal_mutable_executor_type();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 recv_buf_max_chunk = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          _impl_.recv_buf_max_chunk_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool use_numa_affinity = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          _impl_.use_numa_affinity_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool collective_deterministic_sequential_execution = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 48)) {
          _impl_.collective_deterministic_sequential_execution_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool collective_nccl = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) {
          _impl_.collective_nccl_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool share_session_state_in_clusterspec_propagation = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 64)) {
          _impl_.share_session_state_in_clusterspec_propagation_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool disable_thread_spinning = 9;
      case 9:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 72)) {
          _impl_.disable_thread_spinning_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool share_cluster_devices_in_session = 10;
      case 10:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 80)) {
          _impl_.share_cluster_devices_in_session_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.SessionMetadata session_metadata = 11;
      case 11:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 90)) {
          ptr = ctx->ParseMessage(_internal_mutable_session_metadata(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool optimize_for_static_graph = 12;
      case 12:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 96)) {
          _impl_.optimize_for_static_graph_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool enable_mlir_bridge = 13;
      case 13:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 104)) {
          _impl_.enable_mlir_bridge_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool disable_output_partition_graphs = 14;
      case 14:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 112)) {
          _impl_.disable_output_partition_graphs_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 xla_fusion_autotuner_thresh = 15;
      case 15:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 120)) {
          _impl_.xla_fusion_autotuner_thresh_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool enable_mlir_graph_optimization = 16;
      case 16:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 128)) {
          _impl_.enable_mlir_graph_optimization_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.ConfigProto.Experimental.MlirBridgeRollout mlir_bridge_rollout = 17;
      case 17:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 136)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_mlir_bridge_rollout(static_cast<::tensorflow::ConfigProto_Experimental_MlirBridgeRollout>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // bool use_tfrt = 18;
      case 18:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 144)) {
          _impl_.use_tfrt_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool disable_functional_ops_lowering = 21;
      case 21:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 168)) {
          _impl_.disable_functional_ops_lowering_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool xla_prefer_single_graph_cluster = 22;
      case 22:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 176)) {
          _impl_.xla_prefer_single_graph_cluster_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.CoordinationServiceConfig coordination_config = 23;
      case 23:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 186)) {
          ptr = ctx->ParseMessage(_internal_mutable_coordination_config(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
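// Editor's note (not protoc output): a sketch of the tag arithmetic _InternalParse
// relies on. A wire tag is (field_number << 3) | wire_type, so the switch dispatches
// on tag >> 3 and each case re-checks the expected low tag byte:
//
//   collective_group_leader = 1, length-delimited (wire type 2): (1 << 3) | 2 == 10
//   use_numa_affinity       = 5, varint (wire type 0):           (5 << 3) | 0 == 40
//   coordination_config     = 23, length-delimited:             (23 << 3) | 2 == 186
//
// Non-matching tags (and zero or end-group tags) fall through to handle_unusual, which
// either terminates the message or stashes the bytes in the string-backed
// unknown-field set used by the lite runtime.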

::uint8_t* ConfigProto_Experimental::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.ConfigProto.Experimental)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // string collective_group_leader = 1;
  if (!this->_internal_collective_group_leader().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_collective_group_leader().data(), static_cast<int>(this->_internal_collective_group_leader().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.ConfigProto.Experimental.collective_group_leader");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_collective_group_leader(), target);
  }

  // string executor_type = 3;
  if (!this->_internal_executor_type().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_executor_type().data(), static_cast<int>(this->_internal_executor_type().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.ConfigProto.Experimental.executor_type");
    target = stream->WriteStringMaybeAliased(
        3, this->_internal_executor_type(), target);
  }

  // int32 recv_buf_max_chunk = 4;
  if (this->_internal_recv_buf_max_chunk() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(4, this->_internal_recv_buf_max_chunk(), target);
  }

  // bool use_numa_affinity = 5;
  if (this->_internal_use_numa_affinity() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(5, this->_internal_use_numa_affinity(), target);
  }

  // bool collective_deterministic_sequential_execution = 6;
  if (this->_internal_collective_deterministic_sequential_execution() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(6, this->_internal_collective_deterministic_sequential_execution(), target);
  }

  // bool collective_nccl = 7;
  if (this->_internal_collective_nccl() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(7, this->_internal_collective_nccl(), target);
  }

  // bool share_session_state_in_clusterspec_propagation = 8;
  if (this->_internal_share_session_state_in_clusterspec_propagation() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(8, this->_internal_share_session_state_in_clusterspec_propagation(), target);
  }

  // bool disable_thread_spinning = 9;
  if (this->_internal_disable_thread_spinning() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(9, this->_internal_disable_thread_spinning(), target);
  }

  // bool share_cluster_devices_in_session = 10;
  if (this->_internal_share_cluster_devices_in_session() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(10, this->_internal_share_cluster_devices_in_session(), target);
  }

  // .tensorflow.SessionMetadata session_metadata = 11;
  if (this->_internal_has_session_metadata()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(11, _Internal::session_metadata(this),
        _Internal::session_metadata(this).GetCachedSize(), target, stream);
  }

  // bool optimize_for_static_graph = 12;
  if (this->_internal_optimize_for_static_graph() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(12, this->_internal_optimize_for_static_graph(), target);
  }

  // bool enable_mlir_bridge = 13;
  if (this->_internal_enable_mlir_bridge() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(13, this->_internal_enable_mlir_bridge(), target);
  }

  // bool disable_output_partition_graphs = 14;
  if (this->_internal_disable_output_partition_graphs() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(14, this->_internal_disable_output_partition_graphs(), target);
  }

  // int64 xla_fusion_autotuner_thresh = 15;
  if (this->_internal_xla_fusion_autotuner_thresh() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(15, this->_internal_xla_fusion_autotuner_thresh(), target);
  }

  // bool enable_mlir_graph_optimization = 16;
  if (this->_internal_enable_mlir_graph_optimization() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(16, this->_internal_enable_mlir_graph_optimization(), target);
  }

  // .tensorflow.ConfigProto.Experimental.MlirBridgeRollout mlir_bridge_rollout = 17;
  if (this->_internal_mlir_bridge_rollout() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      17, this->_internal_mlir_bridge_rollout(), target);
  }

  // bool use_tfrt = 18;
  if (this->_internal_use_tfrt() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(18, this->_internal_use_tfrt(), target);
  }

  // bool disable_functional_ops_lowering = 21;
  if (this->_internal_disable_functional_ops_lowering() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(21, this->_internal_disable_functional_ops_lowering(), target);
  }

  // bool xla_prefer_single_graph_cluster = 22;
  if (this->_internal_xla_prefer_single_graph_cluster() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(22, this->_internal_xla_prefer_single_graph_cluster(), target);
  }

  // .tensorflow.CoordinationServiceConfig coordination_config = 23;
  if (this->_internal_has_coordination_config()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(23, _Internal::coordination_config(this),
        _Internal::coordination_config(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.ConfigProto.Experimental)
  return target;
}
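// Editor's note (not protoc output): _InternalSerialize writes fields in field-number
// order and, per proto3 implicit presence, skips anything at its default: empty
// strings, zero integers, false bools, and unset submessages produce no bytes at all.
// Unknown fields captured at parse time are appended verbatim at the end, so
// re-serializing preserves fields defined by newer versions of config.proto.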

size_t ConfigProto_Experimental::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.ConfigProto.Experimental)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string collective_group_leader = 1;
  if (!this->_internal_collective_group_leader().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_collective_group_leader());
  }

  // string executor_type = 3;
  if (!this->_internal_executor_type().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_executor_type());
  }

  // .tensorflow.SessionMetadata session_metadata = 11;
  if (this->_internal_has_session_metadata()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.session_metadata_);
  }

  // .tensorflow.CoordinationServiceConfig coordination_config = 23;
  if (this->_internal_has_coordination_config()) {
    total_size += 2 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.coordination_config_);
  }

  // int32 recv_buf_max_chunk = 4;
  if (this->_internal_recv_buf_max_chunk() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_recv_buf_max_chunk());
  }

  // bool use_numa_affinity = 5;
  if (this->_internal_use_numa_affinity() != 0) {
    total_size += 1 + 1;
  }

  // bool collective_deterministic_sequential_execution = 6;
  if (this->_internal_collective_deterministic_sequential_execution() != 0) {
    total_size += 1 + 1;
  }

  // bool collective_nccl = 7;
  if (this->_internal_collective_nccl() != 0) {
    total_size += 1 + 1;
  }

  // bool share_session_state_in_clusterspec_propagation = 8;
  if (this->_internal_share_session_state_in_clusterspec_propagation() != 0) {
    total_size += 1 + 1;
  }

  // bool disable_thread_spinning = 9;
  if (this->_internal_disable_thread_spinning() != 0) {
    total_size += 1 + 1;
  }

  // bool share_cluster_devices_in_session = 10;
  if (this->_internal_share_cluster_devices_in_session() != 0) {
    total_size += 1 + 1;
  }

  // bool optimize_for_static_graph = 12;
  if (this->_internal_optimize_for_static_graph() != 0) {
    total_size += 1 + 1;
  }

  // bool enable_mlir_bridge = 13;
  if (this->_internal_enable_mlir_bridge() != 0) {
    total_size += 1 + 1;
  }

  // .tensorflow.ConfigProto.Experimental.MlirBridgeRollout mlir_bridge_rollout = 17;
  if (this->_internal_mlir_bridge_rollout() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_mlir_bridge_rollout());
  }

  // int64 xla_fusion_autotuner_thresh = 15;
  if (this->_internal_xla_fusion_autotuner_thresh() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_xla_fusion_autotuner_thresh());
  }

  // bool enable_mlir_graph_optimization = 16;
  if (this->_internal_enable_mlir_graph_optimization() != 0) {
    total_size += 2 + 1;
  }

  // bool disable_output_partition_graphs = 14;
  if (this->_internal_disable_output_partition_graphs() != 0) {
    total_size += 1 + 1;
  }

  // bool use_tfrt = 18;
  if (this->_internal_use_tfrt() != 0) {
    total_size += 2 + 1;
  }

  // bool disable_functional_ops_lowering = 21;
  if (this->_internal_disable_functional_ops_lowering() != 0) {
    total_size += 2 + 1;
  }

  // bool xla_prefer_single_graph_cluster = 22;
  if (this->_internal_xla_prefer_single_graph_cluster() != 0) {
    total_size += 2 + 1;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
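// Editor's note (not protoc output): the constants in ByteSizeLong come from varint
// tag encoding. Field numbers 1-15 fit in a one-byte tag while 16 and up need two, so
// a set bool costs 1 + 1 bytes for collective_nccl (field 7) but 2 + 1 for use_tfrt
// (field 18). Int32SizePlusOne/Int64SizePlusOne fold the single tag byte into the
// varint payload size the same way. The result is cached via SetCachedSize so
// serialization can reuse it without recomputing.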

void ConfigProto_Experimental::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const ConfigProto_Experimental*>(
      &from));
}

void ConfigProto_Experimental::MergeFrom(const ConfigProto_Experimental& from) {
  ConfigProto_Experimental* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.ConfigProto.Experimental)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_collective_group_leader().empty()) {
    _this->_internal_set_collective_group_leader(from._internal_collective_group_leader());
  }
  if (!from._internal_executor_type().empty()) {
    _this->_internal_set_executor_type(from._internal_executor_type());
  }
  if (from._internal_has_session_metadata()) {
    _this->_internal_mutable_session_metadata()->::tensorflow::SessionMetadata::MergeFrom(
        from._internal_session_metadata());
  }
  if (from._internal_has_coordination_config()) {
    _this->_internal_mutable_coordination_config()->::tensorflow::CoordinationServiceConfig::MergeFrom(
        from._internal_coordination_config());
  }
  if (from._internal_recv_buf_max_chunk() != 0) {
    _this->_internal_set_recv_buf_max_chunk(from._internal_recv_buf_max_chunk());
  }
  if (from._internal_use_numa_affinity() != 0) {
    _this->_internal_set_use_numa_affinity(from._internal_use_numa_affinity());
  }
  if (from._internal_collective_deterministic_sequential_execution() != 0) {
    _this->_internal_set_collective_deterministic_sequential_execution(from._internal_collective_deterministic_sequential_execution());
  }
  if (from._internal_collective_nccl() != 0) {
    _this->_internal_set_collective_nccl(from._internal_collective_nccl());
  }
  if (from._internal_share_session_state_in_clusterspec_propagation() != 0) {
    _this->_internal_set_share_session_state_in_clusterspec_propagation(from._internal_share_session_state_in_clusterspec_propagation());
  }
  if (from._internal_disable_thread_spinning() != 0) {
    _this->_internal_set_disable_thread_spinning(from._internal_disable_thread_spinning());
  }
  if (from._internal_share_cluster_devices_in_session() != 0) {
    _this->_internal_set_share_cluster_devices_in_session(from._internal_share_cluster_devices_in_session());
  }
  if (from._internal_optimize_for_static_graph() != 0) {
    _this->_internal_set_optimize_for_static_graph(from._internal_optimize_for_static_graph());
  }
  if (from._internal_enable_mlir_bridge() != 0) {
    _this->_internal_set_enable_mlir_bridge(from._internal_enable_mlir_bridge());
  }
  if (from._internal_mlir_bridge_rollout() != 0) {
    _this->_internal_set_mlir_bridge_rollout(from._internal_mlir_bridge_rollout());
  }
  if (from._internal_xla_fusion_autotuner_thresh() != 0) {
    _this->_internal_set_xla_fusion_autotuner_thresh(from._internal_xla_fusion_autotuner_thresh());
  }
  if (from._internal_enable_mlir_graph_optimization() != 0) {
    _this->_internal_set_enable_mlir_graph_optimization(from._internal_enable_mlir_graph_optimization());
  }
  if (from._internal_disable_output_partition_graphs() != 0) {
    _this->_internal_set_disable_output_partition_graphs(from._internal_disable_output_partition_graphs());
  }
  if (from._internal_use_tfrt() != 0) {
    _this->_internal_set_use_tfrt(from._internal_use_tfrt());
  }
  if (from._internal_disable_functional_ops_lowering() != 0) {
    _this->_internal_set_disable_functional_ops_lowering(from._internal_disable_functional_ops_lowering());
  }
  if (from._internal_xla_prefer_single_graph_cluster() != 0) {
    _this->_internal_set_xla_prefer_single_graph_cluster(from._internal_xla_prefer_single_graph_cluster());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
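// Editor's note (not protoc output): MergeFrom implements proto3 merge semantics.
// Scalars are copied only when non-default in `from`, strings only when non-empty,
// and submessages are merged recursively rather than replaced, so fields set on the
// destination but untouched in `from` survive. CopyFrom below is literally Clear()
// followed by MergeFrom().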

void ConfigProto_Experimental::CopyFrom(const ConfigProto_Experimental& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.ConfigProto.Experimental)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool ConfigProto_Experimental::IsInitialized() const {
  return true;
}

void ConfigProto_Experimental::InternalSwap(ConfigProto_Experimental* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.collective_group_leader_, lhs_arena,
      &other->_impl_.collective_group_leader_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.executor_type_, lhs_arena,
      &other->_impl_.executor_type_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(ConfigProto_Experimental, _impl_.xla_prefer_single_graph_cluster_)
      + sizeof(ConfigProto_Experimental::_impl_.xla_prefer_single_graph_cluster_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(ConfigProto_Experimental, _impl_.session_metadata_)>(
          reinterpret_cast<char*>(&_impl_.session_metadata_),
          reinterpret_cast<char*>(&other->_impl_.session_metadata_));
}
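// Editor's note (not protoc output): InternalSwap exchanges the two string fields
// through ArenaStringPtr::InternalSwap (which is arena-aware), then swaps everything
// from session_metadata_ through xla_prefer_single_graph_cluster_ as one raw byte
// region via memswap, relying on the same contiguous Impl_ layout the copy
// constructor's memcpy exploits.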

std::string ConfigProto_Experimental::GetTypeName() const {
  return "tensorflow.ConfigProto.Experimental";
}


// ===================================================================

class ConfigProto::_Internal {
 public:
  static const ::tensorflow::GPUOptions& gpu_options(const ConfigProto* msg);
  static const ::tensorflow::GraphOptions& graph_options(const ConfigProto* msg);
  static const ::tensorflow::RPCOptions& rpc_options(const ConfigProto* msg);
  static const ::tensorflow::ClusterDef& cluster_def(const ConfigProto* msg);
  static const ::tensorflow::ConfigProto_Experimental& experimental(const ConfigProto* msg);
};

const ::tensorflow::GPUOptions&
ConfigProto::_Internal::gpu_options(const ConfigProto* msg) {
  return *msg->_impl_.gpu_options_;
}
const ::tensorflow::GraphOptions&
ConfigProto::_Internal::graph_options(const ConfigProto* msg) {
  return *msg->_impl_.graph_options_;
}
const ::tensorflow::RPCOptions&
ConfigProto::_Internal::rpc_options(const ConfigProto* msg) {
  return *msg->_impl_.rpc_options_;
}
const ::tensorflow::ClusterDef&
ConfigProto::_Internal::cluster_def(const ConfigProto* msg) {
  return *msg->_impl_.cluster_def_;
}
const ::tensorflow::ConfigProto_Experimental&
ConfigProto::_Internal::experimental(const ConfigProto* msg) {
  return *msg->_impl_.experimental_;
}
void ConfigProto::clear_cluster_def() {
  if (GetArenaForAllocation() == nullptr && _impl_.cluster_def_ != nullptr) {
    delete _impl_.cluster_def_;
  }
  _impl_.cluster_def_ = nullptr;
}
ConfigProto::ConfigProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.ConfigProto)
}
ConfigProto::ConfigProto(const ConfigProto& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  ConfigProto* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      /*decltype(_impl_.device_count_)*/{}
    , decltype(_impl_.device_filters_){from._impl_.device_filters_}
    , decltype(_impl_.session_inter_op_thread_pool_){from._impl_.session_inter_op_thread_pool_}
    , decltype(_impl_.gpu_options_){nullptr}
    , decltype(_impl_.graph_options_){nullptr}
    , decltype(_impl_.rpc_options_){nullptr}
    , decltype(_impl_.cluster_def_){nullptr}
    , decltype(_impl_.experimental_){nullptr}
    , decltype(_impl_.intra_op_parallelism_threads_){}
    , decltype(_impl_.placement_period_){}
    , decltype(_impl_.inter_op_parallelism_threads_){}
    , decltype(_impl_.use_per_session_threads_){}
    , decltype(_impl_.allow_soft_placement_){}
    , decltype(_impl_.log_device_placement_){}
    , decltype(_impl_.isolate_session_state_){}
    , decltype(_impl_.operation_timeout_in_ms_){}
    , decltype(_impl_.share_cluster_devices_in_session_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _this->_impl_.device_count_.MergeFrom(from._impl_.device_count_);
  if (from._internal_has_gpu_options()) {
    _this->_impl_.gpu_options_ = new ::tensorflow::GPUOptions(*from._impl_.gpu_options_);
  }
  if (from._internal_has_graph_options()) {
    _this->_impl_.graph_options_ = new ::tensorflow::GraphOptions(*from._impl_.graph_options_);
  }
  if (from._internal_has_rpc_options()) {
    _this->_impl_.rpc_options_ = new ::tensorflow::RPCOptions(*from._impl_.rpc_options_);
  }
  if (from._internal_has_cluster_def()) {
    _this->_impl_.cluster_def_ = new ::tensorflow::ClusterDef(*from._impl_.cluster_def_);
  }
  if (from._internal_has_experimental()) {
    _this->_impl_.experimental_ = new ::tensorflow::ConfigProto_Experimental(*from._impl_.experimental_);
  }
  ::memcpy(&_impl_.intra_op_parallelism_threads_, &from._impl_.intra_op_parallelism_threads_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.share_cluster_devices_in_session_) -
    reinterpret_cast<char*>(&_impl_.intra_op_parallelism_threads_)) + sizeof(_impl_.share_cluster_devices_in_session_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.ConfigProto)
}

inline void ConfigProto::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      /*decltype(_impl_.device_count_)*/{::_pbi::ArenaInitialized(), arena}
    , decltype(_impl_.device_filters_){arena}
    , decltype(_impl_.session_inter_op_thread_pool_){arena}
    , decltype(_impl_.gpu_options_){nullptr}
    , decltype(_impl_.graph_options_){nullptr}
    , decltype(_impl_.rpc_options_){nullptr}
    , decltype(_impl_.cluster_def_){nullptr}
    , decltype(_impl_.experimental_){nullptr}
    , decltype(_impl_.intra_op_parallelism_threads_){0}
    , decltype(_impl_.placement_period_){0}
    , decltype(_impl_.inter_op_parallelism_threads_){0}
    , decltype(_impl_.use_per_session_threads_){false}
    , decltype(_impl_.allow_soft_placement_){false}
    , decltype(_impl_.log_device_placement_){false}
    , decltype(_impl_.isolate_session_state_){false}
    , decltype(_impl_.operation_timeout_in_ms_){::int64_t{0}}
    , decltype(_impl_.share_cluster_devices_in_session_){false}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

ConfigProto::~ConfigProto() {
  // @@protoc_insertion_point(destructor:tensorflow.ConfigProto)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void ConfigProto::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.device_count_.Destruct();
  _impl_.device_count_.~MapFieldLite();
  _impl_.device_filters_.~RepeatedPtrField();
  _impl_.session_inter_op_thread_pool_.~RepeatedPtrField();
  if (this != internal_default_instance()) delete _impl_.gpu_options_;
  if (this != internal_default_instance()) delete _impl_.graph_options_;
  if (this != internal_default_instance()) delete _impl_.rpc_options_;
  if (this != internal_default_instance()) delete _impl_.cluster_def_;
  if (this != internal_default_instance()) delete _impl_.experimental_;
}

void ConfigProto::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void ConfigProto::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.ConfigProto)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.device_count_.Clear();
  _impl_.device_filters_.Clear();
  _impl_.session_inter_op_thread_pool_.Clear();
  if (GetArenaForAllocation() == nullptr && _impl_.gpu_options_ != nullptr) {
    delete _impl_.gpu_options_;
  }
  _impl_.gpu_options_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.graph_options_ != nullptr) {
    delete _impl_.graph_options_;
  }
  _impl_.graph_options_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.rpc_options_ != nullptr) {
    delete _impl_.rpc_options_;
  }
  _impl_.rpc_options_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.cluster_def_ != nullptr) {
    delete _impl_.cluster_def_;
  }
  _impl_.cluster_def_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.experimental_ != nullptr) {
    delete _impl_.experimental_;
  }
  _impl_.experimental_ = nullptr;
  ::memset(&_impl_.intra_op_parallelism_threads_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.share_cluster_devices_in_session_) -
      reinterpret_cast<char*>(&_impl_.intra_op_parallelism_threads_)) + sizeof(_impl_.share_cluster_devices_in_session_));
  _internal_metadata_.Clear<std::string>();
}

const char* ConfigProto::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // map<string, int32> device_count = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(&_impl_.device_count_, ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 intra_op_parallelism_threads = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.intra_op_parallelism_threads_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 placement_period = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.placement_period_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated string device_filters = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
          ptr -= 1;
          do {
            ptr += 1;
            auto str = _internal_add_device_filters();
            ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
            CHK_(ptr);
            CHK_(::_pbi::VerifyUTF8(str, nullptr));
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<34>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 inter_op_parallelism_threads = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          _impl_.inter_op_parallelism_threads_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.GPUOptions gpu_options = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
          ptr = ctx->ParseMessage(_internal_mutable_gpu_options(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool allow_soft_placement = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) {
          _impl_.allow_soft_placement_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool log_device_placement = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 64)) {
          _impl_.log_device_placement_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool use_per_session_threads = 9;
      case 9:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 72)) {
          _impl_.use_per_session_threads_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.GraphOptions graph_options = 10;
      case 10:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 82)) {
          ptr = ctx->ParseMessage(_internal_mutable_graph_options(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 operation_timeout_in_ms = 11;
      case 11:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 88)) {
          _impl_.operation_timeout_in_ms_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
      case 12:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 98)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_session_inter_op_thread_pool(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<98>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RPCOptions rpc_options = 13;
      case 13:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 106)) {
          ptr = ctx->ParseMessage(_internal_mutable_rpc_options(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.ClusterDef cluster_def = 14;
      case 14:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 114)) {
          ptr = ctx->ParseMessage(_internal_mutable_cluster_def(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool isolate_session_state = 15;
      case 15:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 120)) {
          _impl_.isolate_session_state_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.ConfigProto.Experimental experimental = 16;
      case 16:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 130)) {
          ptr = ctx->ParseMessage(_internal_mutable_experimental(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool share_cluster_devices_in_session = 17;
      case 17:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 136)) {
          _impl_.share_cluster_devices_in_session_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
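// Editor's note (not protoc output): the repeated-field cases above (device_count,
// device_filters, session_inter_op_thread_pool) use a fast path: after one element is
// parsed, ExpectTag<10>/<34>/<98> peeks at the next byte and, if the same field
// repeats, the do/while consumes it without re-entering the dispatch switch. The
// initial `ptr -= 1` rewinds over the tag byte already consumed by ReadTag so every
// iteration, including the first, can uniformly skip it with `ptr += 1`.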

::uint8_t* ConfigProto::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.ConfigProto)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // map<string, int32> device_count = 1;
  if (!this->_internal_device_count().empty()) {
    using MapType = ::_pb::Map<std::string, ::int32_t>;
    using WireHelper = ConfigProto_DeviceCountEntry_DoNotUse::Funcs;
    const auto& map_field = this->_internal_device_count();
    auto check_utf8 = [](const MapType::value_type& entry) {
      (void)entry;
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
        entry.first.data(), static_cast<int>(entry.first.length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
        "tensorflow.ConfigProto.DeviceCountEntry.key");
    };

    if (stream->IsSerializationDeterministic() && map_field.size() > 1) {
      for (const auto& entry : ::_pbi::MapSorterPtr<MapType>(map_field)) {
        target = WireHelper::InternalSerialize(1, entry.first, entry.second, target, stream);
        check_utf8(entry);
      }
    } else {
      for (const auto& entry : map_field) {
        target = WireHelper::InternalSerialize(1, entry.first, entry.second, target, stream);
        check_utf8(entry);
      }
    }
  }

  // int32 intra_op_parallelism_threads = 2;
  if (this->_internal_intra_op_parallelism_threads() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_intra_op_parallelism_threads(), target);
  }

  // int32 placement_period = 3;
  if (this->_internal_placement_period() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_placement_period(), target);
  }

  // repeated string device_filters = 4;
  for (int i = 0, n = this->_internal_device_filters_size(); i < n; i++) {
    const auto& s = this->_internal_device_filters(i);
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      s.data(), static_cast<int>(s.length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.ConfigProto.device_filters");
    target = stream->WriteString(4, s, target);
  }

  // int32 inter_op_parallelism_threads = 5;
  if (this->_internal_inter_op_parallelism_threads() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(5, this->_internal_inter_op_parallelism_threads(), target);
  }

  // .tensorflow.GPUOptions gpu_options = 6;
  if (this->_internal_has_gpu_options()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(6, _Internal::gpu_options(this),
        _Internal::gpu_options(this).GetCachedSize(), target, stream);
  }

  // bool allow_soft_placement = 7;
  if (this->_internal_allow_soft_placement() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(7, this->_internal_allow_soft_placement(), target);
  }

  // bool log_device_placement = 8;
  if (this->_internal_log_device_placement() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(8, this->_internal_log_device_placement(), target);
  }

  // bool use_per_session_threads = 9;
  if (this->_internal_use_per_session_threads() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(9, this->_internal_use_per_session_threads(), target);
  }

  // .tensorflow.GraphOptions graph_options = 10;
  if (this->_internal_has_graph_options()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(10, _Internal::graph_options(this),
        _Internal::graph_options(this).GetCachedSize(), target, stream);
  }

  // int64 operation_timeout_in_ms = 11;
  if (this->_internal_operation_timeout_in_ms() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(11, this->_internal_operation_timeout_in_ms(), target);
  }

  // repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_session_inter_op_thread_pool_size()); i < n; i++) {
    const auto& repfield = this->_internal_session_inter_op_thread_pool(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(12, repfield, repfield.GetCachedSize(), target, stream);
  }

  // .tensorflow.RPCOptions rpc_options = 13;
  if (this->_internal_has_rpc_options()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(13, _Internal::rpc_options(this),
        _Internal::rpc_options(this).GetCachedSize(), target, stream);
  }

  // .tensorflow.ClusterDef cluster_def = 14;
  if (this->_internal_has_cluster_def()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(14, _Internal::cluster_def(this),
        _Internal::cluster_def(this).GetCachedSize(), target, stream);
  }

  // bool isolate_session_state = 15;
  if (this->_internal_isolate_session_state() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(15, this->_internal_isolate_session_state(), target);
  }

  // .tensorflow.ConfigProto.Experimental experimental = 16;
  if (this->_internal_has_experimental()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(16, _Internal::experimental(this),
        _Internal::experimental(this).GetCachedSize(), target, stream);
  }

  // bool share_cluster_devices_in_session = 17;
  if (this->_internal_share_cluster_devices_in_session() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(17, this->_internal_share_cluster_devices_in_session(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.ConfigProto)
  return target;
}
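// Editor's note (not protoc output): map<string, int32> device_count has no defined
// wire order, so when the output stream requests deterministic serialization
// (stream->IsSerializationDeterministic()), entries are first sorted by key via
// MapSorterPtr. That makes equal maps serialize to identical bytes, which matters for
// byte-wise comparison, fingerprinting, and caching of serialized ConfigProtos.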
4761 
size_t ConfigProto::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.ConfigProto)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // map<string, int32> device_count = 1;
  total_size += 1 *
      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_device_count_size());
  for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >::const_iterator
      it = this->_internal_device_count().begin();
      it != this->_internal_device_count().end(); ++it) {
    total_size += ConfigProto_DeviceCountEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
  }

  // repeated string device_filters = 4;
  total_size += 1 *
      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(_impl_.device_filters_.size());
  for (int i = 0, n = _impl_.device_filters_.size(); i < n; i++) {
    total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
      _impl_.device_filters_.Get(i));
  }

  // repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
  total_size += 1UL * this->_internal_session_inter_op_thread_pool_size();
  for (const auto& msg : this->_impl_.session_inter_op_thread_pool_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // .tensorflow.GPUOptions gpu_options = 6;
  if (this->_internal_has_gpu_options()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.gpu_options_);
  }

  // .tensorflow.GraphOptions graph_options = 10;
  if (this->_internal_has_graph_options()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.graph_options_);
  }

  // .tensorflow.RPCOptions rpc_options = 13;
  if (this->_internal_has_rpc_options()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.rpc_options_);
  }

  // .tensorflow.ClusterDef cluster_def = 14;
  if (this->_internal_has_cluster_def()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.cluster_def_);
  }

  // .tensorflow.ConfigProto.Experimental experimental = 16;
  if (this->_internal_has_experimental()) {
    total_size += 2 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.experimental_);
  }

  // int32 intra_op_parallelism_threads = 2;
  if (this->_internal_intra_op_parallelism_threads() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_intra_op_parallelism_threads());
  }

  // int32 placement_period = 3;
  if (this->_internal_placement_period() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_placement_period());
  }

  // int32 inter_op_parallelism_threads = 5;
  if (this->_internal_inter_op_parallelism_threads() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_inter_op_parallelism_threads());
  }

  // bool use_per_session_threads = 9;
  if (this->_internal_use_per_session_threads() != 0) {
    total_size += 1 + 1;
  }

  // bool allow_soft_placement = 7;
  if (this->_internal_allow_soft_placement() != 0) {
    total_size += 1 + 1;
  }

  // bool log_device_placement = 8;
  if (this->_internal_log_device_placement() != 0) {
    total_size += 1 + 1;
  }

  // bool isolate_session_state = 15;
  if (this->_internal_isolate_session_state() != 0) {
    total_size += 1 + 1;
  }

  // int64 operation_timeout_in_ms = 11;
  if (this->_internal_operation_timeout_in_ms() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_operation_timeout_in_ms());
  }

  // bool share_cluster_devices_in_session = 17;
  if (this->_internal_share_cluster_devices_in_session() != 0) {
    total_size += 2 + 1;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
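// Size/serialize contract: ByteSizeLong() runs first, caches each message's
// size via SetCachedSize(), and _InternalSerialize() then reads those values
// back through GetCachedSize() so nested sizes are not recomputed while
// writing length-delimited fields.  Int32SizePlusOne/Int64SizePlusOne return
// the varint-encoded size of the value plus one byte for the field tag.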

void ConfigProto::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const ConfigProto*>(
      &from));
}

void ConfigProto::MergeFrom(const ConfigProto& from) {
  ConfigProto* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.ConfigProto)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.device_count_.MergeFrom(from._impl_.device_count_);
  _this->_impl_.device_filters_.MergeFrom(from._impl_.device_filters_);
  _this->_impl_.session_inter_op_thread_pool_.MergeFrom(from._impl_.session_inter_op_thread_pool_);
  if (from._internal_has_gpu_options()) {
    _this->_internal_mutable_gpu_options()->::tensorflow::GPUOptions::MergeFrom(
        from._internal_gpu_options());
  }
  if (from._internal_has_graph_options()) {
    _this->_internal_mutable_graph_options()->::tensorflow::GraphOptions::MergeFrom(
        from._internal_graph_options());
  }
  if (from._internal_has_rpc_options()) {
    _this->_internal_mutable_rpc_options()->::tensorflow::RPCOptions::MergeFrom(
        from._internal_rpc_options());
  }
  if (from._internal_has_cluster_def()) {
    _this->_internal_mutable_cluster_def()->::tensorflow::ClusterDef::MergeFrom(
        from._internal_cluster_def());
  }
  if (from._internal_has_experimental()) {
    _this->_internal_mutable_experimental()->::tensorflow::ConfigProto_Experimental::MergeFrom(
        from._internal_experimental());
  }
  if (from._internal_intra_op_parallelism_threads() != 0) {
    _this->_internal_set_intra_op_parallelism_threads(from._internal_intra_op_parallelism_threads());
  }
  if (from._internal_placement_period() != 0) {
    _this->_internal_set_placement_period(from._internal_placement_period());
  }
  if (from._internal_inter_op_parallelism_threads() != 0) {
    _this->_internal_set_inter_op_parallelism_threads(from._internal_inter_op_parallelism_threads());
  }
  if (from._internal_use_per_session_threads() != 0) {
    _this->_internal_set_use_per_session_threads(from._internal_use_per_session_threads());
  }
  if (from._internal_allow_soft_placement() != 0) {
    _this->_internal_set_allow_soft_placement(from._internal_allow_soft_placement());
  }
  if (from._internal_log_device_placement() != 0) {
    _this->_internal_set_log_device_placement(from._internal_log_device_placement());
  }
  if (from._internal_isolate_session_state() != 0) {
    _this->_internal_set_isolate_session_state(from._internal_isolate_session_state());
  }
  if (from._internal_operation_timeout_in_ms() != 0) {
    _this->_internal_set_operation_timeout_in_ms(from._internal_operation_timeout_in_ms());
  }
  if (from._internal_share_cluster_devices_in_session() != 0) {
    _this->_internal_set_share_cluster_devices_in_session(from._internal_share_cluster_devices_in_session());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
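// proto3 merge semantics, as generated: repeated and map fields are appended,
// present submessages are merged recursively, and scalar fields are copied
// only when the source holds a non-default (non-zero) value.  For example,
// merging {intra_op_parallelism_threads: 4} into
// {intra_op_parallelism_threads: 2, allow_soft_placement: true} yields
// {intra_op_parallelism_threads: 4, allow_soft_placement: true}.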

void ConfigProto::CopyFrom(const ConfigProto& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.ConfigProto)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool ConfigProto::IsInitialized() const {
  return true;
}

void ConfigProto::InternalSwap(ConfigProto* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.device_count_.InternalSwap(&other->_impl_.device_count_);
  _impl_.device_filters_.InternalSwap(&other->_impl_.device_filters_);
  _impl_.session_inter_op_thread_pool_.InternalSwap(&other->_impl_.session_inter_op_thread_pool_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(ConfigProto, _impl_.share_cluster_devices_in_session_)
      + sizeof(ConfigProto::_impl_.share_cluster_devices_in_session_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(ConfigProto, _impl_.gpu_options_)>(
          reinterpret_cast<char*>(&_impl_.gpu_options_),
          reinterpret_cast<char*>(&other->_impl_.gpu_options_));
}
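// memswap<N> byte-swaps one contiguous N-byte block covering every trivially
// copyable member of Impl_ from gpu_options_ (a raw pointer) through
// share_cluster_devices_in_session_ (a bool), exchanging all pointer and
// scalar fields in a single pass; only the containers and metadata need the
// dedicated InternalSwap calls above.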

std::string ConfigProto::GetTypeName() const {
  return "tensorflow.ConfigProto";
}


// ===================================================================

class RunOptions_Experimental_RunHandlerPoolOptions::_Internal {
 public:
};

RunOptions_Experimental_RunHandlerPoolOptions::RunOptions_Experimental_RunHandlerPoolOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
}
RunOptions_Experimental_RunHandlerPoolOptions::RunOptions_Experimental_RunHandlerPoolOptions(const RunOptions_Experimental_RunHandlerPoolOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  RunOptions_Experimental_RunHandlerPoolOptions* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.priority_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _this->_impl_.priority_ = from._impl_.priority_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
}

inline void RunOptions_Experimental_RunHandlerPoolOptions::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.priority_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

RunOptions_Experimental_RunHandlerPoolOptions::~RunOptions_Experimental_RunHandlerPoolOptions() {
  // @@protoc_insertion_point(destructor:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void RunOptions_Experimental_RunHandlerPoolOptions::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}

void RunOptions_Experimental_RunHandlerPoolOptions::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void RunOptions_Experimental_RunHandlerPoolOptions::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.priority_ = ::int64_t{0};
  _internal_metadata_.Clear<std::string>();
}

const char* RunOptions_Experimental_RunHandlerPoolOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int64 priority = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.priority_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
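// Parser anatomy (the same shape recurs in every _InternalParse below):
// ReadTag() yields tag = (field_number << 3) | wire_type, so `tag >> 3`
// selects the switch case and the low byte is checked against the expected
// full tag (here 8 = field 1, varint).  A tag of 0 or a wire type of 4
// (end-group) terminates the message; any other unexpected tag is preserved
// as an unknown field rather than rejected.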

::uint8_t* RunOptions_Experimental_RunHandlerPoolOptions::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 priority = 1;
  if (this->_internal_priority() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_priority(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
  return target;
}

size_t RunOptions_Experimental_RunHandlerPoolOptions::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // int64 priority = 1;
  if (this->_internal_priority() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_priority());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void RunOptions_Experimental_RunHandlerPoolOptions::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const RunOptions_Experimental_RunHandlerPoolOptions*>(
      &from));
}

void RunOptions_Experimental_RunHandlerPoolOptions::MergeFrom(const RunOptions_Experimental_RunHandlerPoolOptions& from) {
  RunOptions_Experimental_RunHandlerPoolOptions* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_priority() != 0) {
    _this->_internal_set_priority(from._internal_priority());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void RunOptions_Experimental_RunHandlerPoolOptions::CopyFrom(const RunOptions_Experimental_RunHandlerPoolOptions& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool RunOptions_Experimental_RunHandlerPoolOptions::IsInitialized() const {
  return true;
}

void RunOptions_Experimental_RunHandlerPoolOptions::InternalSwap(RunOptions_Experimental_RunHandlerPoolOptions* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_impl_.priority_, other->_impl_.priority_);
}

std::string RunOptions_Experimental_RunHandlerPoolOptions::GetTypeName() const {
  return "tensorflow.RunOptions.Experimental.RunHandlerPoolOptions";
}

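// Illustrative use (editorial sketch, not protoc output): populating this
// message through its parent accessors and round-tripping it; assumes only
// the generated MessageLite API.
//
//   tensorflow::RunOptions opts;
//   opts.mutable_experimental()
//       ->mutable_run_handler_pool_options()
//       ->set_priority(2);
//   std::string wire;
//   opts.SerializeToString(&wire);
//
//   tensorflow::RunOptions parsed;
//   parsed.ParseFromString(wire);  // parsed now reports priority() == 2.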

// ===================================================================

class RunOptions_Experimental::_Internal {
 public:
  static const ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions& run_handler_pool_options(const RunOptions_Experimental* msg);
};

const ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions&
RunOptions_Experimental::_Internal::run_handler_pool_options(const RunOptions_Experimental* msg) {
  return *msg->_impl_.run_handler_pool_options_;
}
RunOptions_Experimental::RunOptions_Experimental(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.RunOptions.Experimental)
}
RunOptions_Experimental::RunOptions_Experimental(const RunOptions_Experimental& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  RunOptions_Experimental* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.run_handler_pool_options_){nullptr}
    , decltype(_impl_.collective_graph_key_){}
    , decltype(_impl_.use_run_handler_pool_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  if (from._internal_has_run_handler_pool_options()) {
    _this->_impl_.run_handler_pool_options_ = new ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions(*from._impl_.run_handler_pool_options_);
  }
  ::memcpy(&_impl_.collective_graph_key_, &from._impl_.collective_graph_key_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.use_run_handler_pool_) -
    reinterpret_cast<char*>(&_impl_.collective_graph_key_)) + sizeof(_impl_.use_run_handler_pool_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.RunOptions.Experimental)
}
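// The memcpy above bulk-copies the trivially copyable scalars laid out
// contiguously in Impl_, from collective_graph_key_ through
// use_run_handler_pool_; the owned submessage pointer is deep-copied
// separately beforehand.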

inline void RunOptions_Experimental::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.run_handler_pool_options_){nullptr}
    , decltype(_impl_.collective_graph_key_){::int64_t{0}}
    , decltype(_impl_.use_run_handler_pool_){false}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

RunOptions_Experimental::~RunOptions_Experimental() {
  // @@protoc_insertion_point(destructor:tensorflow.RunOptions.Experimental)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void RunOptions_Experimental::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  if (this != internal_default_instance()) delete _impl_.run_handler_pool_options_;
}

void RunOptions_Experimental::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void RunOptions_Experimental::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.RunOptions.Experimental)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  if (GetArenaForAllocation() == nullptr && _impl_.run_handler_pool_options_ != nullptr) {
    delete _impl_.run_handler_pool_options_;
  }
  _impl_.run_handler_pool_options_ = nullptr;
  ::memset(&_impl_.collective_graph_key_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.use_run_handler_pool_) -
      reinterpret_cast<char*>(&_impl_.collective_graph_key_)) + sizeof(_impl_.use_run_handler_pool_));
  _internal_metadata_.Clear<std::string>();
}

const char* RunOptions_Experimental::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int64 collective_graph_key = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.collective_graph_key_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool use_run_handler_pool = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.use_run_handler_pool_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          ptr = ctx->ParseMessage(_internal_mutable_run_handler_pool_options(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* RunOptions_Experimental::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RunOptions.Experimental)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 collective_graph_key = 1;
  if (this->_internal_collective_graph_key() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_collective_graph_key(), target);
  }

  // bool use_run_handler_pool = 2;
  if (this->_internal_use_run_handler_pool() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(2, this->_internal_use_run_handler_pool(), target);
  }

  // .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;
  if (this->_internal_has_run_handler_pool_options()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(3, _Internal::run_handler_pool_options(this),
        _Internal::run_handler_pool_options(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RunOptions.Experimental)
  return target;
}

size_t RunOptions_Experimental::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.RunOptions.Experimental)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;
  if (this->_internal_has_run_handler_pool_options()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.run_handler_pool_options_);
  }

  // int64 collective_graph_key = 1;
  if (this->_internal_collective_graph_key() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_collective_graph_key());
  }

  // bool use_run_handler_pool = 2;
  if (this->_internal_use_run_handler_pool() != 0) {
    total_size += 1 + 1;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void RunOptions_Experimental::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const RunOptions_Experimental*>(
      &from));
}

void RunOptions_Experimental::MergeFrom(const RunOptions_Experimental& from) {
  RunOptions_Experimental* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RunOptions.Experimental)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_has_run_handler_pool_options()) {
    _this->_internal_mutable_run_handler_pool_options()->::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions::MergeFrom(
        from._internal_run_handler_pool_options());
  }
  if (from._internal_collective_graph_key() != 0) {
    _this->_internal_set_collective_graph_key(from._internal_collective_graph_key());
  }
  if (from._internal_use_run_handler_pool() != 0) {
    _this->_internal_set_use_run_handler_pool(from._internal_use_run_handler_pool());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void RunOptions_Experimental::CopyFrom(const RunOptions_Experimental& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RunOptions.Experimental)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool RunOptions_Experimental::IsInitialized() const {
  return true;
}

void RunOptions_Experimental::InternalSwap(RunOptions_Experimental* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(RunOptions_Experimental, _impl_.use_run_handler_pool_)
      + sizeof(RunOptions_Experimental::_impl_.use_run_handler_pool_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(RunOptions_Experimental, _impl_.run_handler_pool_options_)>(
          reinterpret_cast<char*>(&_impl_.run_handler_pool_options_),
          reinterpret_cast<char*>(&other->_impl_.run_handler_pool_options_));
}

std::string RunOptions_Experimental::GetTypeName() const {
  return "tensorflow.RunOptions.Experimental";
}


// ===================================================================

class RunOptions::_Internal {
 public:
  static const ::tensorflow::DebugOptions& debug_options(const RunOptions* msg);
  static const ::tensorflow::RunOptions_Experimental& experimental(const RunOptions* msg);
};

const ::tensorflow::DebugOptions&
RunOptions::_Internal::debug_options(const RunOptions* msg) {
  return *msg->_impl_.debug_options_;
}
const ::tensorflow::RunOptions_Experimental&
RunOptions::_Internal::experimental(const RunOptions* msg) {
  return *msg->_impl_.experimental_;
}
void RunOptions::clear_debug_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.debug_options_ != nullptr) {
    delete _impl_.debug_options_;
  }
  _impl_.debug_options_ = nullptr;
}
RunOptions::RunOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.RunOptions)
}
RunOptions::RunOptions(const RunOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  RunOptions* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.debug_options_){nullptr}
    , decltype(_impl_.experimental_){nullptr}
    , decltype(_impl_.timeout_in_ms_){}
    , decltype(_impl_.trace_level_){}
    , decltype(_impl_.inter_op_thread_pool_){}
    , decltype(_impl_.output_partition_graphs_){}
    , decltype(_impl_.report_tensor_allocations_upon_oom_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  if (from._internal_has_debug_options()) {
    _this->_impl_.debug_options_ = new ::tensorflow::DebugOptions(*from._impl_.debug_options_);
  }
  if (from._internal_has_experimental()) {
    _this->_impl_.experimental_ = new ::tensorflow::RunOptions_Experimental(*from._impl_.experimental_);
  }
  ::memcpy(&_impl_.timeout_in_ms_, &from._impl_.timeout_in_ms_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.report_tensor_allocations_upon_oom_) -
    reinterpret_cast<char*>(&_impl_.timeout_in_ms_)) + sizeof(_impl_.report_tensor_allocations_upon_oom_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.RunOptions)
}

inline void RunOptions::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.debug_options_){nullptr}
    , decltype(_impl_.experimental_){nullptr}
    , decltype(_impl_.timeout_in_ms_){::int64_t{0}}
    , decltype(_impl_.trace_level_){0}
    , decltype(_impl_.inter_op_thread_pool_){0}
    , decltype(_impl_.output_partition_graphs_){false}
    , decltype(_impl_.report_tensor_allocations_upon_oom_){false}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

RunOptions::~RunOptions() {
  // @@protoc_insertion_point(destructor:tensorflow.RunOptions)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void RunOptions::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  if (this != internal_default_instance()) delete _impl_.debug_options_;
  if (this != internal_default_instance()) delete _impl_.experimental_;
}

void RunOptions::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void RunOptions::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.RunOptions)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  if (GetArenaForAllocation() == nullptr && _impl_.debug_options_ != nullptr) {
    delete _impl_.debug_options_;
  }
  _impl_.debug_options_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.experimental_ != nullptr) {
    delete _impl_.experimental_;
  }
  _impl_.experimental_ = nullptr;
  ::memset(&_impl_.timeout_in_ms_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.report_tensor_allocations_upon_oom_) -
      reinterpret_cast<char*>(&_impl_.timeout_in_ms_)) + sizeof(_impl_.report_tensor_allocations_upon_oom_));
  _internal_metadata_.Clear<std::string>();
}
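// Clear() mirrors the constructor layout: heap-owned submessages are deleted
// only when the message is not arena-allocated (the arena reclaims them in
// bulk otherwise), and the contiguous scalar block from timeout_in_ms_
// through report_tensor_allocations_upon_oom_ is zeroed with one memset.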

const char* RunOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .tensorflow.RunOptions.TraceLevel trace_level = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_trace_level(static_cast<::tensorflow::RunOptions_TraceLevel>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 timeout_in_ms = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.timeout_in_ms_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 inter_op_thread_pool = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.inter_op_thread_pool_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool output_partition_graphs = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          _impl_.output_partition_graphs_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.DebugOptions debug_options = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
          ptr = ctx->ParseMessage(_internal_mutable_debug_options(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool report_tensor_allocations_upon_oom = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) {
          _impl_.report_tensor_allocations_upon_oom_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RunOptions.Experimental experimental = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 66)) {
          ptr = ctx->ParseMessage(_internal_mutable_experimental(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
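// Note the enum field above: proto3 enums are open, so the parser reads the
// raw varint and stores it via static_cast without validating it against the
// declared TraceLevel values; an unrecognized numeric value survives a
// parse/serialize round trip unchanged.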

::uint8_t* RunOptions::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RunOptions)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.RunOptions.TraceLevel trace_level = 1;
  if (this->_internal_trace_level() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      1, this->_internal_trace_level(), target);
  }

  // int64 timeout_in_ms = 2;
  if (this->_internal_timeout_in_ms() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_timeout_in_ms(), target);
  }

  // int32 inter_op_thread_pool = 3;
  if (this->_internal_inter_op_thread_pool() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_inter_op_thread_pool(), target);
  }

  // bool output_partition_graphs = 5;
  if (this->_internal_output_partition_graphs() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(5, this->_internal_output_partition_graphs(), target);
  }

  // .tensorflow.DebugOptions debug_options = 6;
  if (this->_internal_has_debug_options()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(6, _Internal::debug_options(this),
        _Internal::debug_options(this).GetCachedSize(), target, stream);
  }

  // bool report_tensor_allocations_upon_oom = 7;
  if (this->_internal_report_tensor_allocations_upon_oom() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(7, this->_internal_report_tensor_allocations_upon_oom(), target);
  }

  // .tensorflow.RunOptions.Experimental experimental = 8;
  if (this->_internal_has_experimental()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(8, _Internal::experimental(this),
        _Internal::experimental(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RunOptions)
  return target;
}

size_t RunOptions::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.RunOptions)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // .tensorflow.DebugOptions debug_options = 6;
  if (this->_internal_has_debug_options()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.debug_options_);
  }

  // .tensorflow.RunOptions.Experimental experimental = 8;
  if (this->_internal_has_experimental()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.experimental_);
  }

  // int64 timeout_in_ms = 2;
  if (this->_internal_timeout_in_ms() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_timeout_in_ms());
  }

  // .tensorflow.RunOptions.TraceLevel trace_level = 1;
  if (this->_internal_trace_level() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_trace_level());
  }

  // int32 inter_op_thread_pool = 3;
  if (this->_internal_inter_op_thread_pool() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_inter_op_thread_pool());
  }

  // bool output_partition_graphs = 5;
  if (this->_internal_output_partition_graphs() != 0) {
    total_size += 1 + 1;
  }

  // bool report_tensor_allocations_upon_oom = 7;
  if (this->_internal_report_tensor_allocations_upon_oom() != 0) {
    total_size += 1 + 1;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void RunOptions::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const RunOptions*>(
      &from));
}

void RunOptions::MergeFrom(const RunOptions& from) {
  RunOptions* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RunOptions)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_has_debug_options()) {
    _this->_internal_mutable_debug_options()->::tensorflow::DebugOptions::MergeFrom(
        from._internal_debug_options());
  }
  if (from._internal_has_experimental()) {
    _this->_internal_mutable_experimental()->::tensorflow::RunOptions_Experimental::MergeFrom(
        from._internal_experimental());
  }
  if (from._internal_timeout_in_ms() != 0) {
    _this->_internal_set_timeout_in_ms(from._internal_timeout_in_ms());
  }
  if (from._internal_trace_level() != 0) {
    _this->_internal_set_trace_level(from._internal_trace_level());
  }
  if (from._internal_inter_op_thread_pool() != 0) {
    _this->_internal_set_inter_op_thread_pool(from._internal_inter_op_thread_pool());
  }
  if (from._internal_output_partition_graphs() != 0) {
    _this->_internal_set_output_partition_graphs(from._internal_output_partition_graphs());
  }
  if (from._internal_report_tensor_allocations_upon_oom() != 0) {
    _this->_internal_set_report_tensor_allocations_upon_oom(from._internal_report_tensor_allocations_upon_oom());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void RunOptions::CopyFrom(const RunOptions& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RunOptions)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool RunOptions::IsInitialized() const {
  return true;
}

void RunOptions::InternalSwap(RunOptions* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(RunOptions, _impl_.report_tensor_allocations_upon_oom_)
      + sizeof(RunOptions::_impl_.report_tensor_allocations_upon_oom_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(RunOptions, _impl_.debug_options_)>(
          reinterpret_cast<char*>(&_impl_.debug_options_),
          reinterpret_cast<char*>(&other->_impl_.debug_options_));
}

std::string RunOptions::GetTypeName() const {
  return "tensorflow.RunOptions";
}

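// Illustrative use (editorial sketch, not protoc output): a typical RunOptions
// a client might pass to Session::Run; the field names are exactly those
// serialized above.
//
//   tensorflow::RunOptions run_options;
//   run_options.set_trace_level(tensorflow::RunOptions::FULL_TRACE);
//   run_options.set_timeout_in_ms(30000);
//   run_options.set_report_tensor_allocations_upon_oom(true);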

// ===================================================================

class RunMetadata_FunctionGraphs::_Internal {
 public:
  static const ::tensorflow::GraphDef& pre_optimization_graph(const RunMetadata_FunctionGraphs* msg);
  static const ::tensorflow::GraphDef& post_optimization_graph(const RunMetadata_FunctionGraphs* msg);
};

const ::tensorflow::GraphDef&
RunMetadata_FunctionGraphs::_Internal::pre_optimization_graph(const RunMetadata_FunctionGraphs* msg) {
  return *msg->_impl_.pre_optimization_graph_;
}
const ::tensorflow::GraphDef&
RunMetadata_FunctionGraphs::_Internal::post_optimization_graph(const RunMetadata_FunctionGraphs* msg) {
  return *msg->_impl_.post_optimization_graph_;
}
void RunMetadata_FunctionGraphs::clear_partition_graphs() {
  _impl_.partition_graphs_.Clear();
}
void RunMetadata_FunctionGraphs::clear_pre_optimization_graph() {
  if (GetArenaForAllocation() == nullptr && _impl_.pre_optimization_graph_ != nullptr) {
    delete _impl_.pre_optimization_graph_;
  }
  _impl_.pre_optimization_graph_ = nullptr;
}
void RunMetadata_FunctionGraphs::clear_post_optimization_graph() {
  if (GetArenaForAllocation() == nullptr && _impl_.post_optimization_graph_ != nullptr) {
    delete _impl_.post_optimization_graph_;
  }
  _impl_.post_optimization_graph_ = nullptr;
}
RunMetadata_FunctionGraphs::RunMetadata_FunctionGraphs(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.RunMetadata.FunctionGraphs)
}
RunMetadata_FunctionGraphs::RunMetadata_FunctionGraphs(const RunMetadata_FunctionGraphs& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  RunMetadata_FunctionGraphs* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.partition_graphs_){from._impl_.partition_graphs_}
    , decltype(_impl_.pre_optimization_graph_){nullptr}
    , decltype(_impl_.post_optimization_graph_){nullptr}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  if (from._internal_has_pre_optimization_graph()) {
    _this->_impl_.pre_optimization_graph_ = new ::tensorflow::GraphDef(*from._impl_.pre_optimization_graph_);
  }
  if (from._internal_has_post_optimization_graph()) {
    _this->_impl_.post_optimization_graph_ = new ::tensorflow::GraphDef(*from._impl_.post_optimization_graph_);
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.RunMetadata.FunctionGraphs)
}

inline void RunMetadata_FunctionGraphs::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.partition_graphs_){arena}
    , decltype(_impl_.pre_optimization_graph_){nullptr}
    , decltype(_impl_.post_optimization_graph_){nullptr}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

RunMetadata_FunctionGraphs::~RunMetadata_FunctionGraphs() {
  // @@protoc_insertion_point(destructor:tensorflow.RunMetadata.FunctionGraphs)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void RunMetadata_FunctionGraphs::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.partition_graphs_.~RepeatedPtrField();
  if (this != internal_default_instance()) delete _impl_.pre_optimization_graph_;
  if (this != internal_default_instance()) delete _impl_.post_optimization_graph_;
}

void RunMetadata_FunctionGraphs::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void RunMetadata_FunctionGraphs::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.RunMetadata.FunctionGraphs)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.partition_graphs_.Clear();
  if (GetArenaForAllocation() == nullptr && _impl_.pre_optimization_graph_ != nullptr) {
    delete _impl_.pre_optimization_graph_;
  }
  _impl_.pre_optimization_graph_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.post_optimization_graph_ != nullptr) {
    delete _impl_.post_optimization_graph_;
  }
  _impl_.post_optimization_graph_ = nullptr;
  _internal_metadata_.Clear<std::string>();
}

const char* RunMetadata_FunctionGraphs::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // repeated .tensorflow.GraphDef partition_graphs = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_partition_graphs(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.GraphDef pre_optimization_graph = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr = ctx->ParseMessage(_internal_mutable_pre_optimization_graph(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.GraphDef post_optimization_graph = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          ptr = ctx->ParseMessage(_internal_mutable_post_optimization_graph(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
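// Repeated-message fast path: after one `partition_graphs` element is parsed,
// ExpectTag<10> peeks at the next tag byte and the do/while keeps consuming
// further elements of the same field back-to-back, skipping a full trip
// through the switch for each one.  The `ptr -= 1` / `ptr += 1` pair exists
// because ReadTag() already consumed the first tag byte, while each later
// iteration must skip the tag byte that ExpectTag only peeked at.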
5965 
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const5966 ::uint8_t* RunMetadata_FunctionGraphs::_InternalSerialize(
5967     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
5968   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RunMetadata.FunctionGraphs)
5969   ::uint32_t cached_has_bits = 0;
5970   (void) cached_has_bits;
5971 
5972   // repeated .tensorflow.GraphDef partition_graphs = 1;
5973   for (unsigned i = 0,
5974       n = static_cast<unsigned>(this->_internal_partition_graphs_size()); i < n; i++) {
5975     const auto& repfield = this->_internal_partition_graphs(i);
5976     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
5977         InternalWriteMessage(1, repfield, repfield.GetCachedSize(), target, stream);
5978   }
5979 
5980   // .tensorflow.GraphDef pre_optimization_graph = 2;
5981   if (this->_internal_has_pre_optimization_graph()) {
5982     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
5983       InternalWriteMessage(2, _Internal::pre_optimization_graph(this),
5984         _Internal::pre_optimization_graph(this).GetCachedSize(), target, stream);
5985   }
5986 
5987   // .tensorflow.GraphDef post_optimization_graph = 3;
5988   if (this->_internal_has_post_optimization_graph()) {
5989     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
5990       InternalWriteMessage(3, _Internal::post_optimization_graph(this),
5991         _Internal::post_optimization_graph(this).GetCachedSize(), target, stream);
5992   }
5993 
5994   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
5995     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
5996         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
5997   }
5998   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RunMetadata.FunctionGraphs)
5999   return target;
6000 }
6001 
ByteSizeLong() const6002 size_t RunMetadata_FunctionGraphs::ByteSizeLong() const {
6003 // @@protoc_insertion_point(message_byte_size_start:tensorflow.RunMetadata.FunctionGraphs)
6004   size_t total_size = 0;
6005 
6006   ::uint32_t cached_has_bits = 0;
6007   // Prevent compiler warnings about cached_has_bits being unused
6008   (void) cached_has_bits;
6009 
6010   // repeated .tensorflow.GraphDef partition_graphs = 1;
6011   total_size += 1UL * this->_internal_partition_graphs_size();
6012   for (const auto& msg : this->_impl_.partition_graphs_) {
6013     total_size +=
6014       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
6015   }
6016 
6017   // .tensorflow.GraphDef pre_optimization_graph = 2;
6018   if (this->_internal_has_pre_optimization_graph()) {
6019     total_size += 1 +
6020       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
6021         *_impl_.pre_optimization_graph_);
6022   }
6023 
6024   // .tensorflow.GraphDef post_optimization_graph = 3;
6025   if (this->_internal_has_post_optimization_graph()) {
6026     total_size += 1 +
6027       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
6028         *_impl_.post_optimization_graph_);
6029   }
6030 
6031   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
6032     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
6033   }
6034   int cached_size = ::_pbi::ToCachedSize(total_size);
6035   SetCachedSize(cached_size);
6036   return total_size;
6037 }
6038 
6039 void RunMetadata_FunctionGraphs::CheckTypeAndMergeFrom(
6040     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
6041   MergeFrom(*::_pbi::DownCast<const RunMetadata_FunctionGraphs*>(
6042       &from));
6043 }
6044 
6045 void RunMetadata_FunctionGraphs::MergeFrom(const RunMetadata_FunctionGraphs& from) {
6046   RunMetadata_FunctionGraphs* const _this = this;
6047   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RunMetadata.FunctionGraphs)
6048   GOOGLE_DCHECK_NE(&from, _this);
6049   ::uint32_t cached_has_bits = 0;
6050   (void) cached_has_bits;
6051 
6052   _this->_impl_.partition_graphs_.MergeFrom(from._impl_.partition_graphs_);
6053   if (from._internal_has_pre_optimization_graph()) {
6054     _this->_internal_mutable_pre_optimization_graph()->::tensorflow::GraphDef::MergeFrom(
6055         from._internal_pre_optimization_graph());
6056   }
6057   if (from._internal_has_post_optimization_graph()) {
6058     _this->_internal_mutable_post_optimization_graph()->::tensorflow::GraphDef::MergeFrom(
6059         from._internal_post_optimization_graph());
6060   }
6061   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
6062 }
6063 
6064 void RunMetadata_FunctionGraphs::CopyFrom(const RunMetadata_FunctionGraphs& from) {
6065 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RunMetadata.FunctionGraphs)
6066   if (&from == this) return;
6067   Clear();
6068   MergeFrom(from);
6069 }
6070 
6071 bool RunMetadata_FunctionGraphs::IsInitialized() const {
6072   return true;
6073 }
6074 
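// InternalSwap exchanges the two GraphDef pointers with a single memswap over
// the contiguous byte range from pre_optimization_graph_ through
// post_optimization_graph_, valid because the fields are adjacent in Impl_.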
6075 void RunMetadata_FunctionGraphs::InternalSwap(RunMetadata_FunctionGraphs* other) {
6076   using std::swap;
6077   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
6078   _impl_.partition_graphs_.InternalSwap(&other->_impl_.partition_graphs_);
6079   ::PROTOBUF_NAMESPACE_ID::internal::memswap<
6080       PROTOBUF_FIELD_OFFSET(RunMetadata_FunctionGraphs, _impl_.post_optimization_graph_)
6081       + sizeof(RunMetadata_FunctionGraphs::_impl_.post_optimization_graph_)  // NOLINT
6082       - PROTOBUF_FIELD_OFFSET(RunMetadata_FunctionGraphs, _impl_.pre_optimization_graph_)>(
6083           reinterpret_cast<char*>(&_impl_.pre_optimization_graph_),
6084           reinterpret_cast<char*>(&other->_impl_.pre_optimization_graph_));
6085 }
6086 
6087 std::string RunMetadata_FunctionGraphs::GetTypeName() const {
6088   return "tensorflow.RunMetadata.FunctionGraphs";
6089 }
6090 
6091 
6092 // ===================================================================
6093 
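// The nested _Internal class below exposes the raw submessage pointers so the
// serializer can read them without extra null checks; its accessors are only
// used after the corresponding _internal_has_*() predicate has been tested.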
6094 class RunMetadata::_Internal {
6095  public:
6096   static const ::tensorflow::StepStats& step_stats(const RunMetadata* msg);
6097   static const ::tensorflow::CostGraphDef& cost_graph(const RunMetadata* msg);
6098   static const ::tensorflow::SessionMetadata& session_metadata(const RunMetadata* msg);
6099 };
6100 
6101 const ::tensorflow::StepStats&
6102 RunMetadata::_Internal::step_stats(const RunMetadata* msg) {
6103   return *msg->_impl_.step_stats_;
6104 }
6105 const ::tensorflow::CostGraphDef&
6106 RunMetadata::_Internal::cost_graph(const RunMetadata* msg) {
6107   return *msg->_impl_.cost_graph_;
6108 }
6109 const ::tensorflow::SessionMetadata&
6110 RunMetadata::_Internal::session_metadata(const RunMetadata* msg) {
6111   return *msg->_impl_.session_metadata_;
6112 }
6113 void RunMetadata::clear_step_stats() {
6114   if (GetArenaForAllocation() == nullptr && _impl_.step_stats_ != nullptr) {
6115     delete _impl_.step_stats_;
6116   }
6117   _impl_.step_stats_ = nullptr;
6118 }
6119 void RunMetadata::clear_cost_graph() {
6120   if (GetArenaForAllocation() == nullptr && _impl_.cost_graph_ != nullptr) {
6121     delete _impl_.cost_graph_;
6122   }
6123   _impl_.cost_graph_ = nullptr;
6124 }
6125 void RunMetadata::clear_partition_graphs() {
6126   _impl_.partition_graphs_.Clear();
6127 }
6128 RunMetadata::RunMetadata(::PROTOBUF_NAMESPACE_ID::Arena* arena,
6129                          bool is_message_owned)
6130   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
6131   SharedCtor(arena, is_message_owned);
6132   // @@protoc_insertion_point(arena_constructor:tensorflow.RunMetadata)
6133 }
6134 RunMetadata::RunMetadata(const RunMetadata& from)
6135   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
6136   RunMetadata* const _this = this; (void)_this;
6137   new (&_impl_) Impl_{
6138       decltype(_impl_.partition_graphs_){from._impl_.partition_graphs_}
6139     , decltype(_impl_.function_graphs_){from._impl_.function_graphs_}
6140     , decltype(_impl_.step_stats_){nullptr}
6141     , decltype(_impl_.cost_graph_){nullptr}
6142     , decltype(_impl_.session_metadata_){nullptr}
6143     , /*decltype(_impl_._cached_size_)*/{}};
6144 
6145   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
6146   if (from._internal_has_step_stats()) {
6147     _this->_impl_.step_stats_ = new ::tensorflow::StepStats(*from._impl_.step_stats_);
6148   }
6149   if (from._internal_has_cost_graph()) {
6150     _this->_impl_.cost_graph_ = new ::tensorflow::CostGraphDef(*from._impl_.cost_graph_);
6151   }
6152   if (from._internal_has_session_metadata()) {
6153     _this->_impl_.session_metadata_ = new ::tensorflow::SessionMetadata(*from._impl_.session_metadata_);
6154   }
6155   // @@protoc_insertion_point(copy_constructor:tensorflow.RunMetadata)
6156 }
6157 
6158 inline void RunMetadata::SharedCtor(
6159     ::_pb::Arena* arena, bool is_message_owned) {
6160   (void)arena;
6161   (void)is_message_owned;
6162   new (&_impl_) Impl_{
6163       decltype(_impl_.partition_graphs_){arena}
6164     , decltype(_impl_.function_graphs_){arena}
6165     , decltype(_impl_.step_stats_){nullptr}
6166     , decltype(_impl_.cost_graph_){nullptr}
6167     , decltype(_impl_.session_metadata_){nullptr}
6168     , /*decltype(_impl_._cached_size_)*/{}
6169   };
6170 }
6171 
6172 RunMetadata::~RunMetadata() {
6173   // @@protoc_insertion_point(destructor:tensorflow.RunMetadata)
6174   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
6175   (void)arena;
6176     return;
6177   }
6178   SharedDtor();
6179 }
6180 
6181 inline void RunMetadata::SharedDtor() {
6182   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
6183   _impl_.partition_graphs_.~RepeatedPtrField();
6184   _impl_.function_graphs_.~RepeatedPtrField();
6185   if (this != internal_default_instance()) delete _impl_.step_stats_;
6186   if (this != internal_default_instance()) delete _impl_.cost_graph_;
6187   if (this != internal_default_instance()) delete _impl_.session_metadata_;
6188 }
6189 
6190 void RunMetadata::SetCachedSize(int size) const {
6191   _impl_._cached_size_.Set(size);
6192 }
6193 
6194 void RunMetadata::Clear() {
6195 // @@protoc_insertion_point(message_clear_start:tensorflow.RunMetadata)
6196   ::uint32_t cached_has_bits = 0;
6197   // Prevent compiler warnings about cached_has_bits being unused
6198   (void) cached_has_bits;
6199 
6200   _impl_.partition_graphs_.Clear();
6201   _impl_.function_graphs_.Clear();
6202   if (GetArenaForAllocation() == nullptr && _impl_.step_stats_ != nullptr) {
6203     delete _impl_.step_stats_;
6204   }
6205   _impl_.step_stats_ = nullptr;
6206   if (GetArenaForAllocation() == nullptr && _impl_.cost_graph_ != nullptr) {
6207     delete _impl_.cost_graph_;
6208   }
6209   _impl_.cost_graph_ = nullptr;
6210   if (GetArenaForAllocation() == nullptr && _impl_.session_metadata_ != nullptr) {
6211     delete _impl_.session_metadata_;
6212   }
6213   _impl_.session_metadata_ = nullptr;
6214   _internal_metadata_.Clear<std::string>();
6215 }
6216 
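// Parsing note: each tag byte tested below encodes (field_number << 3) | wire_type.
// All five fields are length-delimited (wire type 2), giving the expected tag
// bytes 10, 18, 26, 34 and 42 for fields 1 through 5.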
6217 const char* RunMetadata::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
6218 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
6219   while (!ctx->Done(&ptr)) {
6220     ::uint32_t tag;
6221     ptr = ::_pbi::ReadTag(ptr, &tag);
6222     switch (tag >> 3) {
6223       // .tensorflow.StepStats step_stats = 1;
6224       case 1:
6225         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
6226           ptr = ctx->ParseMessage(_internal_mutable_step_stats(), ptr);
6227           CHK_(ptr);
6228         } else {
6229           goto handle_unusual;
6230         }
6231         continue;
6232       // .tensorflow.CostGraphDef cost_graph = 2;
6233       case 2:
6234         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
6235           ptr = ctx->ParseMessage(_internal_mutable_cost_graph(), ptr);
6236           CHK_(ptr);
6237         } else {
6238           goto handle_unusual;
6239         }
6240         continue;
6241       // repeated .tensorflow.GraphDef partition_graphs = 3;
6242       case 3:
6243         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
6244           ptr -= 1;
6245           do {
6246             ptr += 1;
6247             ptr = ctx->ParseMessage(_internal_add_partition_graphs(), ptr);
6248             CHK_(ptr);
6249             if (!ctx->DataAvailable(ptr)) break;
6250           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<26>(ptr));
6251         } else {
6252           goto handle_unusual;
6253         }
6254         continue;
6255       // repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;
6256       case 4:
6257         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
6258           ptr -= 1;
6259           do {
6260             ptr += 1;
6261             ptr = ctx->ParseMessage(_internal_add_function_graphs(), ptr);
6262             CHK_(ptr);
6263             if (!ctx->DataAvailable(ptr)) break;
6264           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<34>(ptr));
6265         } else {
6266           goto handle_unusual;
6267         }
6268         continue;
6269       // .tensorflow.SessionMetadata session_metadata = 5;
6270       case 5:
6271         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
6272           ptr = ctx->ParseMessage(_internal_mutable_session_metadata(), ptr);
6273           CHK_(ptr);
6274         } else {
6275           goto handle_unusual;
6276         }
6277         continue;
6278       default:
6279         goto handle_unusual;
6280     }  // switch
6281   handle_unusual:
6282     if ((tag == 0) || ((tag & 7) == 4)) {
6283       CHK_(ptr);
6284       ctx->SetLastTag(tag);
6285       goto message_done;
6286     }
6287     ptr = UnknownFieldParse(
6288         tag,
6289         _internal_metadata_.mutable_unknown_fields<std::string>(),
6290         ptr, ctx);
6291     CHK_(ptr != nullptr);
6292   }  // while
6293 message_done:
6294   return ptr;
6295 failure:
6296   ptr = nullptr;
6297   goto message_done;
6298 #undef CHK_
6299 }
6300 
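// Serialization relies on the per-message sizes cached by a preceding
// ByteSizeLong() pass: each submessage is written with GetCachedSize() rather
// than being re-measured here.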
6301 ::uint8_t* RunMetadata::_InternalSerialize(
6302     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
6303   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RunMetadata)
6304   ::uint32_t cached_has_bits = 0;
6305   (void) cached_has_bits;
6306 
6307   // .tensorflow.StepStats step_stats = 1;
6308   if (this->_internal_has_step_stats()) {
6309     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
6310       InternalWriteMessage(1, _Internal::step_stats(this),
6311         _Internal::step_stats(this).GetCachedSize(), target, stream);
6312   }
6313 
6314   // .tensorflow.CostGraphDef cost_graph = 2;
6315   if (this->_internal_has_cost_graph()) {
6316     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
6317       InternalWriteMessage(2, _Internal::cost_graph(this),
6318         _Internal::cost_graph(this).GetCachedSize(), target, stream);
6319   }
6320 
6321   // repeated .tensorflow.GraphDef partition_graphs = 3;
6322   for (unsigned i = 0,
6323       n = static_cast<unsigned>(this->_internal_partition_graphs_size()); i < n; i++) {
6324     const auto& repfield = this->_internal_partition_graphs(i);
6325     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
6326         InternalWriteMessage(3, repfield, repfield.GetCachedSize(), target, stream);
6327   }
6328 
6329   // repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;
6330   for (unsigned i = 0,
6331       n = static_cast<unsigned>(this->_internal_function_graphs_size()); i < n; i++) {
6332     const auto& repfield = this->_internal_function_graphs(i);
6333     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
6334         InternalWriteMessage(4, repfield, repfield.GetCachedSize(), target, stream);
6335   }
6336 
6337   // .tensorflow.SessionMetadata session_metadata = 5;
6338   if (this->_internal_has_session_metadata()) {
6339     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
6340       InternalWriteMessage(5, _Internal::session_metadata(this),
6341         _Internal::session_metadata(this).GetCachedSize(), target, stream);
6342   }
6343 
6344   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
6345     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
6346         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
6347   }
6348   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RunMetadata)
6349   return target;
6350 }
6351 
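// Size note: every field number in this message fits in a single tag byte,
// hence the "1 +" and "1UL *" terms below; MessageSize() already includes the
// varint length prefix of each submessage payload.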
6352 size_t RunMetadata::ByteSizeLong() const {
6353 // @@protoc_insertion_point(message_byte_size_start:tensorflow.RunMetadata)
6354   size_t total_size = 0;
6355 
6356   ::uint32_t cached_has_bits = 0;
6357   // Prevent compiler warnings about cached_has_bits being unused
6358   (void) cached_has_bits;
6359 
6360   // repeated .tensorflow.GraphDef partition_graphs = 3;
6361   total_size += 1UL * this->_internal_partition_graphs_size();
6362   for (const auto& msg : this->_impl_.partition_graphs_) {
6363     total_size +=
6364       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
6365   }
6366 
6367   // repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;
6368   total_size += 1UL * this->_internal_function_graphs_size();
6369   for (const auto& msg : this->_impl_.function_graphs_) {
6370     total_size +=
6371       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
6372   }
6373 
6374   // .tensorflow.StepStats step_stats = 1;
6375   if (this->_internal_has_step_stats()) {
6376     total_size += 1 +
6377       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
6378         *_impl_.step_stats_);
6379   }
6380 
6381   // .tensorflow.CostGraphDef cost_graph = 2;
6382   if (this->_internal_has_cost_graph()) {
6383     total_size += 1 +
6384       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
6385         *_impl_.cost_graph_);
6386   }
6387 
6388   // .tensorflow.SessionMetadata session_metadata = 5;
6389   if (this->_internal_has_session_metadata()) {
6390     total_size += 1 +
6391       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
6392         *_impl_.session_metadata_);
6393   }
6394 
6395   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
6396     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
6397   }
6398   int cached_size = ::_pbi::ToCachedSize(total_size);
6399   SetCachedSize(cached_size);
6400   return total_size;
6401 }
6402 
6403 void RunMetadata::CheckTypeAndMergeFrom(
6404     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
6405   MergeFrom(*::_pbi::DownCast<const RunMetadata*>(
6406       &from));
6407 }
6408 
6409 void RunMetadata::MergeFrom(const RunMetadata& from) {
6410   RunMetadata* const _this = this;
6411   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RunMetadata)
6412   GOOGLE_DCHECK_NE(&from, _this);
6413   ::uint32_t cached_has_bits = 0;
6414   (void) cached_has_bits;
6415 
6416   _this->_impl_.partition_graphs_.MergeFrom(from._impl_.partition_graphs_);
6417   _this->_impl_.function_graphs_.MergeFrom(from._impl_.function_graphs_);
6418   if (from._internal_has_step_stats()) {
6419     _this->_internal_mutable_step_stats()->::tensorflow::StepStats::MergeFrom(
6420         from._internal_step_stats());
6421   }
6422   if (from._internal_has_cost_graph()) {
6423     _this->_internal_mutable_cost_graph()->::tensorflow::CostGraphDef::MergeFrom(
6424         from._internal_cost_graph());
6425   }
6426   if (from._internal_has_session_metadata()) {
6427     _this->_internal_mutable_session_metadata()->::tensorflow::SessionMetadata::MergeFrom(
6428         from._internal_session_metadata());
6429   }
6430   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
6431 }
6432 
6433 void RunMetadata::CopyFrom(const RunMetadata& from) {
6434 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RunMetadata)
6435   if (&from == this) return;
6436   Clear();
6437   MergeFrom(from);
6438 }
6439 
6440 bool RunMetadata::IsInitialized() const {
6441   return true;
6442 }
6443 
6444 void RunMetadata::InternalSwap(RunMetadata* other) {
6445   using std::swap;
6446   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
6447   _impl_.partition_graphs_.InternalSwap(&other->_impl_.partition_graphs_);
6448   _impl_.function_graphs_.InternalSwap(&other->_impl_.function_graphs_);
6449   ::PROTOBUF_NAMESPACE_ID::internal::memswap<
6450       PROTOBUF_FIELD_OFFSET(RunMetadata, _impl_.session_metadata_)
6451       + sizeof(RunMetadata::_impl_.session_metadata_)  // NOLINT
6452       - PROTOBUF_FIELD_OFFSET(RunMetadata, _impl_.step_stats_)>(
6453           reinterpret_cast<char*>(&_impl_.step_stats_),
6454           reinterpret_cast<char*>(&other->_impl_.step_stats_));
6455 }
6456 
6457 std::string RunMetadata::GetTypeName() const {
6458   return "tensorflow.RunMetadata";
6459 }
6460 
6461 
6462 // ===================================================================
6463 
6464 class TensorConnection::_Internal {
6465  public:
6466 };
6467 
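// from_tensor_ and to_tensor_ are ArenaStringPtr fields: InitDefault() points
// them at the shared global empty string, and Set() only allocates once a
// non-empty value is assigned.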
6468 TensorConnection::TensorConnection(::PROTOBUF_NAMESPACE_ID::Arena* arena,
6469                          bool is_message_owned)
6470   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
6471   SharedCtor(arena, is_message_owned);
6472   // @@protoc_insertion_point(arena_constructor:tensorflow.TensorConnection)
6473 }
6474 TensorConnection::TensorConnection(const TensorConnection& from)
6475   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
6476   TensorConnection* const _this = this; (void)_this;
6477   new (&_impl_) Impl_{
6478       decltype(_impl_.from_tensor_){}
6479     , decltype(_impl_.to_tensor_){}
6480     , /*decltype(_impl_._cached_size_)*/{}};
6481 
6482   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
6483   _impl_.from_tensor_.InitDefault();
6484   #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
6485     _impl_.from_tensor_.Set("", GetArenaForAllocation());
6486   #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
6487   if (!from._internal_from_tensor().empty()) {
6488     _this->_impl_.from_tensor_.Set(from._internal_from_tensor(),
6489       _this->GetArenaForAllocation());
6490   }
6491   _impl_.to_tensor_.InitDefault();
6492   #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
6493     _impl_.to_tensor_.Set("", GetArenaForAllocation());
6494   #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
6495   if (!from._internal_to_tensor().empty()) {
6496     _this->_impl_.to_tensor_.Set(from._internal_to_tensor(),
6497       _this->GetArenaForAllocation());
6498   }
6499   // @@protoc_insertion_point(copy_constructor:tensorflow.TensorConnection)
6500 }
6501 
6502 inline void TensorConnection::SharedCtor(
6503     ::_pb::Arena* arena, bool is_message_owned) {
6504   (void)arena;
6505   (void)is_message_owned;
6506   new (&_impl_) Impl_{
6507       decltype(_impl_.from_tensor_){}
6508     , decltype(_impl_.to_tensor_){}
6509     , /*decltype(_impl_._cached_size_)*/{}
6510   };
6511   _impl_.from_tensor_.InitDefault();
6512   #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
6513     _impl_.from_tensor_.Set("", GetArenaForAllocation());
6514   #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
6515   _impl_.to_tensor_.InitDefault();
6516   #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
6517     _impl_.to_tensor_.Set("", GetArenaForAllocation());
6518   #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
6519 }
6520 
6521 TensorConnection::~TensorConnection() {
6522   // @@protoc_insertion_point(destructor:tensorflow.TensorConnection)
6523   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
6524   (void)arena;
6525     return;
6526   }
6527   SharedDtor();
6528 }
6529 
6530 inline void TensorConnection::SharedDtor() {
6531   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
6532   _impl_.from_tensor_.Destroy();
6533   _impl_.to_tensor_.Destroy();
6534 }
6535 
6536 void TensorConnection::SetCachedSize(int size) const {
6537   _impl_._cached_size_.Set(size);
6538 }
6539 
6540 void TensorConnection::Clear() {
6541 // @@protoc_insertion_point(message_clear_start:tensorflow.TensorConnection)
6542   ::uint32_t cached_has_bits = 0;
6543   // Prevent compiler warnings about cached_has_bits being unused
6544   (void) cached_has_bits;
6545 
6546   _impl_.from_tensor_.ClearToEmpty();
6547   _impl_.to_tensor_.ClearToEmpty();
6548   _internal_metadata_.Clear<std::string>();
6549 }
6550 
6551 const char* TensorConnection::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
6552 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
6553   while (!ctx->Done(&ptr)) {
6554     ::uint32_t tag;
6555     ptr = ::_pbi::ReadTag(ptr, &tag);
6556     switch (tag >> 3) {
6557       // string from_tensor = 1;
6558       case 1:
6559         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
6560           auto str = _internal_mutable_from_tensor();
6561           ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
6562           CHK_(ptr);
6563           CHK_(::_pbi::VerifyUTF8(str, nullptr));
6564         } else {
6565           goto handle_unusual;
6566         }
6567         continue;
6568       // string to_tensor = 2;
6569       case 2:
6570         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
6571           auto str = _internal_mutable_to_tensor();
6572           ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
6573           CHK_(ptr);
6574           CHK_(::_pbi::VerifyUTF8(str, nullptr));
6575         } else {
6576           goto handle_unusual;
6577         }
6578         continue;
6579       default:
6580         goto handle_unusual;
6581     }  // switch
6582   handle_unusual:
6583     if ((tag == 0) || ((tag & 7) == 4)) {
6584       CHK_(ptr);
6585       ctx->SetLastTag(tag);
6586       goto message_done;
6587     }
6588     ptr = UnknownFieldParse(
6589         tag,
6590         _internal_metadata_.mutable_unknown_fields<std::string>(),
6591         ptr, ctx);
6592     CHK_(ptr != nullptr);
6593   }  // while
6594 message_done:
6595   return ptr;
6596 failure:
6597   ptr = nullptr;
6598   goto message_done;
6599 #undef CHK_
6600 }
6601 
6602 ::uint8_t* TensorConnection::_InternalSerialize(
6603     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
6604   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.TensorConnection)
6605   ::uint32_t cached_has_bits = 0;
6606   (void) cached_has_bits;
6607 
6608   // string from_tensor = 1;
6609   if (!this->_internal_from_tensor().empty()) {
6610     ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
6611       this->_internal_from_tensor().data(), static_cast<int>(this->_internal_from_tensor().length()),
6612       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
6613       "tensorflow.TensorConnection.from_tensor");
6614     target = stream->WriteStringMaybeAliased(
6615         1, this->_internal_from_tensor(), target);
6616   }
6617 
6618   // string to_tensor = 2;
6619   if (!this->_internal_to_tensor().empty()) {
6620     ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
6621       this->_internal_to_tensor().data(), static_cast<int>(this->_internal_to_tensor().length()),
6622       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
6623       "tensorflow.TensorConnection.to_tensor");
6624     target = stream->WriteStringMaybeAliased(
6625         2, this->_internal_to_tensor(), target);
6626   }
6627 
6628   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
6629     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
6630         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
6631   }
6632   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.TensorConnection)
6633   return target;
6634 }
6635 
6636 size_t TensorConnection::ByteSizeLong() const {
6637 // @@protoc_insertion_point(message_byte_size_start:tensorflow.TensorConnection)
6638   size_t total_size = 0;
6639 
6640   ::uint32_t cached_has_bits = 0;
6641   // Prevent compiler warnings about cached_has_bits being unused
6642   (void) cached_has_bits;
6643 
6644   // string from_tensor = 1;
6645   if (!this->_internal_from_tensor().empty()) {
6646     total_size += 1 +
6647       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
6648         this->_internal_from_tensor());
6649   }
6650 
6651   // string to_tensor = 2;
6652   if (!this->_internal_to_tensor().empty()) {
6653     total_size += 1 +
6654       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
6655         this->_internal_to_tensor());
6656   }
6657 
6658   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
6659     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
6660   }
6661   int cached_size = ::_pbi::ToCachedSize(total_size);
6662   SetCachedSize(cached_size);
6663   return total_size;
6664 }
6665 
6666 void TensorConnection::CheckTypeAndMergeFrom(
6667     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
6668   MergeFrom(*::_pbi::DownCast<const TensorConnection*>(
6669       &from));
6670 }
6671 
6672 void TensorConnection::MergeFrom(const TensorConnection& from) {
6673   TensorConnection* const _this = this;
6674   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.TensorConnection)
6675   GOOGLE_DCHECK_NE(&from, _this);
6676   ::uint32_t cached_has_bits = 0;
6677   (void) cached_has_bits;
6678 
6679   if (!from._internal_from_tensor().empty()) {
6680     _this->_internal_set_from_tensor(from._internal_from_tensor());
6681   }
6682   if (!from._internal_to_tensor().empty()) {
6683     _this->_internal_set_to_tensor(from._internal_to_tensor());
6684   }
6685   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
6686 }
6687 
6688 void TensorConnection::CopyFrom(const TensorConnection& from) {
6689 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.TensorConnection)
6690   if (&from == this) return;
6691   Clear();
6692   MergeFrom(from);
6693 }
6694 
6695 bool TensorConnection::IsInitialized() const {
6696   return true;
6697 }
6698 
6699 void TensorConnection::InternalSwap(TensorConnection* other) {
6700   using std::swap;
6701   auto* lhs_arena = GetArenaForAllocation();
6702   auto* rhs_arena = other->GetArenaForAllocation();
6703   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
6704   ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
6705       &_impl_.from_tensor_, lhs_arena,
6706       &other->_impl_.from_tensor_, rhs_arena
6707   );
6708   ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
6709       &_impl_.to_tensor_, lhs_arena,
6710       &other->_impl_.to_tensor_, rhs_arena
6711   );
6712 }
6713 
6714 std::string TensorConnection::GetTypeName() const {
6715   return "tensorflow.TensorConnection";
6716 }
6717 
6718 
6719 // ===================================================================
6720 
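// Map fields are generated as *_DoNotUse entry messages: thin MessageLite
// wrappers that exist solely so map entries can reuse the ordinary wire-format
// machinery. They are an implementation detail and must not be used directly.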
6721 CallableOptions_FeedDevicesEntry_DoNotUse::CallableOptions_FeedDevicesEntry_DoNotUse() {}
6722 CallableOptions_FeedDevicesEntry_DoNotUse::CallableOptions_FeedDevicesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
6723     : SuperType(arena) {}
6724 void CallableOptions_FeedDevicesEntry_DoNotUse::MergeFrom(const CallableOptions_FeedDevicesEntry_DoNotUse& other) {
6725   MergeFromInternal(other);
6726 }
6727 
6728 // ===================================================================
6729 
6730 CallableOptions_FetchDevicesEntry_DoNotUse::CallableOptions_FetchDevicesEntry_DoNotUse() {}
6731 CallableOptions_FetchDevicesEntry_DoNotUse::CallableOptions_FetchDevicesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
6732     : SuperType(arena) {}
6733 void CallableOptions_FetchDevicesEntry_DoNotUse::MergeFrom(const CallableOptions_FetchDevicesEntry_DoNotUse& other) {
6734   MergeFromInternal(other);
6735 }
6736 
6737 // ===================================================================
6738 
6739 class CallableOptions::_Internal {
6740  public:
6741   static const ::tensorflow::RunOptions& run_options(const CallableOptions* msg);
6742 };
6743 
6744 const ::tensorflow::RunOptions&
6745 CallableOptions::_Internal::run_options(const CallableOptions* msg) {
6746   return *msg->_impl_.run_options_;
6747 }
6748 CallableOptions::CallableOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
6749                          bool is_message_owned)
6750   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
6751   SharedCtor(arena, is_message_owned);
6752   // @@protoc_insertion_point(arena_constructor:tensorflow.CallableOptions)
6753 }
6754 CallableOptions::CallableOptions(const CallableOptions& from)
6755   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
6756   CallableOptions* const _this = this; (void)_this;
6757   new (&_impl_) Impl_{
6758       decltype(_impl_.feed_){from._impl_.feed_}
6759     , decltype(_impl_.fetch_){from._impl_.fetch_}
6760     , decltype(_impl_.target_){from._impl_.target_}
6761     , decltype(_impl_.tensor_connection_){from._impl_.tensor_connection_}
6762     , /*decltype(_impl_.feed_devices_)*/{}
6763     , /*decltype(_impl_.fetch_devices_)*/{}
6764     , decltype(_impl_.run_options_){nullptr}
6765     , decltype(_impl_.fetch_skip_sync_){}
6766     , /*decltype(_impl_._cached_size_)*/{}};
6767 
6768   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
6769   _this->_impl_.feed_devices_.MergeFrom(from._impl_.feed_devices_);
6770   _this->_impl_.fetch_devices_.MergeFrom(from._impl_.fetch_devices_);
6771   if (from._internal_has_run_options()) {
6772     _this->_impl_.run_options_ = new ::tensorflow::RunOptions(*from._impl_.run_options_);
6773   }
6774   _this->_impl_.fetch_skip_sync_ = from._impl_.fetch_skip_sync_;
6775   // @@protoc_insertion_point(copy_constructor:tensorflow.CallableOptions)
6776 }
6777 
6778 inline void CallableOptions::SharedCtor(
6779     ::_pb::Arena* arena, bool is_message_owned) {
6780   (void)arena;
6781   (void)is_message_owned;
6782   new (&_impl_) Impl_{
6783       decltype(_impl_.feed_){arena}
6784     , decltype(_impl_.fetch_){arena}
6785     , decltype(_impl_.target_){arena}
6786     , decltype(_impl_.tensor_connection_){arena}
6787     , /*decltype(_impl_.feed_devices_)*/{::_pbi::ArenaInitialized(), arena}
6788     , /*decltype(_impl_.fetch_devices_)*/{::_pbi::ArenaInitialized(), arena}
6789     , decltype(_impl_.run_options_){nullptr}
6790     , decltype(_impl_.fetch_skip_sync_){false}
6791     , /*decltype(_impl_._cached_size_)*/{}
6792   };
6793 }
6794 
6795 CallableOptions::~CallableOptions() {
6796   // @@protoc_insertion_point(destructor:tensorflow.CallableOptions)
6797   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
6798   (void)arena;
6799     return;
6800   }
6801   SharedDtor();
6802 }
6803 
6804 inline void CallableOptions::SharedDtor() {
6805   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
6806   _impl_.feed_.~RepeatedPtrField();
6807   _impl_.fetch_.~RepeatedPtrField();
6808   _impl_.target_.~RepeatedPtrField();
6809   _impl_.tensor_connection_.~RepeatedPtrField();
6810   _impl_.feed_devices_.Destruct();
6811   _impl_.feed_devices_.~MapFieldLite();
6812   _impl_.fetch_devices_.Destruct();
6813   _impl_.fetch_devices_.~MapFieldLite();
6814   if (this != internal_default_instance()) delete _impl_.run_options_;
6815 }
6816 
6817 void CallableOptions::SetCachedSize(int size) const {
6818   _impl_._cached_size_.Set(size);
6819 }
6820 
6821 void CallableOptions::Clear() {
6822 // @@protoc_insertion_point(message_clear_start:tensorflow.CallableOptions)
6823   ::uint32_t cached_has_bits = 0;
6824   // Prevent compiler warnings about cached_has_bits being unused
6825   (void) cached_has_bits;
6826 
6827   _impl_.feed_.Clear();
6828   _impl_.fetch_.Clear();
6829   _impl_.target_.Clear();
6830   _impl_.tensor_connection_.Clear();
6831   _impl_.feed_devices_.Clear();
6832   _impl_.fetch_devices_.Clear();
6833   if (GetArenaForAllocation() == nullptr && _impl_.run_options_ != nullptr) {
6834     delete _impl_.run_options_;
6835   }
6836   _impl_.run_options_ = nullptr;
6837   _impl_.fetch_skip_sync_ = false;
6838   _internal_metadata_.Clear<std::string>();
6839 }
6840 
6841 const char* CallableOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
6842 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
6843   while (!ctx->Done(&ptr)) {
6844     ::uint32_t tag;
6845     ptr = ::_pbi::ReadTag(ptr, &tag);
6846     switch (tag >> 3) {
6847       // repeated string feed = 1;
6848       case 1:
6849         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
6850           ptr -= 1;
6851           do {
6852             ptr += 1;
6853             auto str = _internal_add_feed();
6854             ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
6855             CHK_(ptr);
6856             CHK_(::_pbi::VerifyUTF8(str, nullptr));
6857             if (!ctx->DataAvailable(ptr)) break;
6858           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
6859         } else {
6860           goto handle_unusual;
6861         }
6862         continue;
6863       // repeated string fetch = 2;
6864       case 2:
6865         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
6866           ptr -= 1;
6867           do {
6868             ptr += 1;
6869             auto str = _internal_add_fetch();
6870             ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
6871             CHK_(ptr);
6872             CHK_(::_pbi::VerifyUTF8(str, nullptr));
6873             if (!ctx->DataAvailable(ptr)) break;
6874           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr));
6875         } else {
6876           goto handle_unusual;
6877         }
6878         continue;
6879       // repeated string target = 3;
6880       case 3:
6881         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
6882           ptr -= 1;
6883           do {
6884             ptr += 1;
6885             auto str = _internal_add_target();
6886             ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
6887             CHK_(ptr);
6888             CHK_(::_pbi::VerifyUTF8(str, nullptr));
6889             if (!ctx->DataAvailable(ptr)) break;
6890           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<26>(ptr));
6891         } else {
6892           goto handle_unusual;
6893         }
6894         continue;
6895       // .tensorflow.RunOptions run_options = 4;
6896       case 4:
6897         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
6898           ptr = ctx->ParseMessage(_internal_mutable_run_options(), ptr);
6899           CHK_(ptr);
6900         } else {
6901           goto handle_unusual;
6902         }
6903         continue;
6904       // repeated .tensorflow.TensorConnection tensor_connection = 5;
6905       case 5:
6906         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
6907           ptr -= 1;
6908           do {
6909             ptr += 1;
6910             ptr = ctx->ParseMessage(_internal_add_tensor_connection(), ptr);
6911             CHK_(ptr);
6912             if (!ctx->DataAvailable(ptr)) break;
6913           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<42>(ptr));
6914         } else {
6915           goto handle_unusual;
6916         }
6917         continue;
6918       // map<string, string> feed_devices = 6;
6919       case 6:
6920         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
6921           ptr -= 1;
6922           do {
6923             ptr += 1;
6924             ptr = ctx->ParseMessage(&_impl_.feed_devices_, ptr);
6925             CHK_(ptr);
6926             if (!ctx->DataAvailable(ptr)) break;
6927           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<50>(ptr));
6928         } else {
6929           goto handle_unusual;
6930         }
6931         continue;
6932       // map<string, string> fetch_devices = 7;
6933       case 7:
6934         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 58)) {
6935           ptr -= 1;
6936           do {
6937             ptr += 1;
6938             ptr = ctx->ParseMessage(&_impl_.fetch_devices_, ptr);
6939             CHK_(ptr);
6940             if (!ctx->DataAvailable(ptr)) break;
6941           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<58>(ptr));
6942         } else {
6943           goto handle_unusual;
6944         }
6945         continue;
6946       // bool fetch_skip_sync = 8;
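      // Tag byte 64 is (8 << 3) | 0: the bool arrives as a wire-type-0 varint,
      // read with ReadVarint64 and narrowed to bool on assignment.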
6947       case 8:
6948         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 64)) {
6949           _impl_.fetch_skip_sync_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
6950           CHK_(ptr);
6951         } else {
6952           goto handle_unusual;
6953         }
6954         continue;
6955       default:
6956         goto handle_unusual;
6957     }  // switch
6958   handle_unusual:
6959     if ((tag == 0) || ((tag & 7) == 4)) {
6960       CHK_(ptr);
6961       ctx->SetLastTag(tag);
6962       goto message_done;
6963     }
6964     ptr = UnknownFieldParse(
6965         tag,
6966         _internal_metadata_.mutable_unknown_fields<std::string>(),
6967         ptr, ctx);
6968     CHK_(ptr != nullptr);
6969   }  // while
6970 message_done:
6971   return ptr;
6972 failure:
6973   ptr = nullptr;
6974   goto message_done;
6975 #undef CHK_
6976 }
6977 
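// When the output stream requests deterministic serialization, the map fields
// below are emitted in sorted key order via MapSorterPtr; otherwise entries
// are written in the map's (unspecified) iteration order.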
6978 ::uint8_t* CallableOptions::_InternalSerialize(
6979     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
6980   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.CallableOptions)
6981   ::uint32_t cached_has_bits = 0;
6982   (void) cached_has_bits;
6983 
6984   // repeated string feed = 1;
6985   for (int i = 0, n = this->_internal_feed_size(); i < n; i++) {
6986     const auto& s = this->_internal_feed(i);
6987     ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
6988       s.data(), static_cast<int>(s.length()),
6989       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
6990       "tensorflow.CallableOptions.feed");
6991     target = stream->WriteString(1, s, target);
6992   }
6993 
6994   // repeated string fetch = 2;
6995   for (int i = 0, n = this->_internal_fetch_size(); i < n; i++) {
6996     const auto& s = this->_internal_fetch(i);
6997     ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
6998       s.data(), static_cast<int>(s.length()),
6999       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
7000       "tensorflow.CallableOptions.fetch");
7001     target = stream->WriteString(2, s, target);
7002   }
7003 
7004   // repeated string target = 3;
7005   for (int i = 0, n = this->_internal_target_size(); i < n; i++) {
7006     const auto& s = this->_internal_target(i);
7007     ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
7008       s.data(), static_cast<int>(s.length()),
7009       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
7010       "tensorflow.CallableOptions.target");
7011     target = stream->WriteString(3, s, target);
7012   }
7013 
7014   // .tensorflow.RunOptions run_options = 4;
7015   if (this->_internal_has_run_options()) {
7016     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
7017       InternalWriteMessage(4, _Internal::run_options(this),
7018         _Internal::run_options(this).GetCachedSize(), target, stream);
7019   }
7020 
7021   // repeated .tensorflow.TensorConnection tensor_connection = 5;
7022   for (unsigned i = 0,
7023       n = static_cast<unsigned>(this->_internal_tensor_connection_size()); i < n; i++) {
7024     const auto& repfield = this->_internal_tensor_connection(i);
7025     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
7026         InternalWriteMessage(5, repfield, repfield.GetCachedSize(), target, stream);
7027   }
7028 
7029   // map<string, string> feed_devices = 6;
7030   if (!this->_internal_feed_devices().empty()) {
7031     using MapType = ::_pb::Map<std::string, std::string>;
7032     using WireHelper = CallableOptions_FeedDevicesEntry_DoNotUse::Funcs;
7033     const auto& map_field = this->_internal_feed_devices();
7034     auto check_utf8 = [](const MapType::value_type& entry) {
7035       (void)entry;
7036       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
7037         entry.first.data(), static_cast<int>(entry.first.length()),
7038         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
7039         "tensorflow.CallableOptions.FeedDevicesEntry.key");
7040       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
7041         entry.second.data(), static_cast<int>(entry.second.length()),
7042         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
7043         "tensorflow.CallableOptions.FeedDevicesEntry.value");
7044     };
7045 
7046     if (stream->IsSerializationDeterministic() && map_field.size() > 1) {
7047       for (const auto& entry : ::_pbi::MapSorterPtr<MapType>(map_field)) {
7048         target = WireHelper::InternalSerialize(6, entry.first, entry.second, target, stream);
7049         check_utf8(entry);
7050       }
7051     } else {
7052       for (const auto& entry : map_field) {
7053         target = WireHelper::InternalSerialize(6, entry.first, entry.second, target, stream);
7054         check_utf8(entry);
7055       }
7056     }
7057   }
7058 
7059   // map<string, string> fetch_devices = 7;
7060   if (!this->_internal_fetch_devices().empty()) {
7061     using MapType = ::_pb::Map<std::string, std::string>;
7062     using WireHelper = CallableOptions_FetchDevicesEntry_DoNotUse::Funcs;
7063     const auto& map_field = this->_internal_fetch_devices();
7064     auto check_utf8 = [](const MapType::value_type& entry) {
7065       (void)entry;
7066       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
7067         entry.first.data(), static_cast<int>(entry.first.length()),
7068         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
7069         "tensorflow.CallableOptions.FetchDevicesEntry.key");
7070       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
7071         entry.second.data(), static_cast<int>(entry.second.length()),
7072         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
7073         "tensorflow.CallableOptions.FetchDevicesEntry.value");
7074     };
7075 
7076     if (stream->IsSerializationDeterministic() && map_field.size() > 1) {
7077       for (const auto& entry : ::_pbi::MapSorterPtr<MapType>(map_field)) {
7078         target = WireHelper::InternalSerialize(7, entry.first, entry.second, target, stream);
7079         check_utf8(entry);
7080       }
7081     } else {
7082       for (const auto& entry : map_field) {
7083         target = WireHelper::InternalSerialize(7, entry.first, entry.second, target, stream);
7084         check_utf8(entry);
7085       }
7086     }
7087   }
7088 
7089   // bool fetch_skip_sync = 8;
7090   if (this->_internal_fetch_skip_sync() != 0) {
7091     target = stream->EnsureSpace(target);
7092     target = ::_pbi::WireFormatLite::WriteBoolToArray(8, this->_internal_fetch_skip_sync(), target);
7093   }
7094 
7095   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
7096     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
7097         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
7098   }
7099   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.CallableOptions)
7100   return target;
7101 }
7102 
7103 size_t CallableOptions::ByteSizeLong() const {
7104 // @@protoc_insertion_point(message_byte_size_start:tensorflow.CallableOptions)
7105   size_t total_size = 0;
7106 
7107   ::uint32_t cached_has_bits = 0;
7108   // Prevent compiler warnings about cached_has_bits being unused
7109   (void) cached_has_bits;
7110 
7111   // repeated string feed = 1;
7112   total_size += 1 *
7113       ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(_impl_.feed_.size());
7114   for (int i = 0, n = _impl_.feed_.size(); i < n; i++) {
7115     total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
7116       _impl_.feed_.Get(i));
7117   }
7118 
7119   // repeated string fetch = 2;
7120   total_size += 1 *
7121       ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(_impl_.fetch_.size());
7122   for (int i = 0, n = _impl_.fetch_.size(); i < n; i++) {
7123     total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
7124       _impl_.fetch_.Get(i));
7125   }
7126 
7127   // repeated string target = 3;
7128   total_size += 1 *
7129       ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(_impl_.target_.size());
7130   for (int i = 0, n = _impl_.target_.size(); i < n; i++) {
7131     total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
7132       _impl_.target_.Get(i));
7133   }
7134 
7135   // repeated .tensorflow.TensorConnection tensor_connection = 5;
7136   total_size += 1UL * this->_internal_tensor_connection_size();
7137   for (const auto& msg : this->_impl_.tensor_connection_) {
7138     total_size +=
7139       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
7140   }
7141 
7142   // map<string, string> feed_devices = 6;
7143   total_size += 1 *
7144       ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_feed_devices_size());
7145   for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
7146       it = this->_internal_feed_devices().begin();
7147       it != this->_internal_feed_devices().end(); ++it) {
7148     total_size += CallableOptions_FeedDevicesEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
7149   }
7150 
7151   // map<string, string> fetch_devices = 7;
7152   total_size += 1 *
7153       ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_fetch_devices_size());
7154   for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
7155       it = this->_internal_fetch_devices().begin();
7156       it != this->_internal_fetch_devices().end(); ++it) {
7157     total_size += CallableOptions_FetchDevicesEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
7158   }
7159 
7160   // .tensorflow.RunOptions run_options = 4;
7161   if (this->_internal_has_run_options()) {
7162     total_size += 1 +
7163       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
7164         *_impl_.run_options_);
7165   }
7166 
7167   // bool fetch_skip_sync = 8;
7168   if (this->_internal_fetch_skip_sync() != 0) {
7169     total_size += 1 + 1;
7170   }
7171 
7172   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
7173     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
7174   }
7175   int cached_size = ::_pbi::ToCachedSize(total_size);
7176   SetCachedSize(cached_size);
7177   return total_size;
7178 }
7179 
7180 void CallableOptions::CheckTypeAndMergeFrom(
7181     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
7182   MergeFrom(*::_pbi::DownCast<const CallableOptions*>(
7183       &from));
7184 }
7185 
7186 void CallableOptions::MergeFrom(const CallableOptions& from) {
7187   CallableOptions* const _this = this;
7188   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.CallableOptions)
7189   GOOGLE_DCHECK_NE(&from, _this);
7190   ::uint32_t cached_has_bits = 0;
7191   (void) cached_has_bits;
7192 
7193   _this->_impl_.feed_.MergeFrom(from._impl_.feed_);
7194   _this->_impl_.fetch_.MergeFrom(from._impl_.fetch_);
7195   _this->_impl_.target_.MergeFrom(from._impl_.target_);
7196   _this->_impl_.tensor_connection_.MergeFrom(from._impl_.tensor_connection_);
7197   _this->_impl_.feed_devices_.MergeFrom(from._impl_.feed_devices_);
7198   _this->_impl_.fetch_devices_.MergeFrom(from._impl_.fetch_devices_);
7199   if (from._internal_has_run_options()) {
7200     _this->_internal_mutable_run_options()->::tensorflow::RunOptions::MergeFrom(
7201         from._internal_run_options());
7202   }
7203   if (from._internal_fetch_skip_sync() != 0) {
7204     _this->_internal_set_fetch_skip_sync(from._internal_fetch_skip_sync());
7205   }
7206   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
7207 }
7208 
7209 void CallableOptions::CopyFrom(const CallableOptions& from) {
7210 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.CallableOptions)
7211   if (&from == this) return;
7212   Clear();
7213   MergeFrom(from);
7214 }
7215 
7216 bool CallableOptions::IsInitialized() const {
7217   return true;
7218 }
7219 
7220 void CallableOptions::InternalSwap(CallableOptions* other) {
7221   using std::swap;
7222   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
7223   _impl_.feed_.InternalSwap(&other->_impl_.feed_);
7224   _impl_.fetch_.InternalSwap(&other->_impl_.fetch_);
7225   _impl_.target_.InternalSwap(&other->_impl_.target_);
7226   _impl_.tensor_connection_.InternalSwap(&other->_impl_.tensor_connection_);
7227   _impl_.feed_devices_.InternalSwap(&other->_impl_.feed_devices_);
7228   _impl_.fetch_devices_.InternalSwap(&other->_impl_.fetch_devices_);
7229   ::PROTOBUF_NAMESPACE_ID::internal::memswap<
7230       PROTOBUF_FIELD_OFFSET(CallableOptions, _impl_.fetch_skip_sync_)
7231       + sizeof(CallableOptions::_impl_.fetch_skip_sync_)  // NOLINT
7232       - PROTOBUF_FIELD_OFFSET(CallableOptions, _impl_.run_options_)>(
7233           reinterpret_cast<char*>(&_impl_.run_options_),
7234           reinterpret_cast<char*>(&other->_impl_.run_options_));
7235 }
7236 
7237 std::string CallableOptions::GetTypeName() const {
7238   return "tensorflow.CallableOptions";
7239 }
7240 
7241 
7242 // @@protoc_insertion_point(namespace_scope)
7243 }  // namespace tensorflow
7244 PROTOBUF_NAMESPACE_OPEN
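// Explicit specializations so that Arena::CreateMaybeMessage<T>() constructs
// each generated type through the arena-aware CreateMessageInternal() path.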
7245 template<> PROTOBUF_NOINLINE ::tensorflow::GPUOptions_Experimental_VirtualDevices*
7246 Arena::CreateMaybeMessage< ::tensorflow::GPUOptions_Experimental_VirtualDevices >(Arena* arena) {
7247   return Arena::CreateMessageInternal< ::tensorflow::GPUOptions_Experimental_VirtualDevices >(arena);
7248 }
7249 template<> PROTOBUF_NOINLINE ::tensorflow::GPUOptions_Experimental*
7250 Arena::CreateMaybeMessage< ::tensorflow::GPUOptions_Experimental >(Arena* arena) {
7251   return Arena::CreateMessageInternal< ::tensorflow::GPUOptions_Experimental >(arena);
7252 }
7253 template<> PROTOBUF_NOINLINE ::tensorflow::GPUOptions*
7254 Arena::CreateMaybeMessage< ::tensorflow::GPUOptions >(Arena* arena) {
7255   return Arena::CreateMessageInternal< ::tensorflow::GPUOptions >(arena);
7256 }
7257 template<> PROTOBUF_NOINLINE ::tensorflow::OptimizerOptions*
7258 Arena::CreateMaybeMessage< ::tensorflow::OptimizerOptions >(Arena* arena) {
7259   return Arena::CreateMessageInternal< ::tensorflow::OptimizerOptions >(arena);
7260 }
7261 template<> PROTOBUF_NOINLINE ::tensorflow::GraphOptions*
7262 Arena::CreateMaybeMessage< ::tensorflow::GraphOptions >(Arena* arena) {
7263   return Arena::CreateMessageInternal< ::tensorflow::GraphOptions >(arena);
7264 }
7265 template<> PROTOBUF_NOINLINE ::tensorflow::ThreadPoolOptionProto*
7266 Arena::CreateMaybeMessage< ::tensorflow::ThreadPoolOptionProto >(Arena* arena) {
7267   return Arena::CreateMessageInternal< ::tensorflow::ThreadPoolOptionProto >(arena);
7268 }
7269 template<> PROTOBUF_NOINLINE ::tensorflow::RPCOptions*
7270 Arena::CreateMaybeMessage< ::tensorflow::RPCOptions >(Arena* arena) {
7271   return Arena::CreateMessageInternal< ::tensorflow::RPCOptions >(arena);
7272 }
7273 template<> PROTOBUF_NOINLINE ::tensorflow::SessionMetadata*
7274 Arena::CreateMaybeMessage< ::tensorflow::SessionMetadata >(Arena* arena) {
7275   return Arena::CreateMessageInternal< ::tensorflow::SessionMetadata >(arena);
7276 }
7277 template<> PROTOBUF_NOINLINE ::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse*
CreateMaybeMessage(Arena * arena)7278 Arena::CreateMaybeMessage< ::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse >(Arena* arena) {
7279   return Arena::CreateMessageInternal< ::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse >(arena);
7280 }
7281 template<> PROTOBUF_NOINLINE ::tensorflow::ConfigProto_Experimental*
CreateMaybeMessage(Arena * arena)7282 Arena::CreateMaybeMessage< ::tensorflow::ConfigProto_Experimental >(Arena* arena) {
7283   return Arena::CreateMessageInternal< ::tensorflow::ConfigProto_Experimental >(arena);
7284 }
7285 template<> PROTOBUF_NOINLINE ::tensorflow::ConfigProto*
CreateMaybeMessage(Arena * arena)7286 Arena::CreateMaybeMessage< ::tensorflow::ConfigProto >(Arena* arena) {
7287   return Arena::CreateMessageInternal< ::tensorflow::ConfigProto >(arena);
7288 }
7289 template<> PROTOBUF_NOINLINE ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions*
CreateMaybeMessage(Arena * arena)7290 Arena::CreateMaybeMessage< ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions >(Arena* arena) {
7291   return Arena::CreateMessageInternal< ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions >(arena);
7292 }
7293 template<> PROTOBUF_NOINLINE ::tensorflow::RunOptions_Experimental*
CreateMaybeMessage(Arena * arena)7294 Arena::CreateMaybeMessage< ::tensorflow::RunOptions_Experimental >(Arena* arena) {
7295   return Arena::CreateMessageInternal< ::tensorflow::RunOptions_Experimental >(arena);
7296 }
7297 template<> PROTOBUF_NOINLINE ::tensorflow::RunOptions*
CreateMaybeMessage(Arena * arena)7298 Arena::CreateMaybeMessage< ::tensorflow::RunOptions >(Arena* arena) {
7299   return Arena::CreateMessageInternal< ::tensorflow::RunOptions >(arena);
7300 }
7301 template<> PROTOBUF_NOINLINE ::tensorflow::RunMetadata_FunctionGraphs*
CreateMaybeMessage(Arena * arena)7302 Arena::CreateMaybeMessage< ::tensorflow::RunMetadata_FunctionGraphs >(Arena* arena) {
7303   return Arena::CreateMessageInternal< ::tensorflow::RunMetadata_FunctionGraphs >(arena);
7304 }
7305 template<> PROTOBUF_NOINLINE ::tensorflow::RunMetadata*
CreateMaybeMessage(Arena * arena)7306 Arena::CreateMaybeMessage< ::tensorflow::RunMetadata >(Arena* arena) {
7307   return Arena::CreateMessageInternal< ::tensorflow::RunMetadata >(arena);
7308 }
7309 template<> PROTOBUF_NOINLINE ::tensorflow::TensorConnection*
CreateMaybeMessage(Arena * arena)7310 Arena::CreateMaybeMessage< ::tensorflow::TensorConnection >(Arena* arena) {
7311   return Arena::CreateMessageInternal< ::tensorflow::TensorConnection >(arena);
7312 }
7313 template<> PROTOBUF_NOINLINE ::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse*
CreateMaybeMessage(Arena * arena)7314 Arena::CreateMaybeMessage< ::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse >(Arena* arena) {
7315   return Arena::CreateMessageInternal< ::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse >(arena);
7316 }
7317 template<> PROTOBUF_NOINLINE ::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse*
CreateMaybeMessage(Arena * arena)7318 Arena::CreateMaybeMessage< ::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse >(Arena* arena) {
7319   return Arena::CreateMessageInternal< ::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse >(arena);
7320 }
7321 template<> PROTOBUF_NOINLINE ::tensorflow::CallableOptions*
CreateMaybeMessage(Arena * arena)7322 Arena::CreateMaybeMessage< ::tensorflow::CallableOptions >(Arena* arena) {
7323   return Arena::CreateMessageInternal< ::tensorflow::CallableOptions >(arena);
7324 }
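// Illustrative sketch (not part of the generated output): user code normally
// triggers these specializations indirectly through the public arena API,
// assuming this protobuf release exposes Arena::CreateMessage:
//
//   ::google::protobuf::Arena arena;
//   auto* cfg = ::google::protobuf::Arena::CreateMessage<
//       ::tensorflow::ConfigProto>(&arena);
//   cfg->set_allow_soft_placement(true);  // freed when `arena` is destroyed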
PROTOBUF_NAMESPACE_CLOSE

// @@protoc_insertion_point(global_scope)
#include <google/protobuf/port_undef.inc>