// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto

#ifndef GOOGLE_PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto
#define GOOGLE_PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto

#include <cstdint>
#include <limits>
#include <string>

#include <google/protobuf/port_def.inc>
#if PROTOBUF_VERSION < 3021000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3021012 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif

#include <google/protobuf/port_undef.inc>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata_lite.h>
#include <google/protobuf/message_lite.h>
#include <google/protobuf/repeated_field.h>  // IWYU pragma: export
#include <google/protobuf/extension_set.h>  // IWYU pragma: export
#include <google/protobuf/map.h>  // IWYU pragma: export
#include <google/protobuf/map_entry_lite.h>
#include <google/protobuf/map_field_lite.h>
#include <google/protobuf/generated_enum_util.h>
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/coordination_config.pb.h"
#include "tensorflow/core/protobuf/debug.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
#define PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto
PROTOBUF_NAMESPACE_OPEN
namespace internal {
class AnyMetadata;
}  // namespace internal
PROTOBUF_NAMESPACE_CLOSE

// Internal implementation detail -- do not use these members.
struct TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto {
  static const ::uint32_t offsets[];
};
namespace tensorflow {
class CallableOptions;
struct CallableOptionsDefaultTypeInternal;
extern CallableOptionsDefaultTypeInternal _CallableOptions_default_instance_;
class CallableOptions_FeedDevicesEntry_DoNotUse;
struct CallableOptions_FeedDevicesEntry_DoNotUseDefaultTypeInternal;
extern CallableOptions_FeedDevicesEntry_DoNotUseDefaultTypeInternal _CallableOptions_FeedDevicesEntry_DoNotUse_default_instance_;
class CallableOptions_FetchDevicesEntry_DoNotUse;
struct CallableOptions_FetchDevicesEntry_DoNotUseDefaultTypeInternal;
extern CallableOptions_FetchDevicesEntry_DoNotUseDefaultTypeInternal _CallableOptions_FetchDevicesEntry_DoNotUse_default_instance_;
class ConfigProto;
struct ConfigProtoDefaultTypeInternal;
extern ConfigProtoDefaultTypeInternal _ConfigProto_default_instance_;
class ConfigProto_DeviceCountEntry_DoNotUse;
struct ConfigProto_DeviceCountEntry_DoNotUseDefaultTypeInternal;
extern ConfigProto_DeviceCountEntry_DoNotUseDefaultTypeInternal _ConfigProto_DeviceCountEntry_DoNotUse_default_instance_;
class ConfigProto_Experimental;
struct ConfigProto_ExperimentalDefaultTypeInternal;
extern ConfigProto_ExperimentalDefaultTypeInternal _ConfigProto_Experimental_default_instance_;
class GPUOptions;
struct GPUOptionsDefaultTypeInternal;
extern GPUOptionsDefaultTypeInternal _GPUOptions_default_instance_;
class GPUOptions_Experimental;
struct GPUOptions_ExperimentalDefaultTypeInternal;
extern GPUOptions_ExperimentalDefaultTypeInternal _GPUOptions_Experimental_default_instance_;
class GPUOptions_Experimental_VirtualDevices;
struct GPUOptions_Experimental_VirtualDevicesDefaultTypeInternal;
extern GPUOptions_Experimental_VirtualDevicesDefaultTypeInternal _GPUOptions_Experimental_VirtualDevices_default_instance_;
class GraphOptions;
struct GraphOptionsDefaultTypeInternal;
extern GraphOptionsDefaultTypeInternal _GraphOptions_default_instance_;
class OptimizerOptions;
struct OptimizerOptionsDefaultTypeInternal;
extern OptimizerOptionsDefaultTypeInternal _OptimizerOptions_default_instance_;
class RPCOptions;
struct RPCOptionsDefaultTypeInternal;
extern RPCOptionsDefaultTypeInternal _RPCOptions_default_instance_;
class RunMetadata;
struct RunMetadataDefaultTypeInternal;
extern RunMetadataDefaultTypeInternal _RunMetadata_default_instance_;
class RunMetadata_FunctionGraphs;
struct RunMetadata_FunctionGraphsDefaultTypeInternal;
extern RunMetadata_FunctionGraphsDefaultTypeInternal _RunMetadata_FunctionGraphs_default_instance_;
class RunOptions;
struct RunOptionsDefaultTypeInternal;
extern RunOptionsDefaultTypeInternal _RunOptions_default_instance_;
class RunOptions_Experimental;
struct RunOptions_ExperimentalDefaultTypeInternal;
extern RunOptions_ExperimentalDefaultTypeInternal _RunOptions_Experimental_default_instance_;
class RunOptions_Experimental_RunHandlerPoolOptions;
struct RunOptions_Experimental_RunHandlerPoolOptionsDefaultTypeInternal;
extern RunOptions_Experimental_RunHandlerPoolOptionsDefaultTypeInternal _RunOptions_Experimental_RunHandlerPoolOptions_default_instance_;
class SessionMetadata;
struct SessionMetadataDefaultTypeInternal;
extern SessionMetadataDefaultTypeInternal _SessionMetadata_default_instance_;
class TensorConnection;
struct TensorConnectionDefaultTypeInternal;
extern TensorConnectionDefaultTypeInternal _TensorConnection_default_instance_;
class ThreadPoolOptionProto;
struct ThreadPoolOptionProtoDefaultTypeInternal;
extern ThreadPoolOptionProtoDefaultTypeInternal _ThreadPoolOptionProto_default_instance_;
}  // namespace tensorflow
PROTOBUF_NAMESPACE_OPEN
template<> ::tensorflow::CallableOptions* Arena::CreateMaybeMessage<::tensorflow::CallableOptions>(Arena*);
template<> ::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse* Arena::CreateMaybeMessage<::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse>(Arena*);
template<> ::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse* Arena::CreateMaybeMessage<::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse>(Arena*);
template<> ::tensorflow::ConfigProto* Arena::CreateMaybeMessage<::tensorflow::ConfigProto>(Arena*);
template<> ::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse* Arena::CreateMaybeMessage<::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse>(Arena*);
template<> ::tensorflow::ConfigProto_Experimental* Arena::CreateMaybeMessage<::tensorflow::ConfigProto_Experimental>(Arena*);
template<> ::tensorflow::GPUOptions* Arena::CreateMaybeMessage<::tensorflow::GPUOptions>(Arena*);
template<> ::tensorflow::GPUOptions_Experimental* Arena::CreateMaybeMessage<::tensorflow::GPUOptions_Experimental>(Arena*);
template<> ::tensorflow::GPUOptions_Experimental_VirtualDevices* Arena::CreateMaybeMessage<::tensorflow::GPUOptions_Experimental_VirtualDevices>(Arena*);
template<> ::tensorflow::GraphOptions* Arena::CreateMaybeMessage<::tensorflow::GraphOptions>(Arena*);
template<> ::tensorflow::OptimizerOptions* Arena::CreateMaybeMessage<::tensorflow::OptimizerOptions>(Arena*);
template<> ::tensorflow::RPCOptions* Arena::CreateMaybeMessage<::tensorflow::RPCOptions>(Arena*);
template<> ::tensorflow::RunMetadata* Arena::CreateMaybeMessage<::tensorflow::RunMetadata>(Arena*);
template<> ::tensorflow::RunMetadata_FunctionGraphs* Arena::CreateMaybeMessage<::tensorflow::RunMetadata_FunctionGraphs>(Arena*);
template<> ::tensorflow::RunOptions* Arena::CreateMaybeMessage<::tensorflow::RunOptions>(Arena*);
template<> ::tensorflow::RunOptions_Experimental* Arena::CreateMaybeMessage<::tensorflow::RunOptions_Experimental>(Arena*);
template<> ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* Arena::CreateMaybeMessage<::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions>(Arena*);
template<> ::tensorflow::SessionMetadata* Arena::CreateMaybeMessage<::tensorflow::SessionMetadata>(Arena*);
template<> ::tensorflow::TensorConnection* Arena::CreateMaybeMessage<::tensorflow::TensorConnection>(Arena*);
template<> ::tensorflow::ThreadPoolOptionProto* Arena::CreateMaybeMessage<::tensorflow::ThreadPoolOptionProto>(Arena*);
PROTOBUF_NAMESPACE_CLOSE
namespace tensorflow {

enum OptimizerOptions_Level : int {
  OptimizerOptions_Level_L1 = 0,
  OptimizerOptions_Level_L0 = -1,
  OptimizerOptions_Level_OptimizerOptions_Level_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::min(),
  OptimizerOptions_Level_OptimizerOptions_Level_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::max()
};
bool OptimizerOptions_Level_IsValid(int value);
constexpr OptimizerOptions_Level OptimizerOptions_Level_Level_MIN = OptimizerOptions_Level_L0;
constexpr OptimizerOptions_Level OptimizerOptions_Level_Level_MAX = OptimizerOptions_Level_L1;
constexpr int OptimizerOptions_Level_Level_ARRAYSIZE = OptimizerOptions_Level_Level_MAX + 1;

const std::string& OptimizerOptions_Level_Name(OptimizerOptions_Level value);
template<typename T>
inline const std::string& OptimizerOptions_Level_Name(T enum_t_value) {
  static_assert(::std::is_same<T, OptimizerOptions_Level>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function OptimizerOptions_Level_Name.");
  return OptimizerOptions_Level_Name(static_cast<OptimizerOptions_Level>(enum_t_value));
}
bool OptimizerOptions_Level_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, OptimizerOptions_Level* value);
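// Editor's note (illustrative sketch, not generated code): the Name/Parse
// free functions above follow the usual protoc pattern for mapping between
// enum values and their .proto identifiers. A minimal round trip, assuming
// this header is linked against the generated config.pb.cc:
//
//   tensorflow::OptimizerOptions_Level level;
//   if (tensorflow::OptimizerOptions_Level_Parse("L1", &level)) {
//     const std::string& name = tensorflow::OptimizerOptions_Level_Name(level);
//     // name == "L1"
//   }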
enum OptimizerOptions_GlobalJitLevel : int {
  OptimizerOptions_GlobalJitLevel_DEFAULT = 0,
  OptimizerOptions_GlobalJitLevel_OFF = -1,
  OptimizerOptions_GlobalJitLevel_ON_1 = 1,
  OptimizerOptions_GlobalJitLevel_ON_2 = 2,
  OptimizerOptions_GlobalJitLevel_OptimizerOptions_GlobalJitLevel_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::min(),
  OptimizerOptions_GlobalJitLevel_OptimizerOptions_GlobalJitLevel_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::max()
};
bool OptimizerOptions_GlobalJitLevel_IsValid(int value);
constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions_GlobalJitLevel_GlobalJitLevel_MIN = OptimizerOptions_GlobalJitLevel_OFF;
constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions_GlobalJitLevel_GlobalJitLevel_MAX = OptimizerOptions_GlobalJitLevel_ON_2;
constexpr int OptimizerOptions_GlobalJitLevel_GlobalJitLevel_ARRAYSIZE = OptimizerOptions_GlobalJitLevel_GlobalJitLevel_MAX + 1;

const std::string& OptimizerOptions_GlobalJitLevel_Name(OptimizerOptions_GlobalJitLevel value);
template<typename T>
inline const std::string& OptimizerOptions_GlobalJitLevel_Name(T enum_t_value) {
  static_assert(::std::is_same<T, OptimizerOptions_GlobalJitLevel>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function OptimizerOptions_GlobalJitLevel_Name.");
  return OptimizerOptions_GlobalJitLevel_Name(static_cast<OptimizerOptions_GlobalJitLevel>(enum_t_value));
}
bool OptimizerOptions_GlobalJitLevel_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, OptimizerOptions_GlobalJitLevel* value);
enum ConfigProto_Experimental_MlirBridgeRollout : int {
  ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_UNSPECIFIED = 0,
  ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_ENABLED = 1,
  ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_DISABLED = 2,
  ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED = 3,
  ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED = 4,
  ConfigProto_Experimental_MlirBridgeRollout_ConfigProto_Experimental_MlirBridgeRollout_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::min(),
  ConfigProto_Experimental_MlirBridgeRollout_ConfigProto_Experimental_MlirBridgeRollout_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::max()
};
bool ConfigProto_Experimental_MlirBridgeRollout_IsValid(int value);
constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_MIN = ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_UNSPECIFIED;
constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_MAX = ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED;
constexpr int ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_ARRAYSIZE = ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_MAX + 1;

const std::string& ConfigProto_Experimental_MlirBridgeRollout_Name(ConfigProto_Experimental_MlirBridgeRollout value);
template<typename T>
inline const std::string& ConfigProto_Experimental_MlirBridgeRollout_Name(T enum_t_value) {
  static_assert(::std::is_same<T, ConfigProto_Experimental_MlirBridgeRollout>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function ConfigProto_Experimental_MlirBridgeRollout_Name.");
  return ConfigProto_Experimental_MlirBridgeRollout_Name(static_cast<ConfigProto_Experimental_MlirBridgeRollout>(enum_t_value));
}
bool ConfigProto_Experimental_MlirBridgeRollout_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ConfigProto_Experimental_MlirBridgeRollout* value);
enum RunOptions_TraceLevel : int {
  RunOptions_TraceLevel_NO_TRACE = 0,
  RunOptions_TraceLevel_SOFTWARE_TRACE = 1,
  RunOptions_TraceLevel_HARDWARE_TRACE = 2,
  RunOptions_TraceLevel_FULL_TRACE = 3,
  RunOptions_TraceLevel_RunOptions_TraceLevel_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::min(),
  RunOptions_TraceLevel_RunOptions_TraceLevel_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::max()
};
bool RunOptions_TraceLevel_IsValid(int value);
constexpr RunOptions_TraceLevel RunOptions_TraceLevel_TraceLevel_MIN = RunOptions_TraceLevel_NO_TRACE;
constexpr RunOptions_TraceLevel RunOptions_TraceLevel_TraceLevel_MAX = RunOptions_TraceLevel_FULL_TRACE;
constexpr int RunOptions_TraceLevel_TraceLevel_ARRAYSIZE = RunOptions_TraceLevel_TraceLevel_MAX + 1;

const std::string& RunOptions_TraceLevel_Name(RunOptions_TraceLevel value);
template<typename T>
inline const std::string& RunOptions_TraceLevel_Name(T enum_t_value) {
  static_assert(::std::is_same<T, RunOptions_TraceLevel>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function RunOptions_TraceLevel_Name.");
  return RunOptions_TraceLevel_Name(static_cast<RunOptions_TraceLevel>(enum_t_value));
}
bool RunOptions_TraceLevel_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, RunOptions_TraceLevel* value);
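// Editor's note (illustrative sketch, not generated code): each *_IsValid
// function above guards a static_cast from a raw wire integer; the
// *_INT_MIN/MAX_SENTINEL_DO_NOT_USE_ enumerators exist only to force a full
// 32-bit underlying type and never name real values:
//
//   int raw = 2;  // e.g. read from an int32 field
//   if (tensorflow::RunOptions_TraceLevel_IsValid(raw)) {
//     auto level = static_cast<tensorflow::RunOptions_TraceLevel>(raw);
//     // level == tensorflow::RunOptions_TraceLevel_HARDWARE_TRACE
//   }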
// ===================================================================

class GPUOptions_Experimental_VirtualDevices final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.GPUOptions.Experimental.VirtualDevices) */ {
 public:
  inline GPUOptions_Experimental_VirtualDevices() : GPUOptions_Experimental_VirtualDevices(nullptr) {}
  ~GPUOptions_Experimental_VirtualDevices() override;
  explicit PROTOBUF_CONSTEXPR GPUOptions_Experimental_VirtualDevices(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  GPUOptions_Experimental_VirtualDevices(const GPUOptions_Experimental_VirtualDevices& from);
  GPUOptions_Experimental_VirtualDevices(GPUOptions_Experimental_VirtualDevices&& from) noexcept
    : GPUOptions_Experimental_VirtualDevices() {
    *this = ::std::move(from);
  }

  inline GPUOptions_Experimental_VirtualDevices& operator=(const GPUOptions_Experimental_VirtualDevices& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline GPUOptions_Experimental_VirtualDevices& operator=(GPUOptions_Experimental_VirtualDevices&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const GPUOptions_Experimental_VirtualDevices& default_instance() {
    return *internal_default_instance();
  }
  static inline const GPUOptions_Experimental_VirtualDevices* internal_default_instance() {
    return reinterpret_cast<const GPUOptions_Experimental_VirtualDevices*>(
               &_GPUOptions_Experimental_VirtualDevices_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    0;

  friend void swap(GPUOptions_Experimental_VirtualDevices& a, GPUOptions_Experimental_VirtualDevices& b) {
    a.Swap(&b);
  }
  inline void Swap(GPUOptions_Experimental_VirtualDevices* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
   #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(GPUOptions_Experimental_VirtualDevices* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  GPUOptions_Experimental_VirtualDevices* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<GPUOptions_Experimental_VirtualDevices>(arena);
  }
  GPUOptions_Experimental_VirtualDevices* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
  void CopyFrom(const GPUOptions_Experimental_VirtualDevices& from);
  void MergeFrom(const GPUOptions_Experimental_VirtualDevices& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(GPUOptions_Experimental_VirtualDevices* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.GPUOptions.Experimental.VirtualDevices";
  }
  protected:
  explicit GPUOptions_Experimental_VirtualDevices(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kMemoryLimitMbFieldNumber = 1,
    kPriorityFieldNumber = 2,
    kDeviceOrdinalFieldNumber = 3,
  };
  // repeated float memory_limit_mb = 1;
  int memory_limit_mb_size() const;
  private:
  int _internal_memory_limit_mb_size() const;
  public:
  void clear_memory_limit_mb();
  private:
  float _internal_memory_limit_mb(int index) const;
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >&
      _internal_memory_limit_mb() const;
  void _internal_add_memory_limit_mb(float value);
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >*
      _internal_mutable_memory_limit_mb();
  public:
  float memory_limit_mb(int index) const;
  void set_memory_limit_mb(int index, float value);
  void add_memory_limit_mb(float value);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >&
      memory_limit_mb() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >*
      mutable_memory_limit_mb();

  // repeated int32 priority = 2;
  int priority_size() const;
  private:
  int _internal_priority_size() const;
  public:
  void clear_priority();
  private:
  ::int32_t _internal_priority(int index) const;
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
      _internal_priority() const;
  void _internal_add_priority(::int32_t value);
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
      _internal_mutable_priority();
  public:
  ::int32_t priority(int index) const;
  void set_priority(int index, ::int32_t value);
  void add_priority(::int32_t value);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
      priority() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
      mutable_priority();

  // repeated int32 device_ordinal = 3;
  int device_ordinal_size() const;
  private:
  int _internal_device_ordinal_size() const;
  public:
  void clear_device_ordinal();
  private:
  ::int32_t _internal_device_ordinal(int index) const;
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
      _internal_device_ordinal() const;
  void _internal_add_device_ordinal(::int32_t value);
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
      _internal_mutable_device_ordinal();
  public:
  ::int32_t device_ordinal(int index) const;
  void set_device_ordinal(int index, ::int32_t value);
  void add_device_ordinal(::int32_t value);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
      device_ordinal() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
      mutable_device_ordinal();

  // @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental.VirtualDevices)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::RepeatedField< float > memory_limit_mb_;
    ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t > priority_;
    mutable std::atomic<int> _priority_cached_byte_size_;
    ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t > device_ordinal_;
    mutable std::atomic<int> _device_ordinal_cached_byte_size_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
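// Editor's note (illustrative sketch, not generated code): VirtualDevices
// holds parallel repeated fields, one entry per virtual device carved out of
// a physical GPU. A minimal usage sketch with the accessors declared above:
//
//   tensorflow::GPUOptions_Experimental_VirtualDevices vdevs;
//   vdevs.add_memory_limit_mb(1024.0f);  // first virtual device, 1024 MB
//   vdevs.add_memory_limit_mb(2048.0f);  // second virtual device, 2048 MB
//   vdevs.add_priority(0);
//   vdevs.add_priority(1);
//   // vdevs.memory_limit_mb_size() == 2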
// -------------------------------------------------------------------

class GPUOptions_Experimental final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.GPUOptions.Experimental) */ {
 public:
  inline GPUOptions_Experimental() : GPUOptions_Experimental(nullptr) {}
  ~GPUOptions_Experimental() override;
  explicit PROTOBUF_CONSTEXPR GPUOptions_Experimental(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  GPUOptions_Experimental(const GPUOptions_Experimental& from);
  GPUOptions_Experimental(GPUOptions_Experimental&& from) noexcept
    : GPUOptions_Experimental() {
    *this = ::std::move(from);
  }

  inline GPUOptions_Experimental& operator=(const GPUOptions_Experimental& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline GPUOptions_Experimental& operator=(GPUOptions_Experimental&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const GPUOptions_Experimental& default_instance() {
    return *internal_default_instance();
  }
  static inline const GPUOptions_Experimental* internal_default_instance() {
    return reinterpret_cast<const GPUOptions_Experimental*>(
               &_GPUOptions_Experimental_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    1;

  friend void swap(GPUOptions_Experimental& a, GPUOptions_Experimental& b) {
    a.Swap(&b);
  }
  inline void Swap(GPUOptions_Experimental* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
   #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(GPUOptions_Experimental* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  GPUOptions_Experimental* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<GPUOptions_Experimental>(arena);
  }
  GPUOptions_Experimental* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
  void CopyFrom(const GPUOptions_Experimental& from);
  void MergeFrom(const GPUOptions_Experimental& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(GPUOptions_Experimental* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.GPUOptions.Experimental";
  }
  protected:
  explicit GPUOptions_Experimental(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  typedef GPUOptions_Experimental_VirtualDevices VirtualDevices;

  // accessors -------------------------------------------------------

  enum : int {
    kVirtualDevicesFieldNumber = 1,
    kCollectiveRingOrderFieldNumber = 4,
    kNumDevToDevCopyStreamsFieldNumber = 3,
    kKernelTrackerMaxIntervalFieldNumber = 7,
    kUseUnifiedMemoryFieldNumber = 2,
    kTimestampedAllocatorFieldNumber = 5,
    kUseCudaMallocAsyncFieldNumber = 11,
    kDisallowRetryOnAllocationFailureFieldNumber = 12,
    kKernelTrackerMaxBytesFieldNumber = 8,
    kInternalFragmentationFractionFieldNumber = 10,
    kKernelTrackerMaxPendingFieldNumber = 9,
  };
  // repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
  int virtual_devices_size() const;
  private:
  int _internal_virtual_devices_size() const;
  public:
  void clear_virtual_devices();
  ::tensorflow::GPUOptions_Experimental_VirtualDevices* mutable_virtual_devices(int index);
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GPUOptions_Experimental_VirtualDevices >*
      mutable_virtual_devices();
  private:
  const ::tensorflow::GPUOptions_Experimental_VirtualDevices& _internal_virtual_devices(int index) const;
  ::tensorflow::GPUOptions_Experimental_VirtualDevices* _internal_add_virtual_devices();
  public:
  const ::tensorflow::GPUOptions_Experimental_VirtualDevices& virtual_devices(int index) const;
  ::tensorflow::GPUOptions_Experimental_VirtualDevices* add_virtual_devices();
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GPUOptions_Experimental_VirtualDevices >&
      virtual_devices() const;

  // string collective_ring_order = 4;
  void clear_collective_ring_order();
  const std::string& collective_ring_order() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_collective_ring_order(ArgT0&& arg0, ArgT... args);
  std::string* mutable_collective_ring_order();
  PROTOBUF_NODISCARD std::string* release_collective_ring_order();
  void set_allocated_collective_ring_order(std::string* collective_ring_order);
  private:
  const std::string& _internal_collective_ring_order() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_collective_ring_order(const std::string& value);
  std::string* _internal_mutable_collective_ring_order();
  public:

  // int32 num_dev_to_dev_copy_streams = 3;
  void clear_num_dev_to_dev_copy_streams();
  ::int32_t num_dev_to_dev_copy_streams() const;
  void set_num_dev_to_dev_copy_streams(::int32_t value);
  private:
  ::int32_t _internal_num_dev_to_dev_copy_streams() const;
  void _internal_set_num_dev_to_dev_copy_streams(::int32_t value);
  public:

  // int32 kernel_tracker_max_interval = 7;
  void clear_kernel_tracker_max_interval();
  ::int32_t kernel_tracker_max_interval() const;
  void set_kernel_tracker_max_interval(::int32_t value);
  private:
  ::int32_t _internal_kernel_tracker_max_interval() const;
  void _internal_set_kernel_tracker_max_interval(::int32_t value);
  public:

  // bool use_unified_memory = 2;
  void clear_use_unified_memory();
  bool use_unified_memory() const;
  void set_use_unified_memory(bool value);
  private:
  bool _internal_use_unified_memory() const;
  void _internal_set_use_unified_memory(bool value);
  public:

  // bool timestamped_allocator = 5;
  void clear_timestamped_allocator();
  bool timestamped_allocator() const;
  void set_timestamped_allocator(bool value);
  private:
  bool _internal_timestamped_allocator() const;
  void _internal_set_timestamped_allocator(bool value);
  public:

  // bool use_cuda_malloc_async = 11;
  void clear_use_cuda_malloc_async();
  bool use_cuda_malloc_async() const;
  void set_use_cuda_malloc_async(bool value);
  private:
  bool _internal_use_cuda_malloc_async() const;
  void _internal_set_use_cuda_malloc_async(bool value);
  public:

  // bool disallow_retry_on_allocation_failure = 12;
  void clear_disallow_retry_on_allocation_failure();
  bool disallow_retry_on_allocation_failure() const;
  void set_disallow_retry_on_allocation_failure(bool value);
  private:
  bool _internal_disallow_retry_on_allocation_failure() const;
  void _internal_set_disallow_retry_on_allocation_failure(bool value);
  public:

  // int32 kernel_tracker_max_bytes = 8;
  void clear_kernel_tracker_max_bytes();
  ::int32_t kernel_tracker_max_bytes() const;
  void set_kernel_tracker_max_bytes(::int32_t value);
  private:
  ::int32_t _internal_kernel_tracker_max_bytes() const;
  void _internal_set_kernel_tracker_max_bytes(::int32_t value);
  public:

  // double internal_fragmentation_fraction = 10;
  void clear_internal_fragmentation_fraction();
  double internal_fragmentation_fraction() const;
  void set_internal_fragmentation_fraction(double value);
  private:
  double _internal_internal_fragmentation_fraction() const;
  void _internal_set_internal_fragmentation_fraction(double value);
  public:

  // int32 kernel_tracker_max_pending = 9;
  void clear_kernel_tracker_max_pending();
  ::int32_t kernel_tracker_max_pending() const;
  void set_kernel_tracker_max_pending(::int32_t value);
  private:
  ::int32_t _internal_kernel_tracker_max_pending() const;
  void _internal_set_kernel_tracker_max_pending(::int32_t value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GPUOptions_Experimental_VirtualDevices > virtual_devices_;
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr collective_ring_order_;
    ::int32_t num_dev_to_dev_copy_streams_;
    ::int32_t kernel_tracker_max_interval_;
    bool use_unified_memory_;
    bool timestamped_allocator_;
    bool use_cuda_malloc_async_;
    bool disallow_retry_on_allocation_failure_;
    ::int32_t kernel_tracker_max_bytes_;
    double internal_fragmentation_fraction_;
    ::int32_t kernel_tracker_max_pending_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
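// Editor's note (illustrative sketch, not generated code): a sketch combining
// the scalar setters and the repeated submessage accessor declared above.
// Note that the kFoo...FieldNumber enumerators mirror the .proto field
// numbers, not declaration order:
//
//   tensorflow::GPUOptions_Experimental exp;
//   exp.set_use_unified_memory(true);
//   exp.set_num_dev_to_dev_copy_streams(2);
//   auto* vd = exp.add_virtual_devices();
//   vd->add_memory_limit_mb(512.0f);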
// -------------------------------------------------------------------

class GPUOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.GPUOptions) */ {
 public:
  inline GPUOptions() : GPUOptions(nullptr) {}
  ~GPUOptions() override;
  explicit PROTOBUF_CONSTEXPR GPUOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  GPUOptions(const GPUOptions& from);
  GPUOptions(GPUOptions&& from) noexcept
    : GPUOptions() {
    *this = ::std::move(from);
  }

  inline GPUOptions& operator=(const GPUOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline GPUOptions& operator=(GPUOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const GPUOptions& default_instance() {
    return *internal_default_instance();
  }
  static inline const GPUOptions* internal_default_instance() {
    return reinterpret_cast<const GPUOptions*>(
               &_GPUOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    2;

  friend void swap(GPUOptions& a, GPUOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(GPUOptions* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
   #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(GPUOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  GPUOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<GPUOptions>(arena);
  }
  GPUOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
  void CopyFrom(const GPUOptions& from);
  void MergeFrom(const GPUOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(GPUOptions* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.GPUOptions";
  }
  protected:
  explicit GPUOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  typedef GPUOptions_Experimental Experimental;

  // accessors -------------------------------------------------------

  enum : int {
    kAllocatorTypeFieldNumber = 2,
    kVisibleDeviceListFieldNumber = 5,
    kExperimentalFieldNumber = 9,
    kPerProcessGpuMemoryFractionFieldNumber = 1,
    kDeferredDeletionBytesFieldNumber = 3,
    kPollingActiveDelayUsecsFieldNumber = 6,
    kAllowGrowthFieldNumber = 4,
    kForceGpuCompatibleFieldNumber = 8,
    kPollingInactiveDelayMsecsFieldNumber = 7,
  };
  // string allocator_type = 2;
  void clear_allocator_type();
  const std::string& allocator_type() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_allocator_type(ArgT0&& arg0, ArgT... args);
  std::string* mutable_allocator_type();
  PROTOBUF_NODISCARD std::string* release_allocator_type();
  void set_allocated_allocator_type(std::string* allocator_type);
  private:
  const std::string& _internal_allocator_type() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_allocator_type(const std::string& value);
  std::string* _internal_mutable_allocator_type();
  public:

  // string visible_device_list = 5;
  void clear_visible_device_list();
  const std::string& visible_device_list() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_visible_device_list(ArgT0&& arg0, ArgT... args);
  std::string* mutable_visible_device_list();
  PROTOBUF_NODISCARD std::string* release_visible_device_list();
  void set_allocated_visible_device_list(std::string* visible_device_list);
  private:
  const std::string& _internal_visible_device_list() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_visible_device_list(const std::string& value);
  std::string* _internal_mutable_visible_device_list();
  public:

  // .tensorflow.GPUOptions.Experimental experimental = 9;
  bool has_experimental() const;
  private:
  bool _internal_has_experimental() const;
  public:
  void clear_experimental();
  const ::tensorflow::GPUOptions_Experimental& experimental() const;
  PROTOBUF_NODISCARD ::tensorflow::GPUOptions_Experimental* release_experimental();
  ::tensorflow::GPUOptions_Experimental* mutable_experimental();
  void set_allocated_experimental(::tensorflow::GPUOptions_Experimental* experimental);
  private:
  const ::tensorflow::GPUOptions_Experimental& _internal_experimental() const;
  ::tensorflow::GPUOptions_Experimental* _internal_mutable_experimental();
  public:
  void unsafe_arena_set_allocated_experimental(
      ::tensorflow::GPUOptions_Experimental* experimental);
  ::tensorflow::GPUOptions_Experimental* unsafe_arena_release_experimental();

  // double per_process_gpu_memory_fraction = 1;
  void clear_per_process_gpu_memory_fraction();
  double per_process_gpu_memory_fraction() const;
  void set_per_process_gpu_memory_fraction(double value);
  private:
  double _internal_per_process_gpu_memory_fraction() const;
  void _internal_set_per_process_gpu_memory_fraction(double value);
  public:

  // int64 deferred_deletion_bytes = 3;
  void clear_deferred_deletion_bytes();
  ::int64_t deferred_deletion_bytes() const;
  void set_deferred_deletion_bytes(::int64_t value);
  private:
  ::int64_t _internal_deferred_deletion_bytes() const;
  void _internal_set_deferred_deletion_bytes(::int64_t value);
  public:

  // int32 polling_active_delay_usecs = 6;
  void clear_polling_active_delay_usecs();
  ::int32_t polling_active_delay_usecs() const;
  void set_polling_active_delay_usecs(::int32_t value);
  private:
  ::int32_t _internal_polling_active_delay_usecs() const;
  void _internal_set_polling_active_delay_usecs(::int32_t value);
  public:

  // bool allow_growth = 4;
  void clear_allow_growth();
  bool allow_growth() const;
  void set_allow_growth(bool value);
  private:
  bool _internal_allow_growth() const;
  void _internal_set_allow_growth(bool value);
  public:

  // bool force_gpu_compatible = 8;
  void clear_force_gpu_compatible();
  bool force_gpu_compatible() const;
  void set_force_gpu_compatible(bool value);
  private:
  bool _internal_force_gpu_compatible() const;
  void _internal_set_force_gpu_compatible(bool value);
  public:

  // int32 polling_inactive_delay_msecs = 7;
  void clear_polling_inactive_delay_msecs();
  ::int32_t polling_inactive_delay_msecs() const;
  void set_polling_inactive_delay_msecs(::int32_t value);
  private:
  ::int32_t _internal_polling_inactive_delay_msecs() const;
  void _internal_set_polling_inactive_delay_msecs(::int32_t value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.GPUOptions)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr allocator_type_;
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr visible_device_list_;
    ::tensorflow::GPUOptions_Experimental* experimental_;
    double per_process_gpu_memory_fraction_;
    ::int64_t deferred_deletion_bytes_;
    ::int32_t polling_active_delay_usecs_;
    bool allow_growth_;
    bool force_gpu_compatible_;
    ::int32_t polling_inactive_delay_msecs_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
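// Editor's note (illustrative sketch, not generated code): the Experimental
// submessage follows the usual protobuf presence rules -- mutable_experimental()
// allocates it on first use and has_experimental() reports presence:
//
//   tensorflow::GPUOptions gpu;
//   gpu.set_allow_growth(true);
//   gpu.set_per_process_gpu_memory_fraction(0.5);
//   gpu.mutable_experimental()->set_use_cuda_malloc_async(true);
//   // gpu.has_experimental() is now true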
// -------------------------------------------------------------------

class OptimizerOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.OptimizerOptions) */ {
 public:
  inline OptimizerOptions() : OptimizerOptions(nullptr) {}
  ~OptimizerOptions() override;
  explicit PROTOBUF_CONSTEXPR OptimizerOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  OptimizerOptions(const OptimizerOptions& from);
  OptimizerOptions(OptimizerOptions&& from) noexcept
    : OptimizerOptions() {
    *this = ::std::move(from);
  }

  inline OptimizerOptions& operator=(const OptimizerOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline OptimizerOptions& operator=(OptimizerOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const OptimizerOptions& default_instance() {
    return *internal_default_instance();
  }
  static inline const OptimizerOptions* internal_default_instance() {
    return reinterpret_cast<const OptimizerOptions*>(
               &_OptimizerOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    3;

  friend void swap(OptimizerOptions& a, OptimizerOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(OptimizerOptions* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
   #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(OptimizerOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  OptimizerOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<OptimizerOptions>(arena);
  }
  OptimizerOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
  void CopyFrom(const OptimizerOptions& from);
  void MergeFrom(const OptimizerOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(OptimizerOptions* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.OptimizerOptions";
  }
  protected:
  explicit OptimizerOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  typedef OptimizerOptions_Level Level;
  static constexpr Level L1 =
    OptimizerOptions_Level_L1;
  static constexpr Level L0 =
    OptimizerOptions_Level_L0;
  static inline bool Level_IsValid(int value) {
    return OptimizerOptions_Level_IsValid(value);
  }
  static constexpr Level Level_MIN =
    OptimizerOptions_Level_Level_MIN;
  static constexpr Level Level_MAX =
    OptimizerOptions_Level_Level_MAX;
  static constexpr int Level_ARRAYSIZE =
    OptimizerOptions_Level_Level_ARRAYSIZE;
  template<typename T>
  static inline const std::string& Level_Name(T enum_t_value) {
    static_assert(::std::is_same<T, Level>::value ||
      ::std::is_integral<T>::value,
      "Incorrect type passed to function Level_Name.");
    return OptimizerOptions_Level_Name(enum_t_value);
  }
  static inline bool Level_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
      Level* value) {
    return OptimizerOptions_Level_Parse(name, value);
  }

  typedef OptimizerOptions_GlobalJitLevel GlobalJitLevel;
  static constexpr GlobalJitLevel DEFAULT =
    OptimizerOptions_GlobalJitLevel_DEFAULT;
  static constexpr GlobalJitLevel OFF =
    OptimizerOptions_GlobalJitLevel_OFF;
  static constexpr GlobalJitLevel ON_1 =
    OptimizerOptions_GlobalJitLevel_ON_1;
  static constexpr GlobalJitLevel ON_2 =
    OptimizerOptions_GlobalJitLevel_ON_2;
  static inline bool GlobalJitLevel_IsValid(int value) {
    return OptimizerOptions_GlobalJitLevel_IsValid(value);
  }
  static constexpr GlobalJitLevel GlobalJitLevel_MIN =
    OptimizerOptions_GlobalJitLevel_GlobalJitLevel_MIN;
  static constexpr GlobalJitLevel GlobalJitLevel_MAX =
    OptimizerOptions_GlobalJitLevel_GlobalJitLevel_MAX;
  static constexpr int GlobalJitLevel_ARRAYSIZE =
    OptimizerOptions_GlobalJitLevel_GlobalJitLevel_ARRAYSIZE;
  template<typename T>
  static inline const std::string& GlobalJitLevel_Name(T enum_t_value) {
    static_assert(::std::is_same<T, GlobalJitLevel>::value ||
      ::std::is_integral<T>::value,
      "Incorrect type passed to function GlobalJitLevel_Name.");
    return OptimizerOptions_GlobalJitLevel_Name(enum_t_value);
  }
  static inline bool GlobalJitLevel_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
      GlobalJitLevel* value) {
    return OptimizerOptions_GlobalJitLevel_Parse(name, value);
  }

  // accessors -------------------------------------------------------

  enum : int {
    kOptLevelFieldNumber = 3,
    kDoCommonSubexpressionEliminationFieldNumber = 1,
    kDoConstantFoldingFieldNumber = 2,
    kDoFunctionInliningFieldNumber = 4,
    kCpuGlobalJitFieldNumber = 7,
    kMaxFoldedConstantInBytesFieldNumber = 6,
    kGlobalJitLevelFieldNumber = 5,
  };
  // .tensorflow.OptimizerOptions.Level opt_level = 3;
  void clear_opt_level();
  ::tensorflow::OptimizerOptions_Level opt_level() const;
  void set_opt_level(::tensorflow::OptimizerOptions_Level value);
  private:
  ::tensorflow::OptimizerOptions_Level _internal_opt_level() const;
  void _internal_set_opt_level(::tensorflow::OptimizerOptions_Level value);
  public:

  // bool do_common_subexpression_elimination = 1;
  void clear_do_common_subexpression_elimination();
  bool do_common_subexpression_elimination() const;
  void set_do_common_subexpression_elimination(bool value);
  private:
  bool _internal_do_common_subexpression_elimination() const;
  void _internal_set_do_common_subexpression_elimination(bool value);
  public:

  // bool do_constant_folding = 2;
  void clear_do_constant_folding();
  bool do_constant_folding() const;
  void set_do_constant_folding(bool value);
  private:
  bool _internal_do_constant_folding() const;
  void _internal_set_do_constant_folding(bool value);
  public:

  // bool do_function_inlining = 4;
  void clear_do_function_inlining();
  bool do_function_inlining() const;
  void set_do_function_inlining(bool value);
  private:
  bool _internal_do_function_inlining() const;
  void _internal_set_do_function_inlining(bool value);
  public:

  // bool cpu_global_jit = 7;
  void clear_cpu_global_jit();
  bool cpu_global_jit() const;
  void set_cpu_global_jit(bool value);
  private:
  bool _internal_cpu_global_jit() const;
  void _internal_set_cpu_global_jit(bool value);
  public:

  // int64 max_folded_constant_in_bytes = 6;
  void clear_max_folded_constant_in_bytes();
  ::int64_t max_folded_constant_in_bytes() const;
  void set_max_folded_constant_in_bytes(::int64_t value);
  private:
  ::int64_t _internal_max_folded_constant_in_bytes() const;
  void _internal_set_max_folded_constant_in_bytes(::int64_t value);
  public:

  // .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
  void clear_global_jit_level();
  ::tensorflow::OptimizerOptions_GlobalJitLevel global_jit_level() const;
  void set_global_jit_level(::tensorflow::OptimizerOptions_GlobalJitLevel value);
  private:
  ::tensorflow::OptimizerOptions_GlobalJitLevel _internal_global_jit_level() const;
  void _internal_set_global_jit_level(::tensorflow::OptimizerOptions_GlobalJitLevel value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.OptimizerOptions)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    int opt_level_;
    bool do_common_subexpression_elimination_;
    bool do_constant_folding_;
    bool do_function_inlining_;
    bool cpu_global_jit_;
    ::int64_t max_folded_constant_in_bytes_;
    int global_jit_level_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
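// Editor's note (illustrative sketch, not generated code): the nested
// typedefs and constants above (Level, GlobalJitLevel, L0/L1, OFF/ON_1/ON_2)
// let callers use short names in place of the mangled top-level enumerators:
//
//   tensorflow::OptimizerOptions opts;
//   opts.set_opt_level(tensorflow::OptimizerOptions::L0);
//   opts.set_global_jit_level(tensorflow::OptimizerOptions::ON_1);
//   opts.set_do_constant_folding(true);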
1189 // -------------------------------------------------------------------
1190 
1191 class GraphOptions final :
1192     public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.GraphOptions) */ {
1193  public:
1194   inline GraphOptions() : GraphOptions(nullptr) {}
1195   ~GraphOptions() override;
1196   explicit PROTOBUF_CONSTEXPR GraphOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
1197 
1198   GraphOptions(const GraphOptions& from);
1199   GraphOptions(GraphOptions&& from) noexcept
1200     : GraphOptions() {
1201     *this = ::std::move(from);
1202   }
1203 
1204   inline GraphOptions& operator=(const GraphOptions& from) {
1205     if (this == &from) return *this;
1206     CopyFrom(from);
1207     return *this;
1208   }
1209   inline GraphOptions& operator=(GraphOptions&& from) noexcept {
1210     if (this == &from) return *this;
1211     if (GetOwningArena() == from.GetOwningArena()
1212   #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
1213         && GetOwningArena() != nullptr
1214   #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
1215     ) {
1216       InternalSwap(&from);
1217     } else {
1218       CopyFrom(from);
1219     }
1220     return *this;
1221   }
1222 
1223   static const GraphOptions& default_instance() {
1224     return *internal_default_instance();
1225   }
1226   static inline const GraphOptions* internal_default_instance() {
1227     return reinterpret_cast<const GraphOptions*>(
1228                &_GraphOptions_default_instance_);
1229   }
1230   static constexpr int kIndexInFileMessages =
1231     4;
1232 
1233   friend void swap(GraphOptions& a, GraphOptions& b) {
1234     a.Swap(&b);
1235   }
1236   inline void Swap(GraphOptions* other) {
1237     if (other == this) return;
1238   #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
1239     if (GetOwningArena() != nullptr &&
1240         GetOwningArena() == other->GetOwningArena()) {
1241    #else  // PROTOBUF_FORCE_COPY_IN_SWAP
1242     if (GetOwningArena() == other->GetOwningArena()) {
1243   #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
1244       InternalSwap(other);
1245     } else {
1246       ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
1247     }
1248   }
1249   void UnsafeArenaSwap(GraphOptions* other) {
1250     if (other == this) return;
1251     GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
1252     InternalSwap(other);
1253   }
1254 
1255   // implements Message ----------------------------------------------
1256 
1257   GraphOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
1258     return CreateMaybeMessage<GraphOptions>(arena);
1259   }
1260   GraphOptions* New() const {
1261     return New(nullptr);
1262   }
1263   void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
1264   void CopyFrom(const GraphOptions& from);
1265   void MergeFrom(const GraphOptions& from);
1266   PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
1267   bool IsInitialized() const final;
1268 
1269   size_t ByteSizeLong() const final;
1270   const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
1271   ::uint8_t* _InternalSerialize(
1272       ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
1273   int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
1274 
1275   private:
1276   void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
1277   void SharedDtor();
1278   void SetCachedSize(int size) const;
1279   void InternalSwap(GraphOptions* other);
1280 
1281   private:
1282   friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
1283   static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
1284     return "tensorflow.GraphOptions";
1285   }
1286   protected:
1287   explicit GraphOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1288                        bool is_message_owned = false);
1289   public:
1290 
1291   std::string GetTypeName() const final;
1292 
1293   // nested types ----------------------------------------------------
1294 
1295   // accessors -------------------------------------------------------
1296 
1297   enum : int {
1298     kOptimizerOptionsFieldNumber = 3,
1299     kRewriteOptionsFieldNumber = 10,
1300     kBuildCostModelFieldNumber = 4,
1301     kEnableRecvSchedulingFieldNumber = 2,
1302     kInferShapesFieldNumber = 5,
1303     kPlacePrunedGraphFieldNumber = 6,
1304     kEnableBfloat16SendrecvFieldNumber = 7,
1305     kTimelineStepFieldNumber = 8,
1306     kBuildCostModelAfterFieldNumber = 9,
1307   };
1308   // .tensorflow.OptimizerOptions optimizer_options = 3;
1309   bool has_optimizer_options() const;
1310   private:
1311   bool _internal_has_optimizer_options() const;
1312   public:
1313   void clear_optimizer_options();
1314   const ::tensorflow::OptimizerOptions& optimizer_options() const;
1315   PROTOBUF_NODISCARD ::tensorflow::OptimizerOptions* release_optimizer_options();
1316   ::tensorflow::OptimizerOptions* mutable_optimizer_options();
1317   void set_allocated_optimizer_options(::tensorflow::OptimizerOptions* optimizer_options);
1318   private:
1319   const ::tensorflow::OptimizerOptions& _internal_optimizer_options() const;
1320   ::tensorflow::OptimizerOptions* _internal_mutable_optimizer_options();
1321   public:
1322   void unsafe_arena_set_allocated_optimizer_options(
1323       ::tensorflow::OptimizerOptions* optimizer_options);
1324   ::tensorflow::OptimizerOptions* unsafe_arena_release_optimizer_options();
1325 
1326   // .tensorflow.RewriterConfig rewrite_options = 10;
1327   bool has_rewrite_options() const;
1328   private:
1329   bool _internal_has_rewrite_options() const;
1330   public:
1331   void clear_rewrite_options();
1332   const ::tensorflow::RewriterConfig& rewrite_options() const;
1333   PROTOBUF_NODISCARD ::tensorflow::RewriterConfig* release_rewrite_options();
1334   ::tensorflow::RewriterConfig* mutable_rewrite_options();
1335   void set_allocated_rewrite_options(::tensorflow::RewriterConfig* rewrite_options);
1336   private:
1337   const ::tensorflow::RewriterConfig& _internal_rewrite_options() const;
1338   ::tensorflow::RewriterConfig* _internal_mutable_rewrite_options();
1339   public:
1340   void unsafe_arena_set_allocated_rewrite_options(
1341       ::tensorflow::RewriterConfig* rewrite_options);
1342   ::tensorflow::RewriterConfig* unsafe_arena_release_rewrite_options();
1343 
1344   // int64 build_cost_model = 4;
1345   void clear_build_cost_model();
1346   ::int64_t build_cost_model() const;
1347   void set_build_cost_model(::int64_t value);
1348   private:
1349   ::int64_t _internal_build_cost_model() const;
1350   void _internal_set_build_cost_model(::int64_t value);
1351   public:
1352 
1353   // bool enable_recv_scheduling = 2;
1354   void clear_enable_recv_scheduling();
1355   bool enable_recv_scheduling() const;
1356   void set_enable_recv_scheduling(bool value);
1357   private:
1358   bool _internal_enable_recv_scheduling() const;
1359   void _internal_set_enable_recv_scheduling(bool value);
1360   public:
1361 
1362   // bool infer_shapes = 5;
1363   void clear_infer_shapes();
1364   bool infer_shapes() const;
1365   void set_infer_shapes(bool value);
1366   private:
1367   bool _internal_infer_shapes() const;
1368   void _internal_set_infer_shapes(bool value);
1369   public:
1370 
1371   // bool place_pruned_graph = 6;
1372   void clear_place_pruned_graph();
1373   bool place_pruned_graph() const;
1374   void set_place_pruned_graph(bool value);
1375   private:
1376   bool _internal_place_pruned_graph() const;
1377   void _internal_set_place_pruned_graph(bool value);
1378   public:
1379 
1380   // bool enable_bfloat16_sendrecv = 7;
1381   void clear_enable_bfloat16_sendrecv();
1382   bool enable_bfloat16_sendrecv() const;
1383   void set_enable_bfloat16_sendrecv(bool value);
1384   private:
1385   bool _internal_enable_bfloat16_sendrecv() const;
1386   void _internal_set_enable_bfloat16_sendrecv(bool value);
1387   public:
1388 
1389   // int32 timeline_step = 8;
1390   void clear_timeline_step();
1391   ::int32_t timeline_step() const;
1392   void set_timeline_step(::int32_t value);
1393   private:
1394   ::int32_t _internal_timeline_step() const;
1395   void _internal_set_timeline_step(::int32_t value);
1396   public:
1397 
1398   // int64 build_cost_model_after = 9;
1399   void clear_build_cost_model_after();
1400   ::int64_t build_cost_model_after() const;
1401   void set_build_cost_model_after(::int64_t value);
1402   private:
1403   ::int64_t _internal_build_cost_model_after() const;
1404   void _internal_set_build_cost_model_after(::int64_t value);
1405   public:
1406 
1407   // @@protoc_insertion_point(class_scope:tensorflow.GraphOptions)
1408  private:
1409   class _Internal;
1410 
1411   template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
1412   typedef void InternalArenaConstructable_;
1413   typedef void DestructorSkippable_;
1414   struct Impl_ {
1415     ::tensorflow::OptimizerOptions* optimizer_options_;
1416     ::tensorflow::RewriterConfig* rewrite_options_;
1417     ::int64_t build_cost_model_;
1418     bool enable_recv_scheduling_;
1419     bool infer_shapes_;
1420     bool place_pruned_graph_;
1421     bool enable_bfloat16_sendrecv_;
1422     ::int32_t timeline_step_;
1423     ::int64_t build_cost_model_after_;
1424     mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
1425   };
1426   union { Impl_ _impl_; };
1427   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
1428 };
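
// Usage sketch (hand-written illustration, not generated code): GraphOptions
// aggregates the OptimizerOptions message above plus per-graph knobs. All
// calls below are declared in this class; the values are arbitrary examples.
//
//   tensorflow::GraphOptions graph_opts;
//   graph_opts.set_infer_shapes(true);
//   graph_opts.set_timeline_step(100);  // collect timeline data every 100 steps
//   graph_opts.mutable_optimizer_options()->set_do_function_inlining(true);
//   bool has_rewrites = graph_opts.has_rewrite_options();  // false until set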
1429 // -------------------------------------------------------------------
1430 
1431 class ThreadPoolOptionProto final :
1432     public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.ThreadPoolOptionProto) */ {
1433  public:
1434   inline ThreadPoolOptionProto() : ThreadPoolOptionProto(nullptr) {}
1435   ~ThreadPoolOptionProto() override;
1436   explicit PROTOBUF_CONSTEXPR ThreadPoolOptionProto(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
1437 
1438   ThreadPoolOptionProto(const ThreadPoolOptionProto& from);
1439   ThreadPoolOptionProto(ThreadPoolOptionProto&& from) noexcept
1440     : ThreadPoolOptionProto() {
1441     *this = ::std::move(from);
1442   }
1443 
1444   inline ThreadPoolOptionProto& operator=(const ThreadPoolOptionProto& from) {
1445     if (this == &from) return *this;
1446     CopyFrom(from);
1447     return *this;
1448   }
1449   inline ThreadPoolOptionProto& operator=(ThreadPoolOptionProto&& from) noexcept {
1450     if (this == &from) return *this;
1451     if (GetOwningArena() == from.GetOwningArena()
1452   #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
1453         && GetOwningArena() != nullptr
1454   #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
1455     ) {
1456       InternalSwap(&from);
1457     } else {
1458       CopyFrom(from);
1459     }
1460     return *this;
1461   }
1462 
1463   static const ThreadPoolOptionProto& default_instance() {
1464     return *internal_default_instance();
1465   }
1466   static inline const ThreadPoolOptionProto* internal_default_instance() {
1467     return reinterpret_cast<const ThreadPoolOptionProto*>(
1468                &_ThreadPoolOptionProto_default_instance_);
1469   }
1470   static constexpr int kIndexInFileMessages =
1471     5;
1472 
1473   friend void swap(ThreadPoolOptionProto& a, ThreadPoolOptionProto& b) {
1474     a.Swap(&b);
1475   }
1476   inline void Swap(ThreadPoolOptionProto* other) {
1477     if (other == this) return;
1478   #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
1479     if (GetOwningArena() != nullptr &&
1480         GetOwningArena() == other->GetOwningArena()) {
1481    #else  // PROTOBUF_FORCE_COPY_IN_SWAP
1482     if (GetOwningArena() == other->GetOwningArena()) {
1483   #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
1484       InternalSwap(other);
1485     } else {
1486       ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
1487     }
1488   }
1489   void UnsafeArenaSwap(ThreadPoolOptionProto* other) {
1490     if (other == this) return;
1491     GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
1492     InternalSwap(other);
1493   }
1494 
1495   // implements Message ----------------------------------------------
1496 
1497   ThreadPoolOptionProto* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
1498     return CreateMaybeMessage<ThreadPoolOptionProto>(arena);
1499   }
1500   ThreadPoolOptionProto* New() const {
1501     return New(nullptr);
1502   }
1503   void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
1504   void CopyFrom(const ThreadPoolOptionProto& from);
1505   void MergeFrom(const ThreadPoolOptionProto& from);
1506   PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
1507   bool IsInitialized() const final;
1508 
1509   size_t ByteSizeLong() const final;
1510   const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
1511   ::uint8_t* _InternalSerialize(
1512       ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
1513   int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
1514 
1515   private:
1516   void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
1517   void SharedDtor();
1518   void SetCachedSize(int size) const;
1519   void InternalSwap(ThreadPoolOptionProto* other);
1520 
1521   private:
1522   friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
1523   static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
1524     return "tensorflow.ThreadPoolOptionProto";
1525   }
1526   protected:
1527   explicit ThreadPoolOptionProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1528                        bool is_message_owned = false);
1529   public:
1530 
1531   std::string GetTypeName() const final;
1532 
1533   // nested types ----------------------------------------------------
1534 
1535   // accessors -------------------------------------------------------
1536 
1537   enum : int {
1538     kGlobalNameFieldNumber = 2,
1539     kNumThreadsFieldNumber = 1,
1540   };
1541   // string global_name = 2;
1542   void clear_global_name();
1543   const std::string& global_name() const;
1544   template <typename ArgT0 = const std::string&, typename... ArgT>
1545   void set_global_name(ArgT0&& arg0, ArgT... args);
1546   std::string* mutable_global_name();
1547   PROTOBUF_NODISCARD std::string* release_global_name();
1548   void set_allocated_global_name(std::string* global_name);
1549   private:
1550   const std::string& _internal_global_name() const;
1551   inline PROTOBUF_ALWAYS_INLINE void _internal_set_global_name(const std::string& value);
1552   std::string* _internal_mutable_global_name();
1553   public:
1554 
1555   // int32 num_threads = 1;
1556   void clear_num_threads();
1557   ::int32_t num_threads() const;
1558   void set_num_threads(::int32_t value);
1559   private:
1560   ::int32_t _internal_num_threads() const;
1561   void _internal_set_num_threads(::int32_t value);
1562   public:
1563 
1564   // @@protoc_insertion_point(class_scope:tensorflow.ThreadPoolOptionProto)
1565  private:
1566   class _Internal;
1567 
1568   template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
1569   typedef void InternalArenaConstructable_;
1570   typedef void DestructorSkippable_;
1571   struct Impl_ {
1572     ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr global_name_;
1573     ::int32_t num_threads_;
1574     mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
1575   };
1576   union { Impl_ _impl_; };
1577   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
1578 };
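
// Usage sketch (hand-written illustration, not generated code): describing a
// thread pool via the two fields above. The pool name is an arbitrary example.
//
//   tensorflow::ThreadPoolOptionProto pool;
//   pool.set_num_threads(8);
//   pool.set_global_name("shared_inter_op_pool");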
1579 // -------------------------------------------------------------------
1580 
1581 class RPCOptions final :
1582     public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.RPCOptions) */ {
1583  public:
1584   inline RPCOptions() : RPCOptions(nullptr) {}
1585   ~RPCOptions() override;
1586   explicit PROTOBUF_CONSTEXPR RPCOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
1587 
1588   RPCOptions(const RPCOptions& from);
1589   RPCOptions(RPCOptions&& from) noexcept
1590     : RPCOptions() {
1591     *this = ::std::move(from);
1592   }
1593 
1594   inline RPCOptions& operator=(const RPCOptions& from) {
1595     if (this == &from) return *this;
1596     CopyFrom(from);
1597     return *this;
1598   }
1599   inline RPCOptions& operator=(RPCOptions&& from) noexcept {
1600     if (this == &from) return *this;
1601     if (GetOwningArena() == from.GetOwningArena()
1602   #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
1603         && GetOwningArena() != nullptr
1604   #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
1605     ) {
1606       InternalSwap(&from);
1607     } else {
1608       CopyFrom(from);
1609     }
1610     return *this;
1611   }
1612 
1613   static const RPCOptions& default_instance() {
1614     return *internal_default_instance();
1615   }
1616   static inline const RPCOptions* internal_default_instance() {
1617     return reinterpret_cast<const RPCOptions*>(
1618                &_RPCOptions_default_instance_);
1619   }
1620   static constexpr int kIndexInFileMessages =
1621     6;
1622 
1623   friend void swap(RPCOptions& a, RPCOptions& b) {
1624     a.Swap(&b);
1625   }
1626   inline void Swap(RPCOptions* other) {
1627     if (other == this) return;
1628   #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
1629     if (GetOwningArena() != nullptr &&
1630         GetOwningArena() == other->GetOwningArena()) {
1631    #else  // PROTOBUF_FORCE_COPY_IN_SWAP
1632     if (GetOwningArena() == other->GetOwningArena()) {
1633   #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
1634       InternalSwap(other);
1635     } else {
1636       ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
1637     }
1638   }
1639   void UnsafeArenaSwap(RPCOptions* other) {
1640     if (other == this) return;
1641     GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
1642     InternalSwap(other);
1643   }
1644 
1645   // implements Message ----------------------------------------------
1646 
1647   RPCOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
1648     return CreateMaybeMessage<RPCOptions>(arena);
1649   }
1650   RPCOptions* New() const {
1651     return New(nullptr);
1652   }
1653   void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
1654   void CopyFrom(const RPCOptions& from);
1655   void MergeFrom(const RPCOptions& from);
1656   PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
1657   bool IsInitialized() const final;
1658 
1659   size_t ByteSizeLong() const final;
1660   const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
1661   ::uint8_t* _InternalSerialize(
1662       ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
1663   int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
1664 
1665   private:
1666   void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
1667   void SharedDtor();
1668   void SetCachedSize(int size) const;
1669   void InternalSwap(RPCOptions* other);
1670 
1671   private:
1672   friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
1673   static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
1674     return "tensorflow.RPCOptions";
1675   }
1676   protected:
1677   explicit RPCOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1678                        bool is_message_owned = false);
1679   public:
1680 
1681   std::string GetTypeName() const final;
1682 
1683   // nested types ----------------------------------------------------
1684 
1685   // accessors -------------------------------------------------------
1686 
1687   enum : int {
1688     kCompressionAlgorithmFieldNumber = 2,
1689     kCompressionLevelFieldNumber = 3,
1690     kUseRpcForInprocessMasterFieldNumber = 1,
1691     kCacheRpcResponseFieldNumber = 4,
1692     kDisableSessionConnectionSharingFieldNumber = 5,
1693     kNumChannelsPerTargetFieldNumber = 6,
1694   };
1695   // string compression_algorithm = 2;
1696   void clear_compression_algorithm();
1697   const std::string& compression_algorithm() const;
1698   template <typename ArgT0 = const std::string&, typename... ArgT>
1699   void set_compression_algorithm(ArgT0&& arg0, ArgT... args);
1700   std::string* mutable_compression_algorithm();
1701   PROTOBUF_NODISCARD std::string* release_compression_algorithm();
1702   void set_allocated_compression_algorithm(std::string* compression_algorithm);
1703   private:
1704   const std::string& _internal_compression_algorithm() const;
1705   inline PROTOBUF_ALWAYS_INLINE void _internal_set_compression_algorithm(const std::string& value);
1706   std::string* _internal_mutable_compression_algorithm();
1707   public:
1708 
1709   // int32 compression_level = 3;
1710   void clear_compression_level();
1711   ::int32_t compression_level() const;
1712   void set_compression_level(::int32_t value);
1713   private:
1714   ::int32_t _internal_compression_level() const;
1715   void _internal_set_compression_level(::int32_t value);
1716   public:
1717 
1718   // bool use_rpc_for_inprocess_master = 1;
1719   void clear_use_rpc_for_inprocess_master();
1720   bool use_rpc_for_inprocess_master() const;
1721   void set_use_rpc_for_inprocess_master(bool value);
1722   private:
1723   bool _internal_use_rpc_for_inprocess_master() const;
1724   void _internal_set_use_rpc_for_inprocess_master(bool value);
1725   public:
1726 
1727   // bool cache_rpc_response = 4;
1728   void clear_cache_rpc_response();
1729   bool cache_rpc_response() const;
1730   void set_cache_rpc_response(bool value);
1731   private:
1732   bool _internal_cache_rpc_response() const;
1733   void _internal_set_cache_rpc_response(bool value);
1734   public:
1735 
1736   // bool disable_session_connection_sharing = 5;
1737   void clear_disable_session_connection_sharing();
1738   bool disable_session_connection_sharing() const;
1739   void set_disable_session_connection_sharing(bool value);
1740   private:
1741   bool _internal_disable_session_connection_sharing() const;
1742   void _internal_set_disable_session_connection_sharing(bool value);
1743   public:
1744 
1745   // int32 num_channels_per_target = 6;
1746   void clear_num_channels_per_target();
1747   ::int32_t num_channels_per_target() const;
1748   void set_num_channels_per_target(::int32_t value);
1749   private:
1750   ::int32_t _internal_num_channels_per_target() const;
1751   void _internal_set_num_channels_per_target(::int32_t value);
1752   public:
1753 
1754   // @@protoc_insertion_point(class_scope:tensorflow.RPCOptions)
1755  private:
1756   class _Internal;
1757 
1758   template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
1759   typedef void InternalArenaConstructable_;
1760   typedef void DestructorSkippable_;
1761   struct Impl_ {
1762     ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr compression_algorithm_;
1763     ::int32_t compression_level_;
1764     bool use_rpc_for_inprocess_master_;
1765     bool cache_rpc_response_;
1766     bool disable_session_connection_sharing_;
1767     ::int32_t num_channels_per_target_;
1768     mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
1769   };
1770   union { Impl_ _impl_; };
1771   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
1772 };
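
// Usage sketch (hand-written illustration, not generated code): tuning RPC
// behavior. The compression algorithm string is an example value; accepted
// names are defined by the RPC layer, not by this message.
//
//   tensorflow::RPCOptions rpc;
//   rpc.set_compression_algorithm("deflate");
//   rpc.set_compression_level(2);
//   rpc.set_disable_session_connection_sharing(false);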
1773 // -------------------------------------------------------------------
1774 
1775 class SessionMetadata final :
1776     public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.SessionMetadata) */ {
1777  public:
1778   inline SessionMetadata() : SessionMetadata(nullptr) {}
1779   ~SessionMetadata() override;
1780   explicit PROTOBUF_CONSTEXPR SessionMetadata(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
1781 
1782   SessionMetadata(const SessionMetadata& from);
1783   SessionMetadata(SessionMetadata&& from) noexcept
1784     : SessionMetadata() {
1785     *this = ::std::move(from);
1786   }
1787 
1788   inline SessionMetadata& operator=(const SessionMetadata& from) {
1789     if (this == &from) return *this;
1790     CopyFrom(from);
1791     return *this;
1792   }
1793   inline SessionMetadata& operator=(SessionMetadata&& from) noexcept {
1794     if (this == &from) return *this;
1795     if (GetOwningArena() == from.GetOwningArena()
1796   #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
1797         && GetOwningArena() != nullptr
1798   #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
1799     ) {
1800       InternalSwap(&from);
1801     } else {
1802       CopyFrom(from);
1803     }
1804     return *this;
1805   }
1806 
1807   static const SessionMetadata& default_instance() {
1808     return *internal_default_instance();
1809   }
1810   static inline const SessionMetadata* internal_default_instance() {
1811     return reinterpret_cast<const SessionMetadata*>(
1812                &_SessionMetadata_default_instance_);
1813   }
1814   static constexpr int kIndexInFileMessages =
1815     7;
1816 
1817   friend void swap(SessionMetadata& a, SessionMetadata& b) {
1818     a.Swap(&b);
1819   }
1820   inline void Swap(SessionMetadata* other) {
1821     if (other == this) return;
1822   #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
1823     if (GetOwningArena() != nullptr &&
1824         GetOwningArena() == other->GetOwningArena()) {
1825    #else  // PROTOBUF_FORCE_COPY_IN_SWAP
1826     if (GetOwningArena() == other->GetOwningArena()) {
1827   #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
1828       InternalSwap(other);
1829     } else {
1830       ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
1831     }
1832   }
1833   void UnsafeArenaSwap(SessionMetadata* other) {
1834     if (other == this) return;
1835     GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
1836     InternalSwap(other);
1837   }
1838 
1839   // implements Message ----------------------------------------------
1840 
1841   SessionMetadata* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
1842     return CreateMaybeMessage<SessionMetadata>(arena);
1843   }
1844   SessionMetadata* New() const {
1845     return New(nullptr);
1846   }
1847   void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
1848   void CopyFrom(const SessionMetadata& from);
1849   void MergeFrom(const SessionMetadata& from);
1850   PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
1851   bool IsInitialized() const final;
1852 
1853   size_t ByteSizeLong() const final;
1854   const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
1855   ::uint8_t* _InternalSerialize(
1856       ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
1857   int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
1858 
1859   private:
1860   void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
1861   void SharedDtor();
1862   void SetCachedSize(int size) const;
1863   void InternalSwap(SessionMetadata* other);
1864 
1865   private:
1866   friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
1867   static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
1868     return "tensorflow.SessionMetadata";
1869   }
1870   protected:
1871   explicit SessionMetadata(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1872                        bool is_message_owned = false);
1873   public:
1874 
1875   std::string GetTypeName() const final;
1876 
1877   // nested types ----------------------------------------------------
1878 
1879   // accessors -------------------------------------------------------
1880 
1881   enum : int {
1882     kNameFieldNumber = 1,
1883     kVersionFieldNumber = 2,
1884   };
1885   // string name = 1;
1886   void clear_name();
1887   const std::string& name() const;
1888   template <typename ArgT0 = const std::string&, typename... ArgT>
1889   void set_name(ArgT0&& arg0, ArgT... args);
1890   std::string* mutable_name();
1891   PROTOBUF_NODISCARD std::string* release_name();
1892   void set_allocated_name(std::string* name);
1893   private:
1894   const std::string& _internal_name() const;
1895   inline PROTOBUF_ALWAYS_INLINE void _internal_set_name(const std::string& value);
1896   std::string* _internal_mutable_name();
1897   public:
1898 
1899   // int64 version = 2;
1900   void clear_version();
1901   ::int64_t version() const;
1902   void set_version(::int64_t value);
1903   private:
1904   ::int64_t _internal_version() const;
1905   void _internal_set_version(::int64_t value);
1906   public:
1907 
1908   // @@protoc_insertion_point(class_scope:tensorflow.SessionMetadata)
1909  private:
1910   class _Internal;
1911 
1912   template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
1913   typedef void InternalArenaConstructable_;
1914   typedef void DestructorSkippable_;
1915   struct Impl_ {
1916     ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_;
1917     ::int64_t version_;
1918     mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
1919   };
1920   union { Impl_ _impl_; };
1921   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
1922 };
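
// Usage sketch (hand-written illustration, not generated code): tagging a
// session with a name/version pair; both values are arbitrary examples.
//
//   tensorflow::SessionMetadata meta;
//   meta.set_name("training_session");
//   meta.set_version(1);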
1923 // -------------------------------------------------------------------
1924 
1925 class ConfigProto_DeviceCountEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntryLite<ConfigProto_DeviceCountEntry_DoNotUse,
1926     std::string, ::int32_t,
1927     ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
1928     ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32> {
1929 public:
1930   typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntryLite<ConfigProto_DeviceCountEntry_DoNotUse,
1931     std::string, ::int32_t,
1932     ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
1933     ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32> SuperType;
1934   ConfigProto_DeviceCountEntry_DoNotUse();
1935   explicit PROTOBUF_CONSTEXPR ConfigProto_DeviceCountEntry_DoNotUse(
1936       ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
1937   explicit ConfigProto_DeviceCountEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena);
1938   void MergeFrom(const ConfigProto_DeviceCountEntry_DoNotUse& other);
1939   static const ConfigProto_DeviceCountEntry_DoNotUse* internal_default_instance() { return reinterpret_cast<const ConfigProto_DeviceCountEntry_DoNotUse*>(&_ConfigProto_DeviceCountEntry_DoNotUse_default_instance_); }
1940   static bool ValidateKey(std::string* s) {
1941     return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast<int>(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.ConfigProto.DeviceCountEntry.key");
1942  }
1943   static bool ValidateValue(void*) { return true; }
1944   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
1945 };
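
// Note (sketch): the MapEntryLite subclass above is an internal detail of the
// map<string, int32> device_count field; client code reads and writes the map
// through the ConfigProto accessors declared below. Key/value are illustrative.
//
//   tensorflow::ConfigProto config;
//   (*config.mutable_device_count())["CPU"] = 2;  // cap visible CPU devices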
1946 
1947 // -------------------------------------------------------------------
1948 
1949 class ConfigProto_Experimental final :
1950     public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.ConfigProto.Experimental) */ {
1951  public:
1952   inline ConfigProto_Experimental() : ConfigProto_Experimental(nullptr) {}
1953   ~ConfigProto_Experimental() override;
1954   explicit PROTOBUF_CONSTEXPR ConfigProto_Experimental(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
1955 
1956   ConfigProto_Experimental(const ConfigProto_Experimental& from);
1957   ConfigProto_Experimental(ConfigProto_Experimental&& from) noexcept
1958     : ConfigProto_Experimental() {
1959     *this = ::std::move(from);
1960   }
1961 
1962   inline ConfigProto_Experimental& operator=(const ConfigProto_Experimental& from) {
1963     if (this == &from) return *this;
1964     CopyFrom(from);
1965     return *this;
1966   }
1967   inline ConfigProto_Experimental& operator=(ConfigProto_Experimental&& from) noexcept {
1968     if (this == &from) return *this;
1969     if (GetOwningArena() == from.GetOwningArena()
1970   #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
1971         && GetOwningArena() != nullptr
1972   #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
1973     ) {
1974       InternalSwap(&from);
1975     } else {
1976       CopyFrom(from);
1977     }
1978     return *this;
1979   }
1980 
1981   static const ConfigProto_Experimental& default_instance() {
1982     return *internal_default_instance();
1983   }
1984   static inline const ConfigProto_Experimental* internal_default_instance() {
1985     return reinterpret_cast<const ConfigProto_Experimental*>(
1986                &_ConfigProto_Experimental_default_instance_);
1987   }
1988   static constexpr int kIndexInFileMessages =
1989     9;
1990 
1991   friend void swap(ConfigProto_Experimental& a, ConfigProto_Experimental& b) {
1992     a.Swap(&b);
1993   }
1994   inline void Swap(ConfigProto_Experimental* other) {
1995     if (other == this) return;
1996   #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
1997     if (GetOwningArena() != nullptr &&
1998         GetOwningArena() == other->GetOwningArena()) {
1999    #else  // PROTOBUF_FORCE_COPY_IN_SWAP
2000     if (GetOwningArena() == other->GetOwningArena()) {
2001   #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
2002       InternalSwap(other);
2003     } else {
2004       ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
2005     }
2006   }
2007   void UnsafeArenaSwap(ConfigProto_Experimental* other) {
2008     if (other == this) return;
2009     GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
2010     InternalSwap(other);
2011   }
2012 
2013   // implements Message ----------------------------------------------
2014 
2015   ConfigProto_Experimental* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
2016     return CreateMaybeMessage<ConfigProto_Experimental>(arena);
2017   }
2018   ConfigProto_Experimental* New() const {
2019     return New(nullptr);
2020   }
2021   void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
2022   void CopyFrom(const ConfigProto_Experimental& from);
2023   void MergeFrom(const ConfigProto_Experimental& from);
2024   PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
2025   bool IsInitialized() const final;
2026 
2027   size_t ByteSizeLong() const final;
2028   const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
2029   ::uint8_t* _InternalSerialize(
2030       ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
2031   int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
2032 
2033   private:
2034   void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
2035   void SharedDtor();
2036   void SetCachedSize(int size) const;
2037   void InternalSwap(ConfigProto_Experimental* other);
2038 
2039   private:
2040   friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
2041   static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
2042     return "tensorflow.ConfigProto.Experimental";
2043   }
2044   protected:
2045   explicit ConfigProto_Experimental(::PROTOBUF_NAMESPACE_ID::Arena* arena,
2046                        bool is_message_owned = false);
2047   public:
2048 
2049   std::string GetTypeName() const final;
2050 
2051   // nested types ----------------------------------------------------
2052 
2053   typedef ConfigProto_Experimental_MlirBridgeRollout MlirBridgeRollout;
2054   static constexpr MlirBridgeRollout MLIR_BRIDGE_ROLLOUT_UNSPECIFIED =
2055     ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_UNSPECIFIED;
2056   static constexpr MlirBridgeRollout MLIR_BRIDGE_ROLLOUT_ENABLED =
2057     ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_ENABLED;
2058   static constexpr MlirBridgeRollout MLIR_BRIDGE_ROLLOUT_DISABLED =
2059     ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_DISABLED;
2060   static constexpr MlirBridgeRollout MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED =
2061     ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED;
2062   static constexpr MlirBridgeRollout MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED =
2063     ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED;
2064   static inline bool MlirBridgeRollout_IsValid(int value) {
2065     return ConfigProto_Experimental_MlirBridgeRollout_IsValid(value);
2066   }
2067   static constexpr MlirBridgeRollout MlirBridgeRollout_MIN =
2068     ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_MIN;
2069   static constexpr MlirBridgeRollout MlirBridgeRollout_MAX =
2070     ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_MAX;
2071   static constexpr int MlirBridgeRollout_ARRAYSIZE =
2072     ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_ARRAYSIZE;
2073   template<typename T>
2074   static inline const std::string& MlirBridgeRollout_Name(T enum_t_value) {
2075     static_assert(::std::is_same<T, MlirBridgeRollout>::value ||
2076       ::std::is_integral<T>::value,
2077       "Incorrect type passed to function MlirBridgeRollout_Name.");
2078     return ConfigProto_Experimental_MlirBridgeRollout_Name(enum_t_value);
2079   }
2080   static inline bool MlirBridgeRollout_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
2081       MlirBridgeRollout* value) {
2082     return ConfigProto_Experimental_MlirBridgeRollout_Parse(name, value);
2083   }
2084 
2085   // accessors -------------------------------------------------------
2086 
2087   enum : int {
2088     kCollectiveGroupLeaderFieldNumber = 1,
2089     kExecutorTypeFieldNumber = 3,
2090     kSessionMetadataFieldNumber = 11,
2091     kCoordinationConfigFieldNumber = 23,
2092     kRecvBufMaxChunkFieldNumber = 4,
2093     kUseNumaAffinityFieldNumber = 5,
2094     kCollectiveDeterministicSequentialExecutionFieldNumber = 6,
2095     kCollectiveNcclFieldNumber = 7,
2096     kShareSessionStateInClusterspecPropagationFieldNumber = 8,
2097     kDisableThreadSpinningFieldNumber = 9,
2098     kShareClusterDevicesInSessionFieldNumber = 10,
2099     kOptimizeForStaticGraphFieldNumber = 12,
2100     kEnableMlirBridgeFieldNumber = 13,
2101     kMlirBridgeRolloutFieldNumber = 17,
2102     kXlaFusionAutotunerThreshFieldNumber = 15,
2103     kEnableMlirGraphOptimizationFieldNumber = 16,
2104     kDisableOutputPartitionGraphsFieldNumber = 14,
2105     kUseTfrtFieldNumber = 18,
2106     kDisableFunctionalOpsLoweringFieldNumber = 21,
2107     kXlaPreferSingleGraphClusterFieldNumber = 22,
2108   };
2109   // string collective_group_leader = 1;
2110   void clear_collective_group_leader();
2111   const std::string& collective_group_leader() const;
2112   template <typename ArgT0 = const std::string&, typename... ArgT>
2113   void set_collective_group_leader(ArgT0&& arg0, ArgT... args);
2114   std::string* mutable_collective_group_leader();
2115   PROTOBUF_NODISCARD std::string* release_collective_group_leader();
2116   void set_allocated_collective_group_leader(std::string* collective_group_leader);
2117   private:
2118   const std::string& _internal_collective_group_leader() const;
2119   inline PROTOBUF_ALWAYS_INLINE void _internal_set_collective_group_leader(const std::string& value);
2120   std::string* _internal_mutable_collective_group_leader();
2121   public:
2122 
2123   // string executor_type = 3;
2124   void clear_executor_type();
2125   const std::string& executor_type() const;
2126   template <typename ArgT0 = const std::string&, typename... ArgT>
2127   void set_executor_type(ArgT0&& arg0, ArgT... args);
2128   std::string* mutable_executor_type();
2129   PROTOBUF_NODISCARD std::string* release_executor_type();
2130   void set_allocated_executor_type(std::string* executor_type);
2131   private:
2132   const std::string& _internal_executor_type() const;
2133   inline PROTOBUF_ALWAYS_INLINE void _internal_set_executor_type(const std::string& value);
2134   std::string* _internal_mutable_executor_type();
2135   public:
2136 
2137   // .tensorflow.SessionMetadata session_metadata = 11;
2138   bool has_session_metadata() const;
2139   private:
2140   bool _internal_has_session_metadata() const;
2141   public:
2142   void clear_session_metadata();
2143   const ::tensorflow::SessionMetadata& session_metadata() const;
2144   PROTOBUF_NODISCARD ::tensorflow::SessionMetadata* release_session_metadata();
2145   ::tensorflow::SessionMetadata* mutable_session_metadata();
2146   void set_allocated_session_metadata(::tensorflow::SessionMetadata* session_metadata);
2147   private:
2148   const ::tensorflow::SessionMetadata& _internal_session_metadata() const;
2149   ::tensorflow::SessionMetadata* _internal_mutable_session_metadata();
2150   public:
2151   void unsafe_arena_set_allocated_session_metadata(
2152       ::tensorflow::SessionMetadata* session_metadata);
2153   ::tensorflow::SessionMetadata* unsafe_arena_release_session_metadata();
2154 
2155   // .tensorflow.CoordinationServiceConfig coordination_config = 23;
2156   bool has_coordination_config() const;
2157   private:
2158   bool _internal_has_coordination_config() const;
2159   public:
2160   void clear_coordination_config();
2161   const ::tensorflow::CoordinationServiceConfig& coordination_config() const;
2162   PROTOBUF_NODISCARD ::tensorflow::CoordinationServiceConfig* release_coordination_config();
2163   ::tensorflow::CoordinationServiceConfig* mutable_coordination_config();
2164   void set_allocated_coordination_config(::tensorflow::CoordinationServiceConfig* coordination_config);
2165   private:
2166   const ::tensorflow::CoordinationServiceConfig& _internal_coordination_config() const;
2167   ::tensorflow::CoordinationServiceConfig* _internal_mutable_coordination_config();
2168   public:
2169   void unsafe_arena_set_allocated_coordination_config(
2170       ::tensorflow::CoordinationServiceConfig* coordination_config);
2171   ::tensorflow::CoordinationServiceConfig* unsafe_arena_release_coordination_config();
2172 
2173   // int32 recv_buf_max_chunk = 4;
2174   void clear_recv_buf_max_chunk();
2175   ::int32_t recv_buf_max_chunk() const;
2176   void set_recv_buf_max_chunk(::int32_t value);
2177   private:
2178   ::int32_t _internal_recv_buf_max_chunk() const;
2179   void _internal_set_recv_buf_max_chunk(::int32_t value);
2180   public:
2181 
2182   // bool use_numa_affinity = 5;
2183   void clear_use_numa_affinity();
2184   bool use_numa_affinity() const;
2185   void set_use_numa_affinity(bool value);
2186   private:
2187   bool _internal_use_numa_affinity() const;
2188   void _internal_set_use_numa_affinity(bool value);
2189   public:
2190 
2191   // bool collective_deterministic_sequential_execution = 6;
2192   void clear_collective_deterministic_sequential_execution();
2193   bool collective_deterministic_sequential_execution() const;
2194   void set_collective_deterministic_sequential_execution(bool value);
2195   private:
2196   bool _internal_collective_deterministic_sequential_execution() const;
2197   void _internal_set_collective_deterministic_sequential_execution(bool value);
2198   public:
2199 
2200   // bool collective_nccl = 7;
2201   void clear_collective_nccl();
2202   bool collective_nccl() const;
2203   void set_collective_nccl(bool value);
2204   private:
2205   bool _internal_collective_nccl() const;
2206   void _internal_set_collective_nccl(bool value);
2207   public:
2208 
2209   // bool share_session_state_in_clusterspec_propagation = 8;
2210   void clear_share_session_state_in_clusterspec_propagation();
2211   bool share_session_state_in_clusterspec_propagation() const;
2212   void set_share_session_state_in_clusterspec_propagation(bool value);
2213   private:
2214   bool _internal_share_session_state_in_clusterspec_propagation() const;
2215   void _internal_set_share_session_state_in_clusterspec_propagation(bool value);
2216   public:
2217 
2218   // bool disable_thread_spinning = 9;
2219   void clear_disable_thread_spinning();
2220   bool disable_thread_spinning() const;
2221   void set_disable_thread_spinning(bool value);
2222   private:
2223   bool _internal_disable_thread_spinning() const;
2224   void _internal_set_disable_thread_spinning(bool value);
2225   public:
2226 
2227   // bool share_cluster_devices_in_session = 10;
2228   void clear_share_cluster_devices_in_session();
2229   bool share_cluster_devices_in_session() const;
2230   void set_share_cluster_devices_in_session(bool value);
2231   private:
2232   bool _internal_share_cluster_devices_in_session() const;
2233   void _internal_set_share_cluster_devices_in_session(bool value);
2234   public:
2235 
2236   // bool optimize_for_static_graph = 12;
2237   void clear_optimize_for_static_graph();
2238   bool optimize_for_static_graph() const;
2239   void set_optimize_for_static_graph(bool value);
2240   private:
2241   bool _internal_optimize_for_static_graph() const;
2242   void _internal_set_optimize_for_static_graph(bool value);
2243   public:
2244 
2245   // bool enable_mlir_bridge = 13;
2246   void clear_enable_mlir_bridge();
2247   bool enable_mlir_bridge() const;
2248   void set_enable_mlir_bridge(bool value);
2249   private:
2250   bool _internal_enable_mlir_bridge() const;
2251   void _internal_set_enable_mlir_bridge(bool value);
2252   public:
2253 
2254   // .tensorflow.ConfigProto.Experimental.MlirBridgeRollout mlir_bridge_rollout = 17;
2255   void clear_mlir_bridge_rollout();
2256   ::tensorflow::ConfigProto_Experimental_MlirBridgeRollout mlir_bridge_rollout() const;
2257   void set_mlir_bridge_rollout(::tensorflow::ConfigProto_Experimental_MlirBridgeRollout value);
2258   private:
2259   ::tensorflow::ConfigProto_Experimental_MlirBridgeRollout _internal_mlir_bridge_rollout() const;
2260   void _internal_set_mlir_bridge_rollout(::tensorflow::ConfigProto_Experimental_MlirBridgeRollout value);
2261   public:
2262 
2263   // int64 xla_fusion_autotuner_thresh = 15;
2264   void clear_xla_fusion_autotuner_thresh();
2265   ::int64_t xla_fusion_autotuner_thresh() const;
2266   void set_xla_fusion_autotuner_thresh(::int64_t value);
2267   private:
2268   ::int64_t _internal_xla_fusion_autotuner_thresh() const;
2269   void _internal_set_xla_fusion_autotuner_thresh(::int64_t value);
2270   public:
2271 
2272   // bool enable_mlir_graph_optimization = 16;
2273   void clear_enable_mlir_graph_optimization();
2274   bool enable_mlir_graph_optimization() const;
2275   void set_enable_mlir_graph_optimization(bool value);
2276   private:
2277   bool _internal_enable_mlir_graph_optimization() const;
2278   void _internal_set_enable_mlir_graph_optimization(bool value);
2279   public:
2280 
2281   // bool disable_output_partition_graphs = 14;
2282   void clear_disable_output_partition_graphs();
2283   bool disable_output_partition_graphs() const;
2284   void set_disable_output_partition_graphs(bool value);
2285   private:
2286   bool _internal_disable_output_partition_graphs() const;
2287   void _internal_set_disable_output_partition_graphs(bool value);
2288   public:
2289 
2290   // bool use_tfrt = 18;
2291   void clear_use_tfrt();
2292   bool use_tfrt() const;
2293   void set_use_tfrt(bool value);
2294   private:
2295   bool _internal_use_tfrt() const;
2296   void _internal_set_use_tfrt(bool value);
2297   public:
2298 
2299   // bool disable_functional_ops_lowering = 21;
2300   void clear_disable_functional_ops_lowering();
2301   bool disable_functional_ops_lowering() const;
2302   void set_disable_functional_ops_lowering(bool value);
2303   private:
2304   bool _internal_disable_functional_ops_lowering() const;
2305   void _internal_set_disable_functional_ops_lowering(bool value);
2306   public:
2307 
2308   // bool xla_prefer_single_graph_cluster = 22;
2309   void clear_xla_prefer_single_graph_cluster();
2310   bool xla_prefer_single_graph_cluster() const;
2311   void set_xla_prefer_single_graph_cluster(bool value);
2312   private:
2313   bool _internal_xla_prefer_single_graph_cluster() const;
2314   void _internal_set_xla_prefer_single_graph_cluster(bool value);
2315   public:
2316 
2317   // @@protoc_insertion_point(class_scope:tensorflow.ConfigProto.Experimental)
2318  private:
2319   class _Internal;
2320 
2321   template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
2322   typedef void InternalArenaConstructable_;
2323   typedef void DestructorSkippable_;
2324   struct Impl_ {
2325     ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr collective_group_leader_;
2326     ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr executor_type_;
2327     ::tensorflow::SessionMetadata* session_metadata_;
2328     ::tensorflow::CoordinationServiceConfig* coordination_config_;
2329     ::int32_t recv_buf_max_chunk_;
2330     bool use_numa_affinity_;
2331     bool collective_deterministic_sequential_execution_;
2332     bool collective_nccl_;
2333     bool share_session_state_in_clusterspec_propagation_;
2334     bool disable_thread_spinning_;
2335     bool share_cluster_devices_in_session_;
2336     bool optimize_for_static_graph_;
2337     bool enable_mlir_bridge_;
2338     int mlir_bridge_rollout_;
2339     ::int64_t xla_fusion_autotuner_thresh_;
2340     bool enable_mlir_graph_optimization_;
2341     bool disable_output_partition_graphs_;
2342     bool use_tfrt_;
2343     bool disable_functional_ops_lowering_;
2344     bool xla_prefer_single_graph_cluster_;
2345     mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
2346   };
2347   union { Impl_ _impl_; };
2348   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
2349 };
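
// Usage sketch (hand-written illustration, not generated code): opting into
// experimental features. The enumerator below is the constexpr alias declared
// in the nested-types section of this class.
//
//   tensorflow::ConfigProto_Experimental exp;
//   exp.set_enable_mlir_bridge(true);
//   exp.set_mlir_bridge_rollout(
//       tensorflow::ConfigProto_Experimental::MLIR_BRIDGE_ROLLOUT_ENABLED);
//   exp.set_disable_thread_spinning(true);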
2350 // -------------------------------------------------------------------
2351 
2352 class ConfigProto final :
2353     public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.ConfigProto) */ {
2354  public:
2355   inline ConfigProto() : ConfigProto(nullptr) {}
2356   ~ConfigProto() override;
2357   explicit PROTOBUF_CONSTEXPR ConfigProto(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
2358 
2359   ConfigProto(const ConfigProto& from);
2360   ConfigProto(ConfigProto&& from) noexcept
2361     : ConfigProto() {
2362     *this = ::std::move(from);
2363   }
2364 
2365   inline ConfigProto& operator=(const ConfigProto& from) {
2366     if (this == &from) return *this;
2367     CopyFrom(from);
2368     return *this;
2369   }
2370   inline ConfigProto& operator=(ConfigProto&& from) noexcept {
2371     if (this == &from) return *this;
2372     if (GetOwningArena() == from.GetOwningArena()
2373   #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
2374         && GetOwningArena() != nullptr
2375   #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
2376     ) {
2377       InternalSwap(&from);
2378     } else {
2379       CopyFrom(from);
2380     }
2381     return *this;
2382   }
2383 
2384   static const ConfigProto& default_instance() {
2385     return *internal_default_instance();
2386   }
2387   static inline const ConfigProto* internal_default_instance() {
2388     return reinterpret_cast<const ConfigProto*>(
2389                &_ConfigProto_default_instance_);
2390   }
2391   static constexpr int kIndexInFileMessages =
2392     10;
2393 
2394   friend void swap(ConfigProto& a, ConfigProto& b) {
2395     a.Swap(&b);
2396   }
2397   inline void Swap(ConfigProto* other) {
2398     if (other == this) return;
2399   #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
2400     if (GetOwningArena() != nullptr &&
2401         GetOwningArena() == other->GetOwningArena()) {
2402    #else  // PROTOBUF_FORCE_COPY_IN_SWAP
2403     if (GetOwningArena() == other->GetOwningArena()) {
2404   #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
2405       InternalSwap(other);
2406     } else {
2407       ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
2408     }
2409   }
2410   void UnsafeArenaSwap(ConfigProto* other) {
2411     if (other == this) return;
2412     GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
2413     InternalSwap(other);
2414   }
2415 
2416   // implements Message ----------------------------------------------
2417 
2418   ConfigProto* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
2419     return CreateMaybeMessage<ConfigProto>(arena);
2420   }
2421   ConfigProto* New() const {
2422     return New(nullptr);
2423   }
2424   void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
2425   void CopyFrom(const ConfigProto& from);
2426   void MergeFrom(const ConfigProto& from);
2427   PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
2428   bool IsInitialized() const final;
2429 
2430   size_t ByteSizeLong() const final;
2431   const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
2432   ::uint8_t* _InternalSerialize(
2433       ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
2434   int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
2435 
2436   private:
2437   void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
2438   void SharedDtor();
2439   void SetCachedSize(int size) const;
2440   void InternalSwap(ConfigProto* other);
2441 
2442   private:
2443   friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
2444   static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
2445     return "tensorflow.ConfigProto";
2446   }
2447   protected:
2448   explicit ConfigProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
2449                        bool is_message_owned = false);
2450   public:
2451 
2452   std::string GetTypeName() const final;
2453 
2454   // nested types ----------------------------------------------------
2455 
2456   typedef ConfigProto_Experimental Experimental;
2457 
2458   // accessors -------------------------------------------------------
2459 
2460   enum : int {
2461     kDeviceCountFieldNumber = 1,
2462     kDeviceFiltersFieldNumber = 4,
2463     kSessionInterOpThreadPoolFieldNumber = 12,
2464     kGpuOptionsFieldNumber = 6,
2465     kGraphOptionsFieldNumber = 10,
2466     kRpcOptionsFieldNumber = 13,
2467     kClusterDefFieldNumber = 14,
2468     kExperimentalFieldNumber = 16,
2469     kIntraOpParallelismThreadsFieldNumber = 2,
2470     kPlacementPeriodFieldNumber = 3,
2471     kInterOpParallelismThreadsFieldNumber = 5,
2472     kUsePerSessionThreadsFieldNumber = 9,
2473     kAllowSoftPlacementFieldNumber = 7,
2474     kLogDevicePlacementFieldNumber = 8,
2475     kIsolateSessionStateFieldNumber = 15,
2476     kOperationTimeoutInMsFieldNumber = 11,
2477     kShareClusterDevicesInSessionFieldNumber = 17,
2478   };
2479   // map<string, int32> device_count = 1;
2480   int device_count_size() const;
2481   private:
2482   int _internal_device_count_size() const;
2483   public:
2484   void clear_device_count();
2485   private:
2486   const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >&
2487       _internal_device_count() const;
2488   ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >*
2489       _internal_mutable_device_count();
2490   public:
2491   const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >&
2492       device_count() const;
2493   ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >*
2494       mutable_device_count();
2495 
2496   // repeated string device_filters = 4;
2497   int device_filters_size() const;
2498   private:
2499   int _internal_device_filters_size() const;
2500   public:
2501   void clear_device_filters();
2502   const std::string& device_filters(int index) const;
2503   std::string* mutable_device_filters(int index);
2504   void set_device_filters(int index, const std::string& value);
2505   void set_device_filters(int index, std::string&& value);
2506   void set_device_filters(int index, const char* value);
2507   void set_device_filters(int index, const char* value, size_t size);
2508   std::string* add_device_filters();
2509   void add_device_filters(const std::string& value);
2510   void add_device_filters(std::string&& value);
2511   void add_device_filters(const char* value);
2512   void add_device_filters(const char* value, size_t size);
2513   const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& device_filters() const;
2514   ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_device_filters();
2515   private:
2516   const std::string& _internal_device_filters(int index) const;
2517   std::string* _internal_add_device_filters();
2518   public:
2519 
2520   // repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
2521   int session_inter_op_thread_pool_size() const;
2522   private:
2523   int _internal_session_inter_op_thread_pool_size() const;
2524   public:
2525   void clear_session_inter_op_thread_pool();
2526   ::tensorflow::ThreadPoolOptionProto* mutable_session_inter_op_thread_pool(int index);
2527   ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::ThreadPoolOptionProto >*
2528       mutable_session_inter_op_thread_pool();
2529   private:
2530   const ::tensorflow::ThreadPoolOptionProto& _internal_session_inter_op_thread_pool(int index) const;
2531   ::tensorflow::ThreadPoolOptionProto* _internal_add_session_inter_op_thread_pool();
2532   public:
2533   const ::tensorflow::ThreadPoolOptionProto& session_inter_op_thread_pool(int index) const;
2534   ::tensorflow::ThreadPoolOptionProto* add_session_inter_op_thread_pool();
2535   const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::ThreadPoolOptionProto >&
2536       session_inter_op_thread_pool() const;
2537 
2538   // .tensorflow.GPUOptions gpu_options = 6;
2539   bool has_gpu_options() const;
2540   private:
2541   bool _internal_has_gpu_options() const;
2542   public:
2543   void clear_gpu_options();
2544   const ::tensorflow::GPUOptions& gpu_options() const;
2545   PROTOBUF_NODISCARD ::tensorflow::GPUOptions* release_gpu_options();
2546   ::tensorflow::GPUOptions* mutable_gpu_options();
2547   void set_allocated_gpu_options(::tensorflow::GPUOptions* gpu_options);
2548   private:
2549   const ::tensorflow::GPUOptions& _internal_gpu_options() const;
2550   ::tensorflow::GPUOptions* _internal_mutable_gpu_options();
2551   public:
2552   void unsafe_arena_set_allocated_gpu_options(
2553       ::tensorflow::GPUOptions* gpu_options);
2554   ::tensorflow::GPUOptions* unsafe_arena_release_gpu_options();
2555 
2556   // .tensorflow.GraphOptions graph_options = 10;
2557   bool has_graph_options() const;
2558   private:
2559   bool _internal_has_graph_options() const;
2560   public:
2561   void clear_graph_options();
2562   const ::tensorflow::GraphOptions& graph_options() const;
2563   PROTOBUF_NODISCARD ::tensorflow::GraphOptions* release_graph_options();
2564   ::tensorflow::GraphOptions* mutable_graph_options();
2565   void set_allocated_graph_options(::tensorflow::GraphOptions* graph_options);
2566   private:
2567   const ::tensorflow::GraphOptions& _internal_graph_options() const;
2568   ::tensorflow::GraphOptions* _internal_mutable_graph_options();
2569   public:
2570   void unsafe_arena_set_allocated_graph_options(
2571       ::tensorflow::GraphOptions* graph_options);
2572   ::tensorflow::GraphOptions* unsafe_arena_release_graph_options();
2573 
2574   // .tensorflow.RPCOptions rpc_options = 13;
2575   bool has_rpc_options() const;
2576   private:
2577   bool _internal_has_rpc_options() const;
2578   public:
2579   void clear_rpc_options();
2580   const ::tensorflow::RPCOptions& rpc_options() const;
2581   PROTOBUF_NODISCARD ::tensorflow::RPCOptions* release_rpc_options();
2582   ::tensorflow::RPCOptions* mutable_rpc_options();
2583   void set_allocated_rpc_options(::tensorflow::RPCOptions* rpc_options);
2584   private:
2585   const ::tensorflow::RPCOptions& _internal_rpc_options() const;
2586   ::tensorflow::RPCOptions* _internal_mutable_rpc_options();
2587   public:
2588   void unsafe_arena_set_allocated_rpc_options(
2589       ::tensorflow::RPCOptions* rpc_options);
2590   ::tensorflow::RPCOptions* unsafe_arena_release_rpc_options();
2591 
2592   // .tensorflow.ClusterDef cluster_def = 14;
2593   bool has_cluster_def() const;
2594   private:
2595   bool _internal_has_cluster_def() const;
2596   public:
2597   void clear_cluster_def();
2598   const ::tensorflow::ClusterDef& cluster_def() const;
2599   PROTOBUF_NODISCARD ::tensorflow::ClusterDef* release_cluster_def();
2600   ::tensorflow::ClusterDef* mutable_cluster_def();
2601   void set_allocated_cluster_def(::tensorflow::ClusterDef* cluster_def);
2602   private:
2603   const ::tensorflow::ClusterDef& _internal_cluster_def() const;
2604   ::tensorflow::ClusterDef* _internal_mutable_cluster_def();
2605   public:
2606   void unsafe_arena_set_allocated_cluster_def(
2607       ::tensorflow::ClusterDef* cluster_def);
2608   ::tensorflow::ClusterDef* unsafe_arena_release_cluster_def();
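  // Note (editorial sketch, not generated code): every singular message
  // field in this class follows the same ownership contract. release_*()
  // detaches the submessage and transfers ownership to the caller,
  // set_allocated_*() takes ownership of a heap-allocated value, and the
  // unsafe_arena_* variants skip that bookkeeping and are only safe when
  // caller and message share the same arena. Hypothetical example:
  //
  //   tensorflow::ConfigProto config;
  //   config.set_allocated_cluster_def(new tensorflow::ClusterDef);  // config owns it
  //   std::unique_ptr<tensorflow::ClusterDef> taken(
  //       config.release_cluster_def());  // ownership moves to the caller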
2609 
2610   // .tensorflow.ConfigProto.Experimental experimental = 16;
2611   bool has_experimental() const;
2612   private:
2613   bool _internal_has_experimental() const;
2614   public:
2615   void clear_experimental();
2616   const ::tensorflow::ConfigProto_Experimental& experimental() const;
2617   PROTOBUF_NODISCARD ::tensorflow::ConfigProto_Experimental* release_experimental();
2618   ::tensorflow::ConfigProto_Experimental* mutable_experimental();
2619   void set_allocated_experimental(::tensorflow::ConfigProto_Experimental* experimental);
2620   private:
2621   const ::tensorflow::ConfigProto_Experimental& _internal_experimental() const;
2622   ::tensorflow::ConfigProto_Experimental* _internal_mutable_experimental();
2623   public:
2624   void unsafe_arena_set_allocated_experimental(
2625       ::tensorflow::ConfigProto_Experimental* experimental);
2626   ::tensorflow::ConfigProto_Experimental* unsafe_arena_release_experimental();
2627 
2628   // int32 intra_op_parallelism_threads = 2;
2629   void clear_intra_op_parallelism_threads();
2630   ::int32_t intra_op_parallelism_threads() const;
2631   void set_intra_op_parallelism_threads(::int32_t value);
2632   private:
2633   ::int32_t _internal_intra_op_parallelism_threads() const;
2634   void _internal_set_intra_op_parallelism_threads(::int32_t value);
2635   public:
2636 
2637   // int32 placement_period = 3;
2638   void clear_placement_period();
2639   ::int32_t placement_period() const;
2640   void set_placement_period(::int32_t value);
2641   private:
2642   ::int32_t _internal_placement_period() const;
2643   void _internal_set_placement_period(::int32_t value);
2644   public:
2645 
2646   // int32 inter_op_parallelism_threads = 5;
2647   void clear_inter_op_parallelism_threads();
2648   ::int32_t inter_op_parallelism_threads() const;
2649   void set_inter_op_parallelism_threads(::int32_t value);
2650   private:
2651   ::int32_t _internal_inter_op_parallelism_threads() const;
2652   void _internal_set_inter_op_parallelism_threads(::int32_t value);
2653   public:
2654 
2655   // bool use_per_session_threads = 9;
2656   void clear_use_per_session_threads();
2657   bool use_per_session_threads() const;
2658   void set_use_per_session_threads(bool value);
2659   private:
2660   bool _internal_use_per_session_threads() const;
2661   void _internal_set_use_per_session_threads(bool value);
2662   public:
2663 
2664   // bool allow_soft_placement = 7;
2665   void clear_allow_soft_placement();
2666   bool allow_soft_placement() const;
2667   void set_allow_soft_placement(bool value);
2668   private:
2669   bool _internal_allow_soft_placement() const;
2670   void _internal_set_allow_soft_placement(bool value);
2671   public:
2672 
2673   // bool log_device_placement = 8;
2674   void clear_log_device_placement();
2675   bool log_device_placement() const;
2676   void set_log_device_placement(bool value);
2677   private:
2678   bool _internal_log_device_placement() const;
2679   void _internal_set_log_device_placement(bool value);
2680   public:
2681 
2682   // bool isolate_session_state = 15;
2683   void clear_isolate_session_state();
2684   bool isolate_session_state() const;
2685   void set_isolate_session_state(bool value);
2686   private:
2687   bool _internal_isolate_session_state() const;
2688   void _internal_set_isolate_session_state(bool value);
2689   public:
2690 
2691   // int64 operation_timeout_in_ms = 11;
2692   void clear_operation_timeout_in_ms();
2693   ::int64_t operation_timeout_in_ms() const;
2694   void set_operation_timeout_in_ms(::int64_t value);
2695   private:
2696   ::int64_t _internal_operation_timeout_in_ms() const;
2697   void _internal_set_operation_timeout_in_ms(::int64_t value);
2698   public:
2699 
2700   // bool share_cluster_devices_in_session = 17;
2701   void clear_share_cluster_devices_in_session();
2702   bool share_cluster_devices_in_session() const;
2703   void set_share_cluster_devices_in_session(bool value);
2704   private:
2705   bool _internal_share_cluster_devices_in_session() const;
2706   void _internal_set_share_cluster_devices_in_session(bool value);
2707   public:
2708 
2709   // @@protoc_insertion_point(class_scope:tensorflow.ConfigProto)
2710  private:
2711   class _Internal;
2712 
2713   template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
2714   typedef void InternalArenaConstructable_;
2715   typedef void DestructorSkippable_;
2716   struct Impl_ {
2717     ::PROTOBUF_NAMESPACE_ID::internal::MapFieldLite<
2718         ConfigProto_DeviceCountEntry_DoNotUse,
2719         std::string, ::int32_t,
2720         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
2721         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32> device_count_;
2722     ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> device_filters_;
2723     ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::ThreadPoolOptionProto > session_inter_op_thread_pool_;
2724     ::tensorflow::GPUOptions* gpu_options_;
2725     ::tensorflow::GraphOptions* graph_options_;
2726     ::tensorflow::RPCOptions* rpc_options_;
2727     ::tensorflow::ClusterDef* cluster_def_;
2728     ::tensorflow::ConfigProto_Experimental* experimental_;
2729     ::int32_t intra_op_parallelism_threads_;
2730     ::int32_t placement_period_;
2731     ::int32_t inter_op_parallelism_threads_;
2732     bool use_per_session_threads_;
2733     bool allow_soft_placement_;
2734     bool log_device_placement_;
2735     bool isolate_session_state_;
2736     ::int64_t operation_timeout_in_ms_;
2737     bool share_cluster_devices_in_session_;
2738     mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
2739   };
2740   union { Impl_ _impl_; };
2741   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
2742 };
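// Illustrative sketch (editorial addition, not generated code): assembling a
// ConfigProto through the accessors declared above; all values are
// hypothetical. SerializeAsString() comes from the MessageLite base class.
//
//   tensorflow::ConfigProto config;
//   (*config.mutable_device_count())["CPU"] = 2;          // map<string, int32>
//   config.add_device_filters("/job:worker/replica:0");   // repeated string
//   config.set_intra_op_parallelism_threads(4);
//   config.set_inter_op_parallelism_threads(2);
//   config.set_allow_soft_placement(true);
//   config.mutable_gpu_options();  // lazily creates the GPUOptions submessage
//   const std::string wire = config.SerializeAsString();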
2743 // -------------------------------------------------------------------
2744 
2745 class RunOptions_Experimental_RunHandlerPoolOptions final :
2746     public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions) */ {
2747  public:
2748   inline RunOptions_Experimental_RunHandlerPoolOptions() : RunOptions_Experimental_RunHandlerPoolOptions(nullptr) {}
2749   ~RunOptions_Experimental_RunHandlerPoolOptions() override;
2750   explicit PROTOBUF_CONSTEXPR RunOptions_Experimental_RunHandlerPoolOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
2751 
2752   RunOptions_Experimental_RunHandlerPoolOptions(const RunOptions_Experimental_RunHandlerPoolOptions& from);
2753   RunOptions_Experimental_RunHandlerPoolOptions(RunOptions_Experimental_RunHandlerPoolOptions&& from) noexcept
2754     : RunOptions_Experimental_RunHandlerPoolOptions() {
2755     *this = ::std::move(from);
2756   }
2757 
2758   inline RunOptions_Experimental_RunHandlerPoolOptions& operator=(const RunOptions_Experimental_RunHandlerPoolOptions& from) {
2759     if (this == &from) return *this;
2760     CopyFrom(from);
2761     return *this;
2762   }
2763   inline RunOptions_Experimental_RunHandlerPoolOptions& operator=(RunOptions_Experimental_RunHandlerPoolOptions&& from) noexcept {
2764     if (this == &from) return *this;
2765     if (GetOwningArena() == from.GetOwningArena()
2766   #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
2767         && GetOwningArena() != nullptr
2768   #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
2769     ) {
2770       InternalSwap(&from);
2771     } else {
2772       CopyFrom(from);
2773     }
2774     return *this;
2775   }
2776 
2777   static const RunOptions_Experimental_RunHandlerPoolOptions& default_instance() {
2778     return *internal_default_instance();
2779   }
2780   static inline const RunOptions_Experimental_RunHandlerPoolOptions* internal_default_instance() {
2781     return reinterpret_cast<const RunOptions_Experimental_RunHandlerPoolOptions*>(
2782                &_RunOptions_Experimental_RunHandlerPoolOptions_default_instance_);
2783   }
2784   static constexpr int kIndexInFileMessages =
2785     11;
2786 
2787   friend void swap(RunOptions_Experimental_RunHandlerPoolOptions& a, RunOptions_Experimental_RunHandlerPoolOptions& b) {
2788     a.Swap(&b);
2789   }
2790   inline void Swap(RunOptions_Experimental_RunHandlerPoolOptions* other) {
2791     if (other == this) return;
2792   #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
2793     if (GetOwningArena() != nullptr &&
2794         GetOwningArena() == other->GetOwningArena()) {
2795    #else  // PROTOBUF_FORCE_COPY_IN_SWAP
2796     if (GetOwningArena() == other->GetOwningArena()) {
2797   #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
2798       InternalSwap(other);
2799     } else {
2800       ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
2801     }
2802   }
2803   void UnsafeArenaSwap(RunOptions_Experimental_RunHandlerPoolOptions* other) {
2804     if (other == this) return;
2805     GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
2806     InternalSwap(other);
2807   }
2808 
2809   // implements Message ----------------------------------------------
2810 
2811   RunOptions_Experimental_RunHandlerPoolOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
2812     return CreateMaybeMessage<RunOptions_Experimental_RunHandlerPoolOptions>(arena);
2813   }
2814   RunOptions_Experimental_RunHandlerPoolOptions* New() const {
2815     return New(nullptr);
2816   }
2817   void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
2818   void CopyFrom(const RunOptions_Experimental_RunHandlerPoolOptions& from);
2819   void MergeFrom(const RunOptions_Experimental_RunHandlerPoolOptions& from);
2820   PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
2821   bool IsInitialized() const final;
2822 
2823   size_t ByteSizeLong() const final;
2824   const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
2825   ::uint8_t* _InternalSerialize(
2826       ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
2827   int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
2828 
2829   private:
2830   void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
2831   void SharedDtor();
2832   void SetCachedSize(int size) const;
2833   void InternalSwap(RunOptions_Experimental_RunHandlerPoolOptions* other);
2834 
2835   private:
2836   friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
2837   static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
2838     return "tensorflow.RunOptions.Experimental.RunHandlerPoolOptions";
2839   }
2840   protected:
2841   explicit RunOptions_Experimental_RunHandlerPoolOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
2842                        bool is_message_owned = false);
2843   public:
2844 
2845   std::string GetTypeName() const final;
2846 
2847   // nested types ----------------------------------------------------
2848 
2849   // accessors -------------------------------------------------------
2850 
2851   enum : int {
2852     kPriorityFieldNumber = 1,
2853   };
2854   // int64 priority = 1;
2855   void clear_priority();
2856   ::int64_t priority() const;
2857   void set_priority(::int64_t value);
2858   private:
2859   ::int64_t _internal_priority() const;
2860   void _internal_set_priority(::int64_t value);
2861   public:
2862 
2863   // @@protoc_insertion_point(class_scope:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
2864  private:
2865   class _Internal;
2866 
2867   template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
2868   typedef void InternalArenaConstructable_;
2869   typedef void DestructorSkippable_;
2870   struct Impl_ {
2871     ::int64_t priority_;
2872     mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
2873   };
2874   union { Impl_ _impl_; };
2875   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
2876 };
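// Illustrative sketch (editorial addition): the single scalar field of this
// message uses the standard generated accessors; values are hypothetical.
//
//   tensorflow::RunOptions_Experimental_RunHandlerPoolOptions pool;
//   pool.set_priority(2);           // int64 priority = 1;
//   ::int64_t p = pool.priority();  // reads back 2
//   pool.clear_priority();          // back to the default, 0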
2877 // -------------------------------------------------------------------
2878 
2879 class RunOptions_Experimental final :
2880     public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.RunOptions.Experimental) */ {
2881  public:
2882   inline RunOptions_Experimental() : RunOptions_Experimental(nullptr) {}
2883   ~RunOptions_Experimental() override;
2884   explicit PROTOBUF_CONSTEXPR RunOptions_Experimental(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
2885 
2886   RunOptions_Experimental(const RunOptions_Experimental& from);
2887   RunOptions_Experimental(RunOptions_Experimental&& from) noexcept
2888     : RunOptions_Experimental() {
2889     *this = ::std::move(from);
2890   }
2891 
2892   inline RunOptions_Experimental& operator=(const RunOptions_Experimental& from) {
2893     if (this == &from) return *this;
2894     CopyFrom(from);
2895     return *this;
2896   }
2897   inline RunOptions_Experimental& operator=(RunOptions_Experimental&& from) noexcept {
2898     if (this == &from) return *this;
2899     if (GetOwningArena() == from.GetOwningArena()
2900   #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
2901         && GetOwningArena() != nullptr
2902   #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
2903     ) {
2904       InternalSwap(&from);
2905     } else {
2906       CopyFrom(from);
2907     }
2908     return *this;
2909   }
2910 
2911   static const RunOptions_Experimental& default_instance() {
2912     return *internal_default_instance();
2913   }
2914   static inline const RunOptions_Experimental* internal_default_instance() {
2915     return reinterpret_cast<const RunOptions_Experimental*>(
2916                &_RunOptions_Experimental_default_instance_);
2917   }
2918   static constexpr int kIndexInFileMessages =
2919     12;
2920 
2921   friend void swap(RunOptions_Experimental& a, RunOptions_Experimental& b) {
2922     a.Swap(&b);
2923   }
2924   inline void Swap(RunOptions_Experimental* other) {
2925     if (other == this) return;
2926   #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
2927     if (GetOwningArena() != nullptr &&
2928         GetOwningArena() == other->GetOwningArena()) {
2929    #else  // PROTOBUF_FORCE_COPY_IN_SWAP
2930     if (GetOwningArena() == other->GetOwningArena()) {
2931   #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
2932       InternalSwap(other);
2933     } else {
2934       ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
2935     }
2936   }
2937   void UnsafeArenaSwap(RunOptions_Experimental* other) {
2938     if (other == this) return;
2939     GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
2940     InternalSwap(other);
2941   }
2942 
2943   // implements Message ----------------------------------------------
2944 
2945   RunOptions_Experimental* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
2946     return CreateMaybeMessage<RunOptions_Experimental>(arena);
2947   }
2948   RunOptions_Experimental* New() const {
2949     return New(nullptr);
2950   }
2951   void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
2952   void CopyFrom(const RunOptions_Experimental& from);
2953   void MergeFrom(const RunOptions_Experimental& from);
2954   PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
2955   bool IsInitialized() const final;
2956 
2957   size_t ByteSizeLong() const final;
2958   const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
2959   ::uint8_t* _InternalSerialize(
2960       ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
2961   int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
2962 
2963   private:
2964   void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
2965   void SharedDtor();
2966   void SetCachedSize(int size) const;
2967   void InternalSwap(RunOptions_Experimental* other);
2968 
2969   private:
2970   friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
2971   static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
2972     return "tensorflow.RunOptions.Experimental";
2973   }
2974   protected:
2975   explicit RunOptions_Experimental(::PROTOBUF_NAMESPACE_ID::Arena* arena,
2976                        bool is_message_owned = false);
2977   public:
2978 
2979   std::string GetTypeName() const final;
2980 
2981   // nested types ----------------------------------------------------
2982 
2983   typedef RunOptions_Experimental_RunHandlerPoolOptions RunHandlerPoolOptions;
2984 
2985   // accessors -------------------------------------------------------
2986 
2987   enum : int {
2988     kRunHandlerPoolOptionsFieldNumber = 3,
2989     kCollectiveGraphKeyFieldNumber = 1,
2990     kUseRunHandlerPoolFieldNumber = 2,
2991   };
2992   // .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;
2993   bool has_run_handler_pool_options() const;
2994   private:
2995   bool _internal_has_run_handler_pool_options() const;
2996   public:
2997   void clear_run_handler_pool_options();
2998   const ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions& run_handler_pool_options() const;
2999   PROTOBUF_NODISCARD ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* release_run_handler_pool_options();
3000   ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* mutable_run_handler_pool_options();
3001   void set_allocated_run_handler_pool_options(::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* run_handler_pool_options);
3002   private:
3003   const ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions& _internal_run_handler_pool_options() const;
3004   ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* _internal_mutable_run_handler_pool_options();
3005   public:
3006   void unsafe_arena_set_allocated_run_handler_pool_options(
3007       ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* run_handler_pool_options);
3008   ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* unsafe_arena_release_run_handler_pool_options();
3009 
3010   // int64 collective_graph_key = 1;
3011   void clear_collective_graph_key();
3012   ::int64_t collective_graph_key() const;
3013   void set_collective_graph_key(::int64_t value);
3014   private:
3015   ::int64_t _internal_collective_graph_key() const;
3016   void _internal_set_collective_graph_key(::int64_t value);
3017   public:
3018 
3019   // bool use_run_handler_pool = 2;
3020   void clear_use_run_handler_pool();
3021   bool use_run_handler_pool() const;
3022   void set_use_run_handler_pool(bool value);
3023   private:
3024   bool _internal_use_run_handler_pool() const;
3025   void _internal_set_use_run_handler_pool(bool value);
3026   public:
3027 
3028   // @@protoc_insertion_point(class_scope:tensorflow.RunOptions.Experimental)
3029  private:
3030   class _Internal;
3031 
3032   template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
3033   typedef void InternalArenaConstructable_;
3034   typedef void DestructorSkippable_;
3035   struct Impl_ {
3036     ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* run_handler_pool_options_;
3037     ::int64_t collective_graph_key_;
3038     bool use_run_handler_pool_;
3039     mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
3040   };
3041   union { Impl_ _impl_; };
3042   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
3043 };
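// Illustrative sketch (editorial addition): wiring the experimental run
// handler pool through the accessors above; values are hypothetical.
//
//   tensorflow::RunOptions_Experimental exp;
//   exp.set_collective_graph_key(1);
//   exp.set_use_run_handler_pool(true);
//   exp.mutable_run_handler_pool_options()->set_priority(2);
//   bool present = exp.has_run_handler_pool_options();  // true after mutable_*()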
3044 // -------------------------------------------------------------------
3045 
3046 class RunOptions final :
3047     public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.RunOptions) */ {
3048  public:
3049   inline RunOptions() : RunOptions(nullptr) {}
3050   ~RunOptions() override;
3051   explicit PROTOBUF_CONSTEXPR RunOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
3052 
3053   RunOptions(const RunOptions& from);
3054   RunOptions(RunOptions&& from) noexcept
3055     : RunOptions() {
3056     *this = ::std::move(from);
3057   }
3058 
3059   inline RunOptions& operator=(const RunOptions& from) {
3060     if (this == &from) return *this;
3061     CopyFrom(from);
3062     return *this;
3063   }
3064   inline RunOptions& operator=(RunOptions&& from) noexcept {
3065     if (this == &from) return *this;
3066     if (GetOwningArena() == from.GetOwningArena()
3067   #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
3068         && GetOwningArena() != nullptr
3069   #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
3070     ) {
3071       InternalSwap(&from);
3072     } else {
3073       CopyFrom(from);
3074     }
3075     return *this;
3076   }
3077 
3078   static const RunOptions& default_instance() {
3079     return *internal_default_instance();
3080   }
3081   static inline const RunOptions* internal_default_instance() {
3082     return reinterpret_cast<const RunOptions*>(
3083                &_RunOptions_default_instance_);
3084   }
3085   static constexpr int kIndexInFileMessages =
3086     13;
3087 
3088   friend void swap(RunOptions& a, RunOptions& b) {
3089     a.Swap(&b);
3090   }
3091   inline void Swap(RunOptions* other) {
3092     if (other == this) return;
3093   #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
3094     if (GetOwningArena() != nullptr &&
3095         GetOwningArena() == other->GetOwningArena()) {
3096    #else  // PROTOBUF_FORCE_COPY_IN_SWAP
3097     if (GetOwningArena() == other->GetOwningArena()) {
3098   #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
3099       InternalSwap(other);
3100     } else {
3101       ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
3102     }
3103   }
3104   void UnsafeArenaSwap(RunOptions* other) {
3105     if (other == this) return;
3106     GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
3107     InternalSwap(other);
3108   }
3109 
3110   // implements Message ----------------------------------------------
3111 
3112   RunOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
3113     return CreateMaybeMessage<RunOptions>(arena);
3114   }
3115   RunOptions* New() const {
3116     return New(nullptr);
3117   }
3118   void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
3119   void CopyFrom(const RunOptions& from);
3120   void MergeFrom(const RunOptions& from);
3121   PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
3122   bool IsInitialized() const final;
3123 
3124   size_t ByteSizeLong() const final;
3125   const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
3126   ::uint8_t* _InternalSerialize(
3127       ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
3128   int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
3129 
3130   private:
3131   void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
3132   void SharedDtor();
3133   void SetCachedSize(int size) const;
3134   void InternalSwap(RunOptions* other);
3135 
3136   private:
3137   friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
3138   static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
3139     return "tensorflow.RunOptions";
3140   }
3141   protected:
3142   explicit RunOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
3143                        bool is_message_owned = false);
3144   public:
3145 
3146   std::string GetTypeName() const final;
3147 
3148   // nested types ----------------------------------------------------
3149 
3150   typedef RunOptions_Experimental Experimental;
3151 
3152   typedef RunOptions_TraceLevel TraceLevel;
3153   static constexpr TraceLevel NO_TRACE =
3154     RunOptions_TraceLevel_NO_TRACE;
3155   static constexpr TraceLevel SOFTWARE_TRACE =
3156     RunOptions_TraceLevel_SOFTWARE_TRACE;
3157   static constexpr TraceLevel HARDWARE_TRACE =
3158     RunOptions_TraceLevel_HARDWARE_TRACE;
3159   static constexpr TraceLevel FULL_TRACE =
3160     RunOptions_TraceLevel_FULL_TRACE;
3161   static inline bool TraceLevel_IsValid(int value) {
3162     return RunOptions_TraceLevel_IsValid(value);
3163   }
3164   static constexpr TraceLevel TraceLevel_MIN =
3165     RunOptions_TraceLevel_TraceLevel_MIN;
3166   static constexpr TraceLevel TraceLevel_MAX =
3167     RunOptions_TraceLevel_TraceLevel_MAX;
3168   static constexpr int TraceLevel_ARRAYSIZE =
3169     RunOptions_TraceLevel_TraceLevel_ARRAYSIZE;
3170   template<typename T>
3171   static inline const std::string& TraceLevel_Name(T enum_t_value) {
3172     static_assert(::std::is_same<T, TraceLevel>::value ||
3173       ::std::is_integral<T>::value,
3174       "Incorrect type passed to function TraceLevel_Name.");
3175     return RunOptions_TraceLevel_Name(enum_t_value);
3176   }
3177   static inline bool TraceLevel_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
3178       TraceLevel* value) {
3179     return RunOptions_TraceLevel_Parse(name, value);
3180   }
3181 
3182   // accessors -------------------------------------------------------
3183 
3184   enum : int {
3185     kDebugOptionsFieldNumber = 6,
3186     kExperimentalFieldNumber = 8,
3187     kTimeoutInMsFieldNumber = 2,
3188     kTraceLevelFieldNumber = 1,
3189     kInterOpThreadPoolFieldNumber = 3,
3190     kOutputPartitionGraphsFieldNumber = 5,
3191     kReportTensorAllocationsUponOomFieldNumber = 7,
3192   };
3193   // .tensorflow.DebugOptions debug_options = 6;
3194   bool has_debug_options() const;
3195   private:
3196   bool _internal_has_debug_options() const;
3197   public:
3198   void clear_debug_options();
3199   const ::tensorflow::DebugOptions& debug_options() const;
3200   PROTOBUF_NODISCARD ::tensorflow::DebugOptions* release_debug_options();
3201   ::tensorflow::DebugOptions* mutable_debug_options();
3202   void set_allocated_debug_options(::tensorflow::DebugOptions* debug_options);
3203   private:
3204   const ::tensorflow::DebugOptions& _internal_debug_options() const;
3205   ::tensorflow::DebugOptions* _internal_mutable_debug_options();
3206   public:
3207   void unsafe_arena_set_allocated_debug_options(
3208       ::tensorflow::DebugOptions* debug_options);
3209   ::tensorflow::DebugOptions* unsafe_arena_release_debug_options();
3210 
3211   // .tensorflow.RunOptions.Experimental experimental = 8;
3212   bool has_experimental() const;
3213   private:
3214   bool _internal_has_experimental() const;
3215   public:
3216   void clear_experimental();
3217   const ::tensorflow::RunOptions_Experimental& experimental() const;
3218   PROTOBUF_NODISCARD ::tensorflow::RunOptions_Experimental* release_experimental();
3219   ::tensorflow::RunOptions_Experimental* mutable_experimental();
3220   void set_allocated_experimental(::tensorflow::RunOptions_Experimental* experimental);
3221   private:
3222   const ::tensorflow::RunOptions_Experimental& _internal_experimental() const;
3223   ::tensorflow::RunOptions_Experimental* _internal_mutable_experimental();
3224   public:
3225   void unsafe_arena_set_allocated_experimental(
3226       ::tensorflow::RunOptions_Experimental* experimental);
3227   ::tensorflow::RunOptions_Experimental* unsafe_arena_release_experimental();
3228 
3229   // int64 timeout_in_ms = 2;
3230   void clear_timeout_in_ms();
3231   ::int64_t timeout_in_ms() const;
3232   void set_timeout_in_ms(::int64_t value);
3233   private:
3234   ::int64_t _internal_timeout_in_ms() const;
3235   void _internal_set_timeout_in_ms(::int64_t value);
3236   public:
3237 
3238   // .tensorflow.RunOptions.TraceLevel trace_level = 1;
3239   void clear_trace_level();
3240   ::tensorflow::RunOptions_TraceLevel trace_level() const;
3241   void set_trace_level(::tensorflow::RunOptions_TraceLevel value);
3242   private:
3243   ::tensorflow::RunOptions_TraceLevel _internal_trace_level() const;
3244   void _internal_set_trace_level(::tensorflow::RunOptions_TraceLevel value);
3245   public:
3246 
3247   // int32 inter_op_thread_pool = 3;
3248   void clear_inter_op_thread_pool();
3249   ::int32_t inter_op_thread_pool() const;
3250   void set_inter_op_thread_pool(::int32_t value);
3251   private:
3252   ::int32_t _internal_inter_op_thread_pool() const;
3253   void _internal_set_inter_op_thread_pool(::int32_t value);
3254   public:
3255 
3256   // bool output_partition_graphs = 5;
3257   void clear_output_partition_graphs();
3258   bool output_partition_graphs() const;
3259   void set_output_partition_graphs(bool value);
3260   private:
3261   bool _internal_output_partition_graphs() const;
3262   void _internal_set_output_partition_graphs(bool value);
3263   public:
3264 
3265   // bool report_tensor_allocations_upon_oom = 7;
3266   void clear_report_tensor_allocations_upon_oom();
3267   bool report_tensor_allocations_upon_oom() const;
3268   void set_report_tensor_allocations_upon_oom(bool value);
3269   private:
3270   bool _internal_report_tensor_allocations_upon_oom() const;
3271   void _internal_set_report_tensor_allocations_upon_oom(bool value);
3272   public:
3273 
3274   // @@protoc_insertion_point(class_scope:tensorflow.RunOptions)
3275  private:
3276   class _Internal;
3277 
3278   template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
3279   typedef void InternalArenaConstructable_;
3280   typedef void DestructorSkippable_;
3281   struct Impl_ {
3282     ::tensorflow::DebugOptions* debug_options_;
3283     ::tensorflow::RunOptions_Experimental* experimental_;
3284     ::int64_t timeout_in_ms_;
3285     int trace_level_;
3286     ::int32_t inter_op_thread_pool_;
3287     bool output_partition_graphs_;
3288     bool report_tensor_allocations_upon_oom_;
3289     mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
3290   };
3291   union { Impl_ _impl_; };
3292   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
3293 };
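// Illustrative sketch (editorial addition): the TraceLevel aliases above let
// callers write tensorflow::RunOptions::FULL_TRACE instead of the mangled
// enum name RunOptions_TraceLevel_FULL_TRACE. Hypothetical usage:
//
//   tensorflow::RunOptions opts;
//   opts.set_trace_level(tensorflow::RunOptions::FULL_TRACE);
//   opts.set_timeout_in_ms(60000);
//   opts.set_output_partition_graphs(true);
//   const std::string& level_name =
//       tensorflow::RunOptions::TraceLevel_Name(opts.trace_level());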
3294 // -------------------------------------------------------------------
3295 
3296 class RunMetadata_FunctionGraphs final :
3297     public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.RunMetadata.FunctionGraphs) */ {
3298  public:
3299   inline RunMetadata_FunctionGraphs() : RunMetadata_FunctionGraphs(nullptr) {}
3300   ~RunMetadata_FunctionGraphs() override;
3301   explicit PROTOBUF_CONSTEXPR RunMetadata_FunctionGraphs(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
3302 
3303   RunMetadata_FunctionGraphs(const RunMetadata_FunctionGraphs& from);
3304   RunMetadata_FunctionGraphs(RunMetadata_FunctionGraphs&& from) noexcept
3305     : RunMetadata_FunctionGraphs() {
3306     *this = ::std::move(from);
3307   }
3308 
3309   inline RunMetadata_FunctionGraphs& operator=(const RunMetadata_FunctionGraphs& from) {
3310     if (this == &from) return *this;
3311     CopyFrom(from);
3312     return *this;
3313   }
3314   inline RunMetadata_FunctionGraphs& operator=(RunMetadata_FunctionGraphs&& from) noexcept {
3315     if (this == &from) return *this;
3316     if (GetOwningArena() == from.GetOwningArena()
3317   #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
3318         && GetOwningArena() != nullptr
3319   #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
3320     ) {
3321       InternalSwap(&from);
3322     } else {
3323       CopyFrom(from);
3324     }
3325     return *this;
3326   }
3327 
3328   static const RunMetadata_FunctionGraphs& default_instance() {
3329     return *internal_default_instance();
3330   }
3331   static inline const RunMetadata_FunctionGraphs* internal_default_instance() {
3332     return reinterpret_cast<const RunMetadata_FunctionGraphs*>(
3333                &_RunMetadata_FunctionGraphs_default_instance_);
3334   }
3335   static constexpr int kIndexInFileMessages =
3336     14;
3337 
3338   friend void swap(RunMetadata_FunctionGraphs& a, RunMetadata_FunctionGraphs& b) {
3339     a.Swap(&b);
3340   }
3341   inline void Swap(RunMetadata_FunctionGraphs* other) {
3342     if (other == this) return;
3343   #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
3344     if (GetOwningArena() != nullptr &&
3345         GetOwningArena() == other->GetOwningArena()) {
3346    #else  // PROTOBUF_FORCE_COPY_IN_SWAP
3347     if (GetOwningArena() == other->GetOwningArena()) {
3348   #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
3349       InternalSwap(other);
3350     } else {
3351       ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
3352     }
3353   }
3354   void UnsafeArenaSwap(RunMetadata_FunctionGraphs* other) {
3355     if (other == this) return;
3356     GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
3357     InternalSwap(other);
3358   }
3359 
3360   // implements Message ----------------------------------------------
3361 
3362   RunMetadata_FunctionGraphs* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
3363     return CreateMaybeMessage<RunMetadata_FunctionGraphs>(arena);
3364   }
3365   RunMetadata_FunctionGraphs* New() const {
3366     return New(nullptr);
3367   }
3368   void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
3369   void CopyFrom(const RunMetadata_FunctionGraphs& from);
3370   void MergeFrom(const RunMetadata_FunctionGraphs& from);
3371   PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
3372   bool IsInitialized() const final;
3373 
3374   size_t ByteSizeLong() const final;
3375   const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
3376   ::uint8_t* _InternalSerialize(
3377       ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
3378   int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
3379 
3380   private:
3381   void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
3382   void SharedDtor();
3383   void SetCachedSize(int size) const;
3384   void InternalSwap(RunMetadata_FunctionGraphs* other);
3385 
3386   private:
3387   friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
3388   static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
3389     return "tensorflow.RunMetadata.FunctionGraphs";
3390   }
3391   protected:
3392   explicit RunMetadata_FunctionGraphs(::PROTOBUF_NAMESPACE_ID::Arena* arena,
3393                        bool is_message_owned = false);
3394   public:
3395 
3396   std::string GetTypeName() const final;
3397 
3398   // nested types ----------------------------------------------------
3399 
3400   // accessors -------------------------------------------------------
3401 
3402   enum : int {
3403     kPartitionGraphsFieldNumber = 1,
3404     kPreOptimizationGraphFieldNumber = 2,
3405     kPostOptimizationGraphFieldNumber = 3,
3406   };
3407   // repeated .tensorflow.GraphDef partition_graphs = 1;
3408   int partition_graphs_size() const;
3409   private:
3410   int _internal_partition_graphs_size() const;
3411   public:
3412   void clear_partition_graphs();
3413   ::tensorflow::GraphDef* mutable_partition_graphs(int index);
3414   ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef >*
3415       mutable_partition_graphs();
3416   private:
3417   const ::tensorflow::GraphDef& _internal_partition_graphs(int index) const;
3418   ::tensorflow::GraphDef* _internal_add_partition_graphs();
3419   public:
3420   const ::tensorflow::GraphDef& partition_graphs(int index) const;
3421   ::tensorflow::GraphDef* add_partition_graphs();
3422   const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef >&
3423       partition_graphs() const;
3424 
3425   // .tensorflow.GraphDef pre_optimization_graph = 2;
3426   bool has_pre_optimization_graph() const;
3427   private:
3428   bool _internal_has_pre_optimization_graph() const;
3429   public:
3430   void clear_pre_optimization_graph();
3431   const ::tensorflow::GraphDef& pre_optimization_graph() const;
3432   PROTOBUF_NODISCARD ::tensorflow::GraphDef* release_pre_optimization_graph();
3433   ::tensorflow::GraphDef* mutable_pre_optimization_graph();
3434   void set_allocated_pre_optimization_graph(::tensorflow::GraphDef* pre_optimization_graph);
3435   private:
3436   const ::tensorflow::GraphDef& _internal_pre_optimization_graph() const;
3437   ::tensorflow::GraphDef* _internal_mutable_pre_optimization_graph();
3438   public:
3439   void unsafe_arena_set_allocated_pre_optimization_graph(
3440       ::tensorflow::GraphDef* pre_optimization_graph);
3441   ::tensorflow::GraphDef* unsafe_arena_release_pre_optimization_graph();
3442 
3443   // .tensorflow.GraphDef post_optimization_graph = 3;
3444   bool has_post_optimization_graph() const;
3445   private:
3446   bool _internal_has_post_optimization_graph() const;
3447   public:
3448   void clear_post_optimization_graph();
3449   const ::tensorflow::GraphDef& post_optimization_graph() const;
3450   PROTOBUF_NODISCARD ::tensorflow::GraphDef* release_post_optimization_graph();
3451   ::tensorflow::GraphDef* mutable_post_optimization_graph();
3452   void set_allocated_post_optimization_graph(::tensorflow::GraphDef* post_optimization_graph);
3453   private:
3454   const ::tensorflow::GraphDef& _internal_post_optimization_graph() const;
3455   ::tensorflow::GraphDef* _internal_mutable_post_optimization_graph();
3456   public:
3457   void unsafe_arena_set_allocated_post_optimization_graph(
3458       ::tensorflow::GraphDef* post_optimization_graph);
3459   ::tensorflow::GraphDef* unsafe_arena_release_post_optimization_graph();
3460 
3461   // @@protoc_insertion_point(class_scope:tensorflow.RunMetadata.FunctionGraphs)
3462  private:
3463   class _Internal;
3464 
3465   template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
3466   typedef void InternalArenaConstructable_;
3467   typedef void DestructorSkippable_;
3468   struct Impl_ {
3469     ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef > partition_graphs_;
3470     ::tensorflow::GraphDef* pre_optimization_graph_;
3471     ::tensorflow::GraphDef* post_optimization_graph_;
3472     mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
3473   };
3474   union { Impl_ _impl_; };
3475   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
3476 };
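// Illustrative sketch (editorial addition): read-side access to the repeated
// and singular GraphDef fields declared above. The node-counting logic is a
// hypothetical stand-in for real inspection code.
//
//   int CountNodes(const tensorflow::RunMetadata_FunctionGraphs& fg) {
//     int nodes = 0;
//     for (const tensorflow::GraphDef& g : fg.partition_graphs()) {
//       nodes += g.node_size();
//     }
//     if (fg.has_pre_optimization_graph()) {
//       nodes += fg.pre_optimization_graph().node_size();
//     }
//     return nodes;
//   }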
3477 // -------------------------------------------------------------------
3478 
3479 class RunMetadata final :
3480     public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.RunMetadata) */ {
3481  public:
3482   inline RunMetadata() : RunMetadata(nullptr) {}
3483   ~RunMetadata() override;
3484   explicit PROTOBUF_CONSTEXPR RunMetadata(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
3485 
3486   RunMetadata(const RunMetadata& from);
3487   RunMetadata(RunMetadata&& from) noexcept
3488     : RunMetadata() {
3489     *this = ::std::move(from);
3490   }
3491 
3492   inline RunMetadata& operator=(const RunMetadata& from) {
3493     if (this == &from) return *this;
3494     CopyFrom(from);
3495     return *this;
3496   }
3497   inline RunMetadata& operator=(RunMetadata&& from) noexcept {
3498     if (this == &from) return *this;
3499     if (GetOwningArena() == from.GetOwningArena()
3500   #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
3501         && GetOwningArena() != nullptr
3502   #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
3503     ) {
3504       InternalSwap(&from);
3505     } else {
3506       CopyFrom(from);
3507     }
3508     return *this;
3509   }
3510 
3511   static const RunMetadata& default_instance() {
3512     return *internal_default_instance();
3513   }
3514   static inline const RunMetadata* internal_default_instance() {
3515     return reinterpret_cast<const RunMetadata*>(
3516                &_RunMetadata_default_instance_);
3517   }
3518   static constexpr int kIndexInFileMessages =
3519     15;
3520 
3521   friend void swap(RunMetadata& a, RunMetadata& b) {
3522     a.Swap(&b);
3523   }
3524   inline void Swap(RunMetadata* other) {
3525     if (other == this) return;
3526   #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
3527     if (GetOwningArena() != nullptr &&
3528         GetOwningArena() == other->GetOwningArena()) {
3529    #else  // PROTOBUF_FORCE_COPY_IN_SWAP
3530     if (GetOwningArena() == other->GetOwningArena()) {
3531   #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
3532       InternalSwap(other);
3533     } else {
3534       ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
3535     }
3536   }
3537   void UnsafeArenaSwap(RunMetadata* other) {
3538     if (other == this) return;
3539     GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
3540     InternalSwap(other);
3541   }
3542 
3543   // implements Message ----------------------------------------------
3544 
3545   RunMetadata* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
3546     return CreateMaybeMessage<RunMetadata>(arena);
3547   }
3548   RunMetadata* New() const {
3549     return New(nullptr);
3550   }
3551   void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
3552   void CopyFrom(const RunMetadata& from);
3553   void MergeFrom(const RunMetadata& from);
3554   PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
3555   bool IsInitialized() const final;
3556 
3557   size_t ByteSizeLong() const final;
3558   const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
3559   ::uint8_t* _InternalSerialize(
3560       ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
3561   int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
3562 
3563   private:
3564   void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
3565   void SharedDtor();
3566   void SetCachedSize(int size) const;
3567   void InternalSwap(RunMetadata* other);
3568 
3569   private:
3570   friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
3571   static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
3572     return "tensorflow.RunMetadata";
3573   }
3574   protected:
3575   explicit RunMetadata(::PROTOBUF_NAMESPACE_ID::Arena* arena,
3576                        bool is_message_owned = false);
3577   public:
3578 
3579   std::string GetTypeName() const final;
3580 
3581   // nested types ----------------------------------------------------
3582 
3583   typedef RunMetadata_FunctionGraphs FunctionGraphs;
3584 
3585   // accessors -------------------------------------------------------
3586 
3587   enum : int {
3588     kPartitionGraphsFieldNumber = 3,
3589     kFunctionGraphsFieldNumber = 4,
3590     kStepStatsFieldNumber = 1,
3591     kCostGraphFieldNumber = 2,
3592     kSessionMetadataFieldNumber = 5,
3593   };
3594   // repeated .tensorflow.GraphDef partition_graphs = 3;
3595   int partition_graphs_size() const;
3596   private:
3597   int _internal_partition_graphs_size() const;
3598   public:
3599   void clear_partition_graphs();
3600   ::tensorflow::GraphDef* mutable_partition_graphs(int index);
3601   ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef >*
3602       mutable_partition_graphs();
3603   private:
3604   const ::tensorflow::GraphDef& _internal_partition_graphs(int index) const;
3605   ::tensorflow::GraphDef* _internal_add_partition_graphs();
3606   public:
3607   const ::tensorflow::GraphDef& partition_graphs(int index) const;
3608   ::tensorflow::GraphDef* add_partition_graphs();
3609   const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef >&
3610       partition_graphs() const;
3611 
3612   // repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;
3613   int function_graphs_size() const;
3614   private:
3615   int _internal_function_graphs_size() const;
3616   public:
3617   void clear_function_graphs();
3618   ::tensorflow::RunMetadata_FunctionGraphs* mutable_function_graphs(int index);
3619   ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::RunMetadata_FunctionGraphs >*
3620       mutable_function_graphs();
3621   private:
3622   const ::tensorflow::RunMetadata_FunctionGraphs& _internal_function_graphs(int index) const;
3623   ::tensorflow::RunMetadata_FunctionGraphs* _internal_add_function_graphs();
3624   public:
3625   const ::tensorflow::RunMetadata_FunctionGraphs& function_graphs(int index) const;
3626   ::tensorflow::RunMetadata_FunctionGraphs* add_function_graphs();
3627   const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::RunMetadata_FunctionGraphs >&
3628       function_graphs() const;
3629 
3630   // .tensorflow.StepStats step_stats = 1;
3631   bool has_step_stats() const;
3632   private:
3633   bool _internal_has_step_stats() const;
3634   public:
3635   void clear_step_stats();
3636   const ::tensorflow::StepStats& step_stats() const;
3637   PROTOBUF_NODISCARD ::tensorflow::StepStats* release_step_stats();
3638   ::tensorflow::StepStats* mutable_step_stats();
3639   void set_allocated_step_stats(::tensorflow::StepStats* step_stats);
3640   private:
3641   const ::tensorflow::StepStats& _internal_step_stats() const;
3642   ::tensorflow::StepStats* _internal_mutable_step_stats();
3643   public:
3644   void unsafe_arena_set_allocated_step_stats(
3645       ::tensorflow::StepStats* step_stats);
3646   ::tensorflow::StepStats* unsafe_arena_release_step_stats();
3647 
3648   // .tensorflow.CostGraphDef cost_graph = 2;
3649   bool has_cost_graph() const;
3650   private:
3651   bool _internal_has_cost_graph() const;
3652   public:
3653   void clear_cost_graph();
3654   const ::tensorflow::CostGraphDef& cost_graph() const;
3655   PROTOBUF_NODISCARD ::tensorflow::CostGraphDef* release_cost_graph();
3656   ::tensorflow::CostGraphDef* mutable_cost_graph();
3657   void set_allocated_cost_graph(::tensorflow::CostGraphDef* cost_graph);
3658   private:
3659   const ::tensorflow::CostGraphDef& _internal_cost_graph() const;
3660   ::tensorflow::CostGraphDef* _internal_mutable_cost_graph();
3661   public:
3662   void unsafe_arena_set_allocated_cost_graph(
3663       ::tensorflow::CostGraphDef* cost_graph);
3664   ::tensorflow::CostGraphDef* unsafe_arena_release_cost_graph();
3665 
3666   // .tensorflow.SessionMetadata session_metadata = 5;
3667   bool has_session_metadata() const;
3668   private:
3669   bool _internal_has_session_metadata() const;
3670   public:
3671   void clear_session_metadata();
3672   const ::tensorflow::SessionMetadata& session_metadata() const;
3673   PROTOBUF_NODISCARD ::tensorflow::SessionMetadata* release_session_metadata();
3674   ::tensorflow::SessionMetadata* mutable_session_metadata();
3675   void set_allocated_session_metadata(::tensorflow::SessionMetadata* session_metadata);
3676   private:
3677   const ::tensorflow::SessionMetadata& _internal_session_metadata() const;
3678   ::tensorflow::SessionMetadata* _internal_mutable_session_metadata();
3679   public:
3680   void unsafe_arena_set_allocated_session_metadata(
3681       ::tensorflow::SessionMetadata* session_metadata);
3682   ::tensorflow::SessionMetadata* unsafe_arena_release_session_metadata();
3683 
3684   // @@protoc_insertion_point(class_scope:tensorflow.RunMetadata)
3685  private:
3686   class _Internal;
3687 
3688   template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
3689   typedef void InternalArenaConstructable_;
3690   typedef void DestructorSkippable_;
3691   struct Impl_ {
3692     ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef > partition_graphs_;
3693     ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::RunMetadata_FunctionGraphs > function_graphs_;
3694     ::tensorflow::StepStats* step_stats_;
3695     ::tensorflow::CostGraphDef* cost_graph_;
3696     ::tensorflow::SessionMetadata* session_metadata_;
3697     mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
3698   };
3699   union { Impl_ _impl_; };
3700   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
3701 };
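// Illustrative sketch (editorial addition): RunMetadata is normally filled in
// by a session run and inspected afterwards; a hypothetical summary pass:
//
//   void Summarize(const tensorflow::RunMetadata& meta) {
//     if (meta.has_step_stats()) {
//       const tensorflow::StepStats& stats = meta.step_stats();
//       (void)stats;  // e.g. hand to a timeline or profiler
//     }
//     for (int i = 0; i < meta.function_graphs_size(); ++i) {
//       (void)meta.function_graphs(i).partition_graphs_size();
//     }
//   }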
3702 // -------------------------------------------------------------------
3703 
3704 class TensorConnection final :
3705     public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.TensorConnection) */ {
3706  public:
3707   inline TensorConnection() : TensorConnection(nullptr) {}
3708   ~TensorConnection() override;
3709   explicit PROTOBUF_CONSTEXPR TensorConnection(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
3710 
3711   TensorConnection(const TensorConnection& from);
3712   TensorConnection(TensorConnection&& from) noexcept
3713     : TensorConnection() {
3714     *this = ::std::move(from);
3715   }
3716 
3717   inline TensorConnection& operator=(const TensorConnection& from) {
3718     if (this == &from) return *this;
3719     CopyFrom(from);
3720     return *this;
3721   }
3722   inline TensorConnection& operator=(TensorConnection&& from) noexcept {
3723     if (this == &from) return *this;
3724     if (GetOwningArena() == from.GetOwningArena()
3725   #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
3726         && GetOwningArena() != nullptr
3727   #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
3728     ) {
3729       InternalSwap(&from);
3730     } else {
3731       CopyFrom(from);
3732     }
3733     return *this;
3734   }
3735 
3736   static const TensorConnection& default_instance() {
3737     return *internal_default_instance();
3738   }
3739   static inline const TensorConnection* internal_default_instance() {
3740     return reinterpret_cast<const TensorConnection*>(
3741                &_TensorConnection_default_instance_);
3742   }
3743   static constexpr int kIndexInFileMessages =
3744     16;
3745 
3746   friend void swap(TensorConnection& a, TensorConnection& b) {
3747     a.Swap(&b);
3748   }
3749   inline void Swap(TensorConnection* other) {
3750     if (other == this) return;
3751   #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
3752     if (GetOwningArena() != nullptr &&
3753         GetOwningArena() == other->GetOwningArena()) {
3754    #else  // PROTOBUF_FORCE_COPY_IN_SWAP
3755     if (GetOwningArena() == other->GetOwningArena()) {
3756   #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
3757       InternalSwap(other);
3758     } else {
3759       ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
3760     }
3761   }
3762   void UnsafeArenaSwap(TensorConnection* other) {
3763     if (other == this) return;
3764     GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
3765     InternalSwap(other);
3766   }
3767 
3768   // implements Message ----------------------------------------------
3769 
3770   TensorConnection* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
3771     return CreateMaybeMessage<TensorConnection>(arena);
3772   }
3773   TensorConnection* New() const {
3774     return New(nullptr);
3775   }
3776   void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from)  final;
3777   void CopyFrom(const TensorConnection& from);
3778   void MergeFrom(const TensorConnection& from);
3779   PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
3780   bool IsInitialized() const final;
3781 
3782   size_t ByteSizeLong() const final;
3783   const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
3784   ::uint8_t* _InternalSerialize(
3785       ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
3786   int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
3787 
3788   private:
3789   void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
3790   void SharedDtor();
3791   void SetCachedSize(int size) const;
3792   void InternalSwap(TensorConnection* other);
3793 
3794   private:
3795   friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
3796   static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
3797     return "tensorflow.TensorConnection";
3798   }
3799   protected:
3800   explicit TensorConnection(::PROTOBUF_NAMESPACE_ID::Arena* arena,
3801                        bool is_message_owned = false);
3802   public:
3803 
3804   std::string GetTypeName() const final;
3805 
3806   // nested types ----------------------------------------------------
3807 
3808   // accessors -------------------------------------------------------
3809 
3810   enum : int {
3811     kFromTensorFieldNumber = 1,
3812     kToTensorFieldNumber = 2,
3813   };
3814   // string from_tensor = 1;
3815   void clear_from_tensor();
3816   const std::string& from_tensor() const;
3817   template <typename ArgT0 = const std::string&, typename... ArgT>
3818   void set_from_tensor(ArgT0&& arg0, ArgT... args);
3819   std::string* mutable_from_tensor();
3820   PROTOBUF_NODISCARD std::string* release_from_tensor();
3821   void set_allocated_from_tensor(std::string* from_tensor);
3822   private:
3823   const std::string& _internal_from_tensor() const;
3824   inline PROTOBUF_ALWAYS_INLINE void _internal_set_from_tensor(const std::string& value);
3825   std::string* _internal_mutable_from_tensor();
3826   public:
3827 
3828   // string to_tensor = 2;
3829   void clear_to_tensor();
3830   const std::string& to_tensor() const;
3831   template <typename ArgT0 = const std::string&, typename... ArgT>
3832   void set_to_tensor(ArgT0&& arg0, ArgT... args);
3833   std::string* mutable_to_tensor();
3834   PROTOBUF_NODISCARD std::string* release_to_tensor();
3835   void set_allocated_to_tensor(std::string* to_tensor);
3836   private:
3837   const std::string& _internal_to_tensor() const;
3838   inline PROTOBUF_ALWAYS_INLINE void _internal_set_to_tensor(const std::string& value);
3839   std::string* _internal_mutable_to_tensor();
3840   public:
3841 
3842   // @@protoc_insertion_point(class_scope:tensorflow.TensorConnection)
3843  private:
3844   class _Internal;
3845 
3846   template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
3847   typedef void InternalArenaConstructable_;
3848   typedef void DestructorSkippable_;
3849   struct Impl_ {
3850     ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr from_tensor_;
3851     ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr to_tensor_;
3852     mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
3853   };
3854   union { Impl_ _impl_; };
3855   friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
3856 };
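
// Editor's note (not generated by protoc): a minimal usage sketch for
// TensorConnection. The tensor names below are made-up placeholders, not
// names defined anywhere in this file. The intent of the message is that,
// when attached to a CallableOptions (see add_tensor_connection() below),
// the value produced at from_tensor is used in place of to_tensor.
//
//   tensorflow::TensorConnection conn;
//   conn.set_from_tensor("dense/BiasAdd:0");  // output tensor to read from
//   conn.set_to_tensor("input_b:0");          // input tensor to substitute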
// -------------------------------------------------------------------

class CallableOptions_FeedDevicesEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntryLite<CallableOptions_FeedDevicesEntry_DoNotUse,
    std::string, std::string,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> {
public:
  typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntryLite<CallableOptions_FeedDevicesEntry_DoNotUse,
    std::string, std::string,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> SuperType;
  CallableOptions_FeedDevicesEntry_DoNotUse();
  explicit PROTOBUF_CONSTEXPR CallableOptions_FeedDevicesEntry_DoNotUse(
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
  explicit CallableOptions_FeedDevicesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  void MergeFrom(const CallableOptions_FeedDevicesEntry_DoNotUse& other);
  static const CallableOptions_FeedDevicesEntry_DoNotUse* internal_default_instance() { return reinterpret_cast<const CallableOptions_FeedDevicesEntry_DoNotUse*>(&_CallableOptions_FeedDevicesEntry_DoNotUse_default_instance_); }
  static bool ValidateKey(std::string* s) {
    return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast<int>(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.CallableOptions.FeedDevicesEntry.key");
 }
  static bool ValidateValue(std::string* s) {
    return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast<int>(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.CallableOptions.FeedDevicesEntry.value");
 }
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};

// -------------------------------------------------------------------

class CallableOptions_FetchDevicesEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntryLite<CallableOptions_FetchDevicesEntry_DoNotUse,
    std::string, std::string,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> {
public:
  typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntryLite<CallableOptions_FetchDevicesEntry_DoNotUse,
    std::string, std::string,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> SuperType;
  CallableOptions_FetchDevicesEntry_DoNotUse();
  explicit PROTOBUF_CONSTEXPR CallableOptions_FetchDevicesEntry_DoNotUse(
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
  explicit CallableOptions_FetchDevicesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  void MergeFrom(const CallableOptions_FetchDevicesEntry_DoNotUse& other);
  static const CallableOptions_FetchDevicesEntry_DoNotUse* internal_default_instance() { return reinterpret_cast<const CallableOptions_FetchDevicesEntry_DoNotUse*>(&_CallableOptions_FetchDevicesEntry_DoNotUse_default_instance_); }
  static bool ValidateKey(std::string* s) {
    return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast<int>(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.CallableOptions.FetchDevicesEntry.key");
 }
  static bool ValidateValue(std::string* s) {
    return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast<int>(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.CallableOptions.FetchDevicesEntry.value");
 }
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
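
// Editor's note (not generated by protoc): the *_DoNotUse map entry classes
// above are internal plumbing for the map<string, string> fields; as their
// names say, user code should never touch them directly and should go
// through the CallableOptions map accessors instead, e.g. (names below are
// placeholders):
//
//   tensorflow::CallableOptions opts;
//   (*opts.mutable_feed_devices())["x:0"] = "/device:GPU:0";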

// -------------------------------------------------------------------

class CallableOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.CallableOptions) */ {
 public:
  inline CallableOptions() : CallableOptions(nullptr) {}
  ~CallableOptions() override;
  explicit PROTOBUF_CONSTEXPR CallableOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  CallableOptions(const CallableOptions& from);
  CallableOptions(CallableOptions&& from) noexcept
    : CallableOptions() {
    *this = ::std::move(from);
  }

  inline CallableOptions& operator=(const CallableOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline CallableOptions& operator=(CallableOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const CallableOptions& default_instance() {
    return *internal_default_instance();
  }
  static inline const CallableOptions* internal_default_instance() {
    return reinterpret_cast<const CallableOptions*>(
               &_CallableOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    19;

  friend void swap(CallableOptions& a, CallableOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(CallableOptions* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
   #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(CallableOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  CallableOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<CallableOptions>(arena);
  }
  CallableOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const CallableOptions& from);
  void MergeFrom(const CallableOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(CallableOptions* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.CallableOptions";
  }
  protected:
  explicit CallableOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------


  // accessors -------------------------------------------------------

  enum : int {
    kFeedFieldNumber = 1,
    kFetchFieldNumber = 2,
    kTargetFieldNumber = 3,
    kTensorConnectionFieldNumber = 5,
    kFeedDevicesFieldNumber = 6,
    kFetchDevicesFieldNumber = 7,
    kRunOptionsFieldNumber = 4,
    kFetchSkipSyncFieldNumber = 8,
  };
  // repeated string feed = 1;
  int feed_size() const;
  private:
  int _internal_feed_size() const;
  public:
  void clear_feed();
  const std::string& feed(int index) const;
  std::string* mutable_feed(int index);
  void set_feed(int index, const std::string& value);
  void set_feed(int index, std::string&& value);
  void set_feed(int index, const char* value);
  void set_feed(int index, const char* value, size_t size);
  std::string* add_feed();
  void add_feed(const std::string& value);
  void add_feed(std::string&& value);
  void add_feed(const char* value);
  void add_feed(const char* value, size_t size);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& feed() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_feed();
  private:
  const std::string& _internal_feed(int index) const;
  std::string* _internal_add_feed();
  public:

  // repeated string fetch = 2;
  int fetch_size() const;
  private:
  int _internal_fetch_size() const;
  public:
  void clear_fetch();
  const std::string& fetch(int index) const;
  std::string* mutable_fetch(int index);
  void set_fetch(int index, const std::string& value);
  void set_fetch(int index, std::string&& value);
  void set_fetch(int index, const char* value);
  void set_fetch(int index, const char* value, size_t size);
  std::string* add_fetch();
  void add_fetch(const std::string& value);
  void add_fetch(std::string&& value);
  void add_fetch(const char* value);
  void add_fetch(const char* value, size_t size);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& fetch() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_fetch();
  private:
  const std::string& _internal_fetch(int index) const;
  std::string* _internal_add_fetch();
  public:

  // repeated string target = 3;
  int target_size() const;
  private:
  int _internal_target_size() const;
  public:
  void clear_target();
  const std::string& target(int index) const;
  std::string* mutable_target(int index);
  void set_target(int index, const std::string& value);
  void set_target(int index, std::string&& value);
  void set_target(int index, const char* value);
  void set_target(int index, const char* value, size_t size);
  std::string* add_target();
  void add_target(const std::string& value);
  void add_target(std::string&& value);
  void add_target(const char* value);
  void add_target(const char* value, size_t size);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& target() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_target();
  private:
  const std::string& _internal_target(int index) const;
  std::string* _internal_add_target();
  public:

  // repeated .tensorflow.TensorConnection tensor_connection = 5;
  int tensor_connection_size() const;
  private:
  int _internal_tensor_connection_size() const;
  public:
  void clear_tensor_connection();
  ::tensorflow::TensorConnection* mutable_tensor_connection(int index);
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::TensorConnection >*
      mutable_tensor_connection();
  private:
  const ::tensorflow::TensorConnection& _internal_tensor_connection(int index) const;
  ::tensorflow::TensorConnection* _internal_add_tensor_connection();
  public:
  const ::tensorflow::TensorConnection& tensor_connection(int index) const;
  ::tensorflow::TensorConnection* add_tensor_connection();
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::TensorConnection >&
      tensor_connection() const;

  // map<string, string> feed_devices = 6;
  int feed_devices_size() const;
  private:
  int _internal_feed_devices_size() const;
  public:
  void clear_feed_devices();
  private:
  const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >&
      _internal_feed_devices() const;
  ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >*
      _internal_mutable_feed_devices();
  public:
  const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >&
      feed_devices() const;
  ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >*
      mutable_feed_devices();

  // map<string, string> fetch_devices = 7;
  int fetch_devices_size() const;
  private:
  int _internal_fetch_devices_size() const;
  public:
  void clear_fetch_devices();
  private:
  const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >&
      _internal_fetch_devices() const;
  ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >*
      _internal_mutable_fetch_devices();
  public:
  const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >&
      fetch_devices() const;
  ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >*
      mutable_fetch_devices();

  // .tensorflow.RunOptions run_options = 4;
  bool has_run_options() const;
  private:
  bool _internal_has_run_options() const;
  public:
  void clear_run_options();
  const ::tensorflow::RunOptions& run_options() const;
  PROTOBUF_NODISCARD ::tensorflow::RunOptions* release_run_options();
  ::tensorflow::RunOptions* mutable_run_options();
  void set_allocated_run_options(::tensorflow::RunOptions* run_options);
  private:
  const ::tensorflow::RunOptions& _internal_run_options() const;
  ::tensorflow::RunOptions* _internal_mutable_run_options();
  public:
  void unsafe_arena_set_allocated_run_options(
      ::tensorflow::RunOptions* run_options);
  ::tensorflow::RunOptions* unsafe_arena_release_run_options();

  // bool fetch_skip_sync = 8;
  void clear_fetch_skip_sync();
  bool fetch_skip_sync() const;
  void set_fetch_skip_sync(bool value);
  private:
  bool _internal_fetch_skip_sync() const;
  void _internal_set_fetch_skip_sync(bool value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.CallableOptions)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> feed_;
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> fetch_;
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> target_;
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::TensorConnection > tensor_connection_;
    ::PROTOBUF_NAMESPACE_ID::internal::MapFieldLite<
        CallableOptions_FeedDevicesEntry_DoNotUse,
        std::string, std::string,
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> feed_devices_;
    ::PROTOBUF_NAMESPACE_ID::internal::MapFieldLite<
        CallableOptions_FetchDevicesEntry_DoNotUse,
        std::string, std::string,
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> fetch_devices_;
    ::tensorflow::RunOptions* run_options_;
    bool fetch_skip_sync_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
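
// Editor's note (not generated by protoc): a minimal sketch of building a
// CallableOptions, touching one accessor per field. All tensor, op, and
// device names are illustrative placeholders, and RunOptions::FULL_TRACE is
// assumed from the RunOptions message declared earlier in this file.
//
//   tensorflow::CallableOptions opts;
//   opts.add_feed("x:0");                       // repeated string feed = 1
//   opts.add_fetch("y:0");                      // repeated string fetch = 2
//   opts.add_target("train_op");                // repeated string target = 3
//   opts.mutable_run_options()->set_trace_level(
//       tensorflow::RunOptions::FULL_TRACE);    // RunOptions run_options = 4
//   auto* conn = opts.add_tensor_connection();  // TensorConnection = 5
//   conn->set_from_tensor("y:0");
//   conn->set_to_tensor("x:0");
//   (*opts.mutable_feed_devices())["x:0"] =
//       "/device:GPU:0";                        // map feed_devices = 6
//   opts.set_fetch_skip_sync(true);             // bool fetch_skip_sync = 8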
// ===================================================================


// ===================================================================

#ifdef __GNUC__
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif  // __GNUC__
// GPUOptions_Experimental_VirtualDevices

// repeated float memory_limit_mb = 1;
inline int GPUOptions_Experimental_VirtualDevices::_internal_memory_limit_mb_size() const {
  return _impl_.memory_limit_mb_.size();
}
inline int GPUOptions_Experimental_VirtualDevices::memory_limit_mb_size() const {
  return _internal_memory_limit_mb_size();
}
inline void GPUOptions_Experimental_VirtualDevices::clear_memory_limit_mb() {
  _impl_.memory_limit_mb_.Clear();
}
inline float GPUOptions_Experimental_VirtualDevices::_internal_memory_limit_mb(int index) const {
  return _impl_.memory_limit_mb_.Get(index);
}
inline float GPUOptions_Experimental_VirtualDevices::memory_limit_mb(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.VirtualDevices.memory_limit_mb)
  return _internal_memory_limit_mb(index);
}
inline void GPUOptions_Experimental_VirtualDevices::set_memory_limit_mb(int index, float value) {
  _impl_.memory_limit_mb_.Set(index, value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.VirtualDevices.memory_limit_mb)
}
inline void GPUOptions_Experimental_VirtualDevices::_internal_add_memory_limit_mb(float value) {
  _impl_.memory_limit_mb_.Add(value);
}
inline void GPUOptions_Experimental_VirtualDevices::add_memory_limit_mb(float value) {
  _internal_add_memory_limit_mb(value);
  // @@protoc_insertion_point(field_add:tensorflow.GPUOptions.Experimental.VirtualDevices.memory_limit_mb)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >&
GPUOptions_Experimental_VirtualDevices::_internal_memory_limit_mb() const {
  return _impl_.memory_limit_mb_;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >&
GPUOptions_Experimental_VirtualDevices::memory_limit_mb() const {
  // @@protoc_insertion_point(field_list:tensorflow.GPUOptions.Experimental.VirtualDevices.memory_limit_mb)
  return _internal_memory_limit_mb();
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >*
GPUOptions_Experimental_VirtualDevices::_internal_mutable_memory_limit_mb() {
  return &_impl_.memory_limit_mb_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >*
GPUOptions_Experimental_VirtualDevices::mutable_memory_limit_mb() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.GPUOptions.Experimental.VirtualDevices.memory_limit_mb)
  return _internal_mutable_memory_limit_mb();
}

// repeated int32 priority = 2;
inline int GPUOptions_Experimental_VirtualDevices::_internal_priority_size() const {
  return _impl_.priority_.size();
}
inline int GPUOptions_Experimental_VirtualDevices::priority_size() const {
  return _internal_priority_size();
}
inline void GPUOptions_Experimental_VirtualDevices::clear_priority() {
  _impl_.priority_.Clear();
}
inline ::int32_t GPUOptions_Experimental_VirtualDevices::_internal_priority(int index) const {
  return _impl_.priority_.Get(index);
}
inline ::int32_t GPUOptions_Experimental_VirtualDevices::priority(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.VirtualDevices.priority)
  return _internal_priority(index);
}
inline void GPUOptions_Experimental_VirtualDevices::set_priority(int index, ::int32_t value) {
  _impl_.priority_.Set(index, value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.VirtualDevices.priority)
}
inline void GPUOptions_Experimental_VirtualDevices::_internal_add_priority(::int32_t value) {
  _impl_.priority_.Add(value);
}
inline void GPUOptions_Experimental_VirtualDevices::add_priority(::int32_t value) {
  _internal_add_priority(value);
  // @@protoc_insertion_point(field_add:tensorflow.GPUOptions.Experimental.VirtualDevices.priority)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
GPUOptions_Experimental_VirtualDevices::_internal_priority() const {
  return _impl_.priority_;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
GPUOptions_Experimental_VirtualDevices::priority() const {
  // @@protoc_insertion_point(field_list:tensorflow.GPUOptions.Experimental.VirtualDevices.priority)
  return _internal_priority();
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
GPUOptions_Experimental_VirtualDevices::_internal_mutable_priority() {
  return &_impl_.priority_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
GPUOptions_Experimental_VirtualDevices::mutable_priority() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.GPUOptions.Experimental.VirtualDevices.priority)
  return _internal_mutable_priority();
}

// repeated int32 device_ordinal = 3;
inline int GPUOptions_Experimental_VirtualDevices::_internal_device_ordinal_size() const {
  return _impl_.device_ordinal_.size();
}
inline int GPUOptions_Experimental_VirtualDevices::device_ordinal_size() const {
  return _internal_device_ordinal_size();
}
inline void GPUOptions_Experimental_VirtualDevices::clear_device_ordinal() {
  _impl_.device_ordinal_.Clear();
}
inline ::int32_t GPUOptions_Experimental_VirtualDevices::_internal_device_ordinal(int index) const {
  return _impl_.device_ordinal_.Get(index);
}
inline ::int32_t GPUOptions_Experimental_VirtualDevices::device_ordinal(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.VirtualDevices.device_ordinal)
  return _internal_device_ordinal(index);
}
inline void GPUOptions_Experimental_VirtualDevices::set_device_ordinal(int index, ::int32_t value) {
  _impl_.device_ordinal_.Set(index, value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.VirtualDevices.device_ordinal)
}
inline void GPUOptions_Experimental_VirtualDevices::_internal_add_device_ordinal(::int32_t value) {
  _impl_.device_ordinal_.Add(value);
}
inline void GPUOptions_Experimental_VirtualDevices::add_device_ordinal(::int32_t value) {
  _internal_add_device_ordinal(value);
  // @@protoc_insertion_point(field_add:tensorflow.GPUOptions.Experimental.VirtualDevices.device_ordinal)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
GPUOptions_Experimental_VirtualDevices::_internal_device_ordinal() const {
  return _impl_.device_ordinal_;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
GPUOptions_Experimental_VirtualDevices::device_ordinal() const {
  // @@protoc_insertion_point(field_list:tensorflow.GPUOptions.Experimental.VirtualDevices.device_ordinal)
  return _internal_device_ordinal();
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
GPUOptions_Experimental_VirtualDevices::_internal_mutable_device_ordinal() {
  return &_impl_.device_ordinal_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
GPUOptions_Experimental_VirtualDevices::mutable_device_ordinal() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.GPUOptions.Experimental.VirtualDevices.device_ordinal)
  return _internal_mutable_device_ordinal();
}
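
// Editor's note (not generated by protoc): memory_limit_mb, priority, and
// device_ordinal are parallel repeated fields; entry i of each describes
// virtual device i. A sketch splitting one physical GPU into two virtual
// devices (mutable_experimental() is the GPUOptions accessor declared
// earlier in this file; the sizes are placeholders):
//
//   tensorflow::GPUOptions gpu;
//   auto* vdev = gpu.mutable_experimental()->add_virtual_devices();
//   vdev->add_memory_limit_mb(2048.0f);  // virtual device 0: 2 GB
//   vdev->add_memory_limit_mb(4096.0f);  // virtual device 1: 4 GB
//   vdev->add_priority(0);               // priority of virtual device 0
//   vdev->add_priority(-1);              // priority of virtual device 1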

// -------------------------------------------------------------------

// GPUOptions_Experimental

// repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
inline int GPUOptions_Experimental::_internal_virtual_devices_size() const {
  return _impl_.virtual_devices_.size();
}
inline int GPUOptions_Experimental::virtual_devices_size() const {
  return _internal_virtual_devices_size();
}
inline void GPUOptions_Experimental::clear_virtual_devices() {
  _impl_.virtual_devices_.Clear();
}
inline ::tensorflow::GPUOptions_Experimental_VirtualDevices* GPUOptions_Experimental::mutable_virtual_devices(int index) {
  // @@protoc_insertion_point(field_mutable:tensorflow.GPUOptions.Experimental.virtual_devices)
  return _impl_.virtual_devices_.Mutable(index);
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GPUOptions_Experimental_VirtualDevices >*
GPUOptions_Experimental::mutable_virtual_devices() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.GPUOptions.Experimental.virtual_devices)
  return &_impl_.virtual_devices_;
}
inline const ::tensorflow::GPUOptions_Experimental_VirtualDevices& GPUOptions_Experimental::_internal_virtual_devices(int index) const {
  return _impl_.virtual_devices_.Get(index);
}
inline const ::tensorflow::GPUOptions_Experimental_VirtualDevices& GPUOptions_Experimental::virtual_devices(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.virtual_devices)
  return _internal_virtual_devices(index);
}
inline ::tensorflow::GPUOptions_Experimental_VirtualDevices* GPUOptions_Experimental::_internal_add_virtual_devices() {
  return _impl_.virtual_devices_.Add();
}
inline ::tensorflow::GPUOptions_Experimental_VirtualDevices* GPUOptions_Experimental::add_virtual_devices() {
  ::tensorflow::GPUOptions_Experimental_VirtualDevices* _add = _internal_add_virtual_devices();
  // @@protoc_insertion_point(field_add:tensorflow.GPUOptions.Experimental.virtual_devices)
  return _add;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GPUOptions_Experimental_VirtualDevices >&
GPUOptions_Experimental::virtual_devices() const {
  // @@protoc_insertion_point(field_list:tensorflow.GPUOptions.Experimental.virtual_devices)
  return _impl_.virtual_devices_;
}

// bool use_unified_memory = 2;
inline void GPUOptions_Experimental::clear_use_unified_memory() {
  _impl_.use_unified_memory_ = false;
}
inline bool GPUOptions_Experimental::_internal_use_unified_memory() const {
  return _impl_.use_unified_memory_;
}
inline bool GPUOptions_Experimental::use_unified_memory() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.use_unified_memory)
  return _internal_use_unified_memory();
}
inline void GPUOptions_Experimental::_internal_set_use_unified_memory(bool value) {

  _impl_.use_unified_memory_ = value;
}
inline void GPUOptions_Experimental::set_use_unified_memory(bool value) {
  _internal_set_use_unified_memory(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.use_unified_memory)
}

// int32 num_dev_to_dev_copy_streams = 3;
inline void GPUOptions_Experimental::clear_num_dev_to_dev_copy_streams() {
  _impl_.num_dev_to_dev_copy_streams_ = 0;
}
inline ::int32_t GPUOptions_Experimental::_internal_num_dev_to_dev_copy_streams() const {
  return _impl_.num_dev_to_dev_copy_streams_;
}
inline ::int32_t GPUOptions_Experimental::num_dev_to_dev_copy_streams() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.num_dev_to_dev_copy_streams)
  return _internal_num_dev_to_dev_copy_streams();
}
inline void GPUOptions_Experimental::_internal_set_num_dev_to_dev_copy_streams(::int32_t value) {

  _impl_.num_dev_to_dev_copy_streams_ = value;
}
inline void GPUOptions_Experimental::set_num_dev_to_dev_copy_streams(::int32_t value) {
  _internal_set_num_dev_to_dev_copy_streams(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.num_dev_to_dev_copy_streams)
}

// string collective_ring_order = 4;
inline void GPUOptions_Experimental::clear_collective_ring_order() {
  _impl_.collective_ring_order_.ClearToEmpty();
}
inline const std::string& GPUOptions_Experimental::collective_ring_order() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.collective_ring_order)
  return _internal_collective_ring_order();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void GPUOptions_Experimental::set_collective_ring_order(ArgT0&& arg0, ArgT... args) {

 _impl_.collective_ring_order_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.collective_ring_order)
}
inline std::string* GPUOptions_Experimental::mutable_collective_ring_order() {
  std::string* _s = _internal_mutable_collective_ring_order();
  // @@protoc_insertion_point(field_mutable:tensorflow.GPUOptions.Experimental.collective_ring_order)
  return _s;
}
inline const std::string& GPUOptions_Experimental::_internal_collective_ring_order() const {
  return _impl_.collective_ring_order_.Get();
}
inline void GPUOptions_Experimental::_internal_set_collective_ring_order(const std::string& value) {

  _impl_.collective_ring_order_.Set(value, GetArenaForAllocation());
}
inline std::string* GPUOptions_Experimental::_internal_mutable_collective_ring_order() {

  return _impl_.collective_ring_order_.Mutable(GetArenaForAllocation());
}
inline std::string* GPUOptions_Experimental::release_collective_ring_order() {
  // @@protoc_insertion_point(field_release:tensorflow.GPUOptions.Experimental.collective_ring_order)
  return _impl_.collective_ring_order_.Release();
}
inline void GPUOptions_Experimental::set_allocated_collective_ring_order(std::string* collective_ring_order) {
  _impl_.collective_ring_order_.SetAllocated(collective_ring_order, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.collective_ring_order_.IsDefault()) {
    _impl_.collective_ring_order_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.GPUOptions.Experimental.collective_ring_order)
}

// bool timestamped_allocator = 5;
inline void GPUOptions_Experimental::clear_timestamped_allocator() {
  _impl_.timestamped_allocator_ = false;
}
inline bool GPUOptions_Experimental::_internal_timestamped_allocator() const {
  return _impl_.timestamped_allocator_;
}
inline bool GPUOptions_Experimental::timestamped_allocator() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.timestamped_allocator)
  return _internal_timestamped_allocator();
}
inline void GPUOptions_Experimental::_internal_set_timestamped_allocator(bool value) {

  _impl_.timestamped_allocator_ = value;
}
inline void GPUOptions_Experimental::set_timestamped_allocator(bool value) {
  _internal_set_timestamped_allocator(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.timestamped_allocator)
}

// int32 kernel_tracker_max_interval = 7;
inline void GPUOptions_Experimental::clear_kernel_tracker_max_interval() {
  _impl_.kernel_tracker_max_interval_ = 0;
}
inline ::int32_t GPUOptions_Experimental::_internal_kernel_tracker_max_interval() const {
  return _impl_.kernel_tracker_max_interval_;
}
inline ::int32_t GPUOptions_Experimental::kernel_tracker_max_interval() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.kernel_tracker_max_interval)
  return _internal_kernel_tracker_max_interval();
}
inline void GPUOptions_Experimental::_internal_set_kernel_tracker_max_interval(::int32_t value) {

  _impl_.kernel_tracker_max_interval_ = value;
}
inline void GPUOptions_Experimental::set_kernel_tracker_max_interval(::int32_t value) {
  _internal_set_kernel_tracker_max_interval(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.kernel_tracker_max_interval)
}

// int32 kernel_tracker_max_bytes = 8;
inline void GPUOptions_Experimental::clear_kernel_tracker_max_bytes() {
  _impl_.kernel_tracker_max_bytes_ = 0;
}
inline ::int32_t GPUOptions_Experimental::_internal_kernel_tracker_max_bytes() const {
  return _impl_.kernel_tracker_max_bytes_;
}
inline ::int32_t GPUOptions_Experimental::kernel_tracker_max_bytes() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.kernel_tracker_max_bytes)
  return _internal_kernel_tracker_max_bytes();
}
inline void GPUOptions_Experimental::_internal_set_kernel_tracker_max_bytes(::int32_t value) {

  _impl_.kernel_tracker_max_bytes_ = value;
}
inline void GPUOptions_Experimental::set_kernel_tracker_max_bytes(::int32_t value) {
  _internal_set_kernel_tracker_max_bytes(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.kernel_tracker_max_bytes)
}

// int32 kernel_tracker_max_pending = 9;
inline void GPUOptions_Experimental::clear_kernel_tracker_max_pending() {
  _impl_.kernel_tracker_max_pending_ = 0;
}
inline ::int32_t GPUOptions_Experimental::_internal_kernel_tracker_max_pending() const {
  return _impl_.kernel_tracker_max_pending_;
}
inline ::int32_t GPUOptions_Experimental::kernel_tracker_max_pending() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.kernel_tracker_max_pending)
  return _internal_kernel_tracker_max_pending();
}
inline void GPUOptions_Experimental::_internal_set_kernel_tracker_max_pending(::int32_t value) {

  _impl_.kernel_tracker_max_pending_ = value;
}
inline void GPUOptions_Experimental::set_kernel_tracker_max_pending(::int32_t value) {
  _internal_set_kernel_tracker_max_pending(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.kernel_tracker_max_pending)
}

// double internal_fragmentation_fraction = 10;
inline void GPUOptions_Experimental::clear_internal_fragmentation_fraction() {
  _impl_.internal_fragmentation_fraction_ = 0;
}
inline double GPUOptions_Experimental::_internal_internal_fragmentation_fraction() const {
  return _impl_.internal_fragmentation_fraction_;
}
inline double GPUOptions_Experimental::internal_fragmentation_fraction() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.internal_fragmentation_fraction)
  return _internal_internal_fragmentation_fraction();
}
inline void GPUOptions_Experimental::_internal_set_internal_fragmentation_fraction(double value) {

  _impl_.internal_fragmentation_fraction_ = value;
}
inline void GPUOptions_Experimental::set_internal_fragmentation_fraction(double value) {
  _internal_set_internal_fragmentation_fraction(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.internal_fragmentation_fraction)
}

// bool use_cuda_malloc_async = 11;
inline void GPUOptions_Experimental::clear_use_cuda_malloc_async() {
  _impl_.use_cuda_malloc_async_ = false;
}
inline bool GPUOptions_Experimental::_internal_use_cuda_malloc_async() const {
  return _impl_.use_cuda_malloc_async_;
}
inline bool GPUOptions_Experimental::use_cuda_malloc_async() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.use_cuda_malloc_async)
  return _internal_use_cuda_malloc_async();
}
inline void GPUOptions_Experimental::_internal_set_use_cuda_malloc_async(bool value) {

  _impl_.use_cuda_malloc_async_ = value;
}
inline void GPUOptions_Experimental::set_use_cuda_malloc_async(bool value) {
  _internal_set_use_cuda_malloc_async(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.use_cuda_malloc_async)
}

// bool disallow_retry_on_allocation_failure = 12;
inline void GPUOptions_Experimental::clear_disallow_retry_on_allocation_failure() {
  _impl_.disallow_retry_on_allocation_failure_ = false;
}
inline bool GPUOptions_Experimental::_internal_disallow_retry_on_allocation_failure() const {
  return _impl_.disallow_retry_on_allocation_failure_;
}
inline bool GPUOptions_Experimental::disallow_retry_on_allocation_failure() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.disallow_retry_on_allocation_failure)
  return _internal_disallow_retry_on_allocation_failure();
}
inline void GPUOptions_Experimental::_internal_set_disallow_retry_on_allocation_failure(bool value) {

  _impl_.disallow_retry_on_allocation_failure_ = value;
}
inline void GPUOptions_Experimental::set_disallow_retry_on_allocation_failure(bool value) {
  _internal_set_disallow_retry_on_allocation_failure(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.disallow_retry_on_allocation_failure)
}
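
// Editor's note (not generated by protoc): a sketch exercising a few of the
// allocator-related experimental knobs above; the one-line descriptions are
// the editor's summaries, not protoc output.
//
//   tensorflow::GPUOptions_Experimental exp;
//   exp.set_use_unified_memory(true);     // CUDA unified memory allocations
//   exp.set_use_cuda_malloc_async(true);  // cudaMallocAsync-backed allocator
//   exp.set_disallow_retry_on_allocation_failure(true);  // fail fast, no retry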

// -------------------------------------------------------------------

// GPUOptions

// double per_process_gpu_memory_fraction = 1;
inline void GPUOptions::clear_per_process_gpu_memory_fraction() {
  _impl_.per_process_gpu_memory_fraction_ = 0;
}
inline double GPUOptions::_internal_per_process_gpu_memory_fraction() const {
  return _impl_.per_process_gpu_memory_fraction_;
}
inline double GPUOptions::per_process_gpu_memory_fraction() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.per_process_gpu_memory_fraction)
  return _internal_per_process_gpu_memory_fraction();
}
inline void GPUOptions::_internal_set_per_process_gpu_memory_fraction(double value) {

  _impl_.per_process_gpu_memory_fraction_ = value;
}
inline void GPUOptions::set_per_process_gpu_memory_fraction(double value) {
  _internal_set_per_process_gpu_memory_fraction(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.per_process_gpu_memory_fraction)
}

// bool allow_growth = 4;
inline void GPUOptions::clear_allow_growth() {
  _impl_.allow_growth_ = false;
}
inline bool GPUOptions::_internal_allow_growth() const {
  return _impl_.allow_growth_;
}
inline bool GPUOptions::allow_growth() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.allow_growth)
  return _internal_allow_growth();
}
inline void GPUOptions::_internal_set_allow_growth(bool value) {

  _impl_.allow_growth_ = value;
}
inline void GPUOptions::set_allow_growth(bool value) {
  _internal_set_allow_growth(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.allow_growth)
}
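
// Editor's note (not generated by protoc): the two most common GPU memory
// settings, sketched together. allow_growth starts the allocator small and
// grows it on demand; per_process_gpu_memory_fraction caps the fraction of
// each visible GPU's memory the process may use. The 0.5 is a placeholder.
//
//   tensorflow::GPUOptions gpu;
//   gpu.set_allow_growth(true);
//   gpu.set_per_process_gpu_memory_fraction(0.5);  // at most ~50% per GPU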

// string allocator_type = 2;
inline void GPUOptions::clear_allocator_type() {
  _impl_.allocator_type_.ClearToEmpty();
}
inline const std::string& GPUOptions::allocator_type() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.allocator_type)
  return _internal_allocator_type();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void GPUOptions::set_allocator_type(ArgT0&& arg0, ArgT... args) {

 _impl_.allocator_type_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.allocator_type)
}
inline std::string* GPUOptions::mutable_allocator_type() {
  std::string* _s = _internal_mutable_allocator_type();
  // @@protoc_insertion_point(field_mutable:tensorflow.GPUOptions.allocator_type)
  return _s;
}
inline const std::string& GPUOptions::_internal_allocator_type() const {
  return _impl_.allocator_type_.Get();
}
inline void GPUOptions::_internal_set_allocator_type(const std::string& value) {

  _impl_.allocator_type_.Set(value, GetArenaForAllocation());
}
inline std::string* GPUOptions::_internal_mutable_allocator_type() {

  return _impl_.allocator_type_.Mutable(GetArenaForAllocation());
}
inline std::string* GPUOptions::release_allocator_type() {
  // @@protoc_insertion_point(field_release:tensorflow.GPUOptions.allocator_type)
  return _impl_.allocator_type_.Release();
}
inline void GPUOptions::set_allocated_allocator_type(std::string* allocator_type) {
  _impl_.allocator_type_.SetAllocated(allocator_type, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.allocator_type_.IsDefault()) {
    _impl_.allocator_type_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.GPUOptions.allocator_type)
}

// int64 deferred_deletion_bytes = 3;
inline void GPUOptions::clear_deferred_deletion_bytes() {
  _impl_.deferred_deletion_bytes_ = ::int64_t{0};
}
inline ::int64_t GPUOptions::_internal_deferred_deletion_bytes() const {
  return _impl_.deferred_deletion_bytes_;
}
inline ::int64_t GPUOptions::deferred_deletion_bytes() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.deferred_deletion_bytes)
  return _internal_deferred_deletion_bytes();
}
inline void GPUOptions::_internal_set_deferred_deletion_bytes(::int64_t value) {

  _impl_.deferred_deletion_bytes_ = value;
}
inline void GPUOptions::set_deferred_deletion_bytes(::int64_t value) {
  _internal_set_deferred_deletion_bytes(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.deferred_deletion_bytes)
}

// string visible_device_list = 5;
inline void GPUOptions::clear_visible_device_list() {
  _impl_.visible_device_list_.ClearToEmpty();
}
inline const std::string& GPUOptions::visible_device_list() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.visible_device_list)
  return _internal_visible_device_list();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void GPUOptions::set_visible_device_list(ArgT0&& arg0, ArgT... args) {

 _impl_.visible_device_list_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.visible_device_list)
}
inline std::string* GPUOptions::mutable_visible_device_list() {
  std::string* _s = _internal_mutable_visible_device_list();
  // @@protoc_insertion_point(field_mutable:tensorflow.GPUOptions.visible_device_list)
  return _s;
}
inline const std::string& GPUOptions::_internal_visible_device_list() const {
  return _impl_.visible_device_list_.Get();
}
inline void GPUOptions::_internal_set_visible_device_list(const std::string& value) {

  _impl_.visible_device_list_.Set(value, GetArenaForAllocation());
}
inline std::string* GPUOptions::_internal_mutable_visible_device_list() {

  return _impl_.visible_device_list_.Mutable(GetArenaForAllocation());
}
inline std::string* GPUOptions::release_visible_device_list() {
  // @@protoc_insertion_point(field_release:tensorflow.GPUOptions.visible_device_list)
  return _impl_.visible_device_list_.Release();
}
inline void GPUOptions::set_allocated_visible_device_list(std::string* visible_device_list) {
  _impl_.visible_device_list_.SetAllocated(visible_device_list, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.visible_device_list_.IsDefault()) {
    _impl_.visible_device_list_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.GPUOptions.visible_device_list)
}
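
// Editor's note (not generated by protoc): visible_device_list is a
// comma-separated remapping from physical GPU ids to the process's virtual
// device order; e.g. "1,0" makes physical GPU 1 appear as /device:GPU:0.
// A sketch (the ids are placeholders):
//
//   tensorflow::GPUOptions gpu;
//   gpu.set_visible_device_list("1,0");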

// int32 polling_active_delay_usecs = 6;
inline void GPUOptions::clear_polling_active_delay_usecs() {
  _impl_.polling_active_delay_usecs_ = 0;
}
inline ::int32_t GPUOptions::_internal_polling_active_delay_usecs() const {
  return _impl_.polling_active_delay_usecs_;
}
inline ::int32_t GPUOptions::polling_active_delay_usecs() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.polling_active_delay_usecs)
  return _internal_polling_active_delay_usecs();
}
inline void GPUOptions::_internal_set_polling_active_delay_usecs(::int32_t value) {

  _impl_.polling_active_delay_usecs_ = value;
}
inline void GPUOptions::set_polling_active_delay_usecs(::int32_t value) {
  _internal_set_polling_active_delay_usecs(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.polling_active_delay_usecs)
}

// int32 polling_inactive_delay_msecs = 7;
inline void GPUOptions::clear_polling_inactive_delay_msecs() {
  _impl_.polling_inactive_delay_msecs_ = 0;
}
inline ::int32_t GPUOptions::_internal_polling_inactive_delay_msecs() const {
  return _impl_.polling_inactive_delay_msecs_;
}
inline ::int32_t GPUOptions::polling_inactive_delay_msecs() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.polling_inactive_delay_msecs)
  return _internal_polling_inactive_delay_msecs();
}
inline void GPUOptions::_internal_set_polling_inactive_delay_msecs(::int32_t value) {

  _impl_.polling_inactive_delay_msecs_ = value;
}
inline void GPUOptions::set_polling_inactive_delay_msecs(::int32_t value) {
  _internal_set_polling_inactive_delay_msecs(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.polling_inactive_delay_msecs)
}

// bool force_gpu_compatible = 8;
inline void GPUOptions::clear_force_gpu_compatible() {
  _impl_.force_gpu_compatible_ = false;
}
inline bool GPUOptions::_internal_force_gpu_compatible() const {
  return _impl_.force_gpu_compatible_;
}
inline bool GPUOptions::force_gpu_compatible() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.force_gpu_compatible)
  return _internal_force_gpu_compatible();
}
inline void GPUOptions::_internal_set_force_gpu_compatible(bool value) {

  _impl_.force_gpu_compatible_ = value;
}
inline void GPUOptions::set_force_gpu_compatible(bool value) {
  _internal_set_force_gpu_compatible(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.force_gpu_compatible)
}
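
// Usage sketch (editorial example, not generated code): scalar fields are set
// and read by value; clear_*() restores the proto3 zero default. `gpu_options`
// is an assumed local object.
//
//   tensorflow::GPUOptions gpu_options;
//   gpu_options.set_polling_active_delay_usecs(10);
//   gpu_options.set_polling_inactive_delay_msecs(100);
//   gpu_options.set_force_gpu_compatible(true);
//   ::int32_t usecs = gpu_options.polling_active_delay_usecs();  // 10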

// .tensorflow.GPUOptions.Experimental experimental = 9;
inline bool GPUOptions::_internal_has_experimental() const {
  return this != internal_default_instance() && _impl_.experimental_ != nullptr;
}
inline bool GPUOptions::has_experimental() const {
  return _internal_has_experimental();
}
inline void GPUOptions::clear_experimental() {
  if (GetArenaForAllocation() == nullptr && _impl_.experimental_ != nullptr) {
    delete _impl_.experimental_;
  }
  _impl_.experimental_ = nullptr;
}
inline const ::tensorflow::GPUOptions_Experimental& GPUOptions::_internal_experimental() const {
  const ::tensorflow::GPUOptions_Experimental* p = _impl_.experimental_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::GPUOptions_Experimental&>(
      ::tensorflow::_GPUOptions_Experimental_default_instance_);
}
inline const ::tensorflow::GPUOptions_Experimental& GPUOptions::experimental() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.experimental)
  return _internal_experimental();
}
inline void GPUOptions::unsafe_arena_set_allocated_experimental(
    ::tensorflow::GPUOptions_Experimental* experimental) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.experimental_);
  }
  _impl_.experimental_ = experimental;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.GPUOptions.experimental)
}
inline ::tensorflow::GPUOptions_Experimental* GPUOptions::release_experimental() {

  ::tensorflow::GPUOptions_Experimental* temp = _impl_.experimental_;
  _impl_.experimental_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::GPUOptions_Experimental* GPUOptions::unsafe_arena_release_experimental() {
  // @@protoc_insertion_point(field_release:tensorflow.GPUOptions.experimental)

  ::tensorflow::GPUOptions_Experimental* temp = _impl_.experimental_;
  _impl_.experimental_ = nullptr;
  return temp;
}
inline ::tensorflow::GPUOptions_Experimental* GPUOptions::_internal_mutable_experimental() {

  if (_impl_.experimental_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::GPUOptions_Experimental>(GetArenaForAllocation());
    _impl_.experimental_ = p;
  }
  return _impl_.experimental_;
}
inline ::tensorflow::GPUOptions_Experimental* GPUOptions::mutable_experimental() {
  ::tensorflow::GPUOptions_Experimental* _msg = _internal_mutable_experimental();
  // @@protoc_insertion_point(field_mutable:tensorflow.GPUOptions.experimental)
  return _msg;
}
inline void GPUOptions::set_allocated_experimental(::tensorflow::GPUOptions_Experimental* experimental) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.experimental_;
  }
  if (experimental) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(experimental);
    if (message_arena != submessage_arena) {
      experimental = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, experimental, submessage_arena);
    }

  } else {

  }
  _impl_.experimental_ = experimental;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.GPUOptions.experimental)
}
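
// Usage sketch (editorial example, not generated code): for the submessage
// field, mutable_experimental() lazily creates the message on the owning
// arena, release_experimental() transfers ownership to the caller (copying
// off the arena when needed), and set_allocated_experimental() adopts a
// heap-allocated message. `gpu_options` is an assumed local object; the
// snippet also assumes <memory> for std::unique_ptr.
//
//   tensorflow::GPUOptions gpu_options;
//   gpu_options.mutable_experimental();  // created on first access
//   if (gpu_options.has_experimental()) {
//     std::unique_ptr<tensorflow::GPUOptions_Experimental> taken(
//         gpu_options.release_experimental());          // caller now owns it
//     gpu_options.set_allocated_experimental(taken.release());  // give it back
//   }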

// -------------------------------------------------------------------

// OptimizerOptions

// bool do_common_subexpression_elimination = 1;
inline void OptimizerOptions::clear_do_common_subexpression_elimination() {
  _impl_.do_common_subexpression_elimination_ = false;
}
inline bool OptimizerOptions::_internal_do_common_subexpression_elimination() const {
  return _impl_.do_common_subexpression_elimination_;
}
inline bool OptimizerOptions::do_common_subexpression_elimination() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.do_common_subexpression_elimination)
  return _internal_do_common_subexpression_elimination();
}
inline void OptimizerOptions::_internal_set_do_common_subexpression_elimination(bool value) {

  _impl_.do_common_subexpression_elimination_ = value;
}
inline void OptimizerOptions::set_do_common_subexpression_elimination(bool value) {
  _internal_set_do_common_subexpression_elimination(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.do_common_subexpression_elimination)
}

// bool do_constant_folding = 2;
inline void OptimizerOptions::clear_do_constant_folding() {
  _impl_.do_constant_folding_ = false;
}
inline bool OptimizerOptions::_internal_do_constant_folding() const {
  return _impl_.do_constant_folding_;
}
inline bool OptimizerOptions::do_constant_folding() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.do_constant_folding)
  return _internal_do_constant_folding();
}
inline void OptimizerOptions::_internal_set_do_constant_folding(bool value) {

  _impl_.do_constant_folding_ = value;
}
inline void OptimizerOptions::set_do_constant_folding(bool value) {
  _internal_set_do_constant_folding(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.do_constant_folding)
}

// int64 max_folded_constant_in_bytes = 6;
inline void OptimizerOptions::clear_max_folded_constant_in_bytes() {
  _impl_.max_folded_constant_in_bytes_ = ::int64_t{0};
}
inline ::int64_t OptimizerOptions::_internal_max_folded_constant_in_bytes() const {
  return _impl_.max_folded_constant_in_bytes_;
}
inline ::int64_t OptimizerOptions::max_folded_constant_in_bytes() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.max_folded_constant_in_bytes)
  return _internal_max_folded_constant_in_bytes();
}
inline void OptimizerOptions::_internal_set_max_folded_constant_in_bytes(::int64_t value) {

  _impl_.max_folded_constant_in_bytes_ = value;
}
inline void OptimizerOptions::set_max_folded_constant_in_bytes(::int64_t value) {
  _internal_set_max_folded_constant_in_bytes(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.max_folded_constant_in_bytes)
}

// bool do_function_inlining = 4;
inline void OptimizerOptions::clear_do_function_inlining() {
  _impl_.do_function_inlining_ = false;
}
inline bool OptimizerOptions::_internal_do_function_inlining() const {
  return _impl_.do_function_inlining_;
}
inline bool OptimizerOptions::do_function_inlining() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.do_function_inlining)
  return _internal_do_function_inlining();
}
inline void OptimizerOptions::_internal_set_do_function_inlining(bool value) {

  _impl_.do_function_inlining_ = value;
}
inline void OptimizerOptions::set_do_function_inlining(bool value) {
  _internal_set_do_function_inlining(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.do_function_inlining)
}

// .tensorflow.OptimizerOptions.Level opt_level = 3;
inline void OptimizerOptions::clear_opt_level() {
  _impl_.opt_level_ = 0;
}
inline ::tensorflow::OptimizerOptions_Level OptimizerOptions::_internal_opt_level() const {
  return static_cast< ::tensorflow::OptimizerOptions_Level >(_impl_.opt_level_);
}
inline ::tensorflow::OptimizerOptions_Level OptimizerOptions::opt_level() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.opt_level)
  return _internal_opt_level();
}
inline void OptimizerOptions::_internal_set_opt_level(::tensorflow::OptimizerOptions_Level value) {

  _impl_.opt_level_ = value;
}
inline void OptimizerOptions::set_opt_level(::tensorflow::OptimizerOptions_Level value) {
  _internal_set_opt_level(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.opt_level)
}

// .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
inline void OptimizerOptions::clear_global_jit_level() {
  _impl_.global_jit_level_ = 0;
}
inline ::tensorflow::OptimizerOptions_GlobalJitLevel OptimizerOptions::_internal_global_jit_level() const {
  return static_cast< ::tensorflow::OptimizerOptions_GlobalJitLevel >(_impl_.global_jit_level_);
}
inline ::tensorflow::OptimizerOptions_GlobalJitLevel OptimizerOptions::global_jit_level() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.global_jit_level)
  return _internal_global_jit_level();
}
inline void OptimizerOptions::_internal_set_global_jit_level(::tensorflow::OptimizerOptions_GlobalJitLevel value) {

  _impl_.global_jit_level_ = value;
}
inline void OptimizerOptions::set_global_jit_level(::tensorflow::OptimizerOptions_GlobalJitLevel value) {
  _internal_set_global_jit_level(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.global_jit_level)
}

// bool cpu_global_jit = 7;
inline void OptimizerOptions::clear_cpu_global_jit() {
  _impl_.cpu_global_jit_ = false;
}
inline bool OptimizerOptions::_internal_cpu_global_jit() const {
  return _impl_.cpu_global_jit_;
}
inline bool OptimizerOptions::cpu_global_jit() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.cpu_global_jit)
  return _internal_cpu_global_jit();
}
inline void OptimizerOptions::_internal_set_cpu_global_jit(bool value) {

  _impl_.cpu_global_jit_ = value;
}
inline void OptimizerOptions::set_cpu_global_jit(bool value) {
  _internal_set_cpu_global_jit(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.cpu_global_jit)
}
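
// Usage sketch (editorial example, not generated code): enum-typed fields
// take the generated enum constants; the raw int is stored internally and
// cast back on read. Assumes the `L1` and `ON_1` constants declared for
// OptimizerOptions earlier in this header, and a local `opts` object.
//
//   tensorflow::OptimizerOptions opts;
//   opts.set_opt_level(tensorflow::OptimizerOptions::L1);
//   opts.set_global_jit_level(tensorflow::OptimizerOptions::ON_1);
//   opts.set_do_constant_folding(true);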

// -------------------------------------------------------------------

// GraphOptions

// bool enable_recv_scheduling = 2;
inline void GraphOptions::clear_enable_recv_scheduling() {
  _impl_.enable_recv_scheduling_ = false;
}
inline bool GraphOptions::_internal_enable_recv_scheduling() const {
  return _impl_.enable_recv_scheduling_;
}
inline bool GraphOptions::enable_recv_scheduling() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.enable_recv_scheduling)
  return _internal_enable_recv_scheduling();
}
inline void GraphOptions::_internal_set_enable_recv_scheduling(bool value) {

  _impl_.enable_recv_scheduling_ = value;
}
inline void GraphOptions::set_enable_recv_scheduling(bool value) {
  _internal_set_enable_recv_scheduling(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.enable_recv_scheduling)
}

// .tensorflow.OptimizerOptions optimizer_options = 3;
inline bool GraphOptions::_internal_has_optimizer_options() const {
  return this != internal_default_instance() && _impl_.optimizer_options_ != nullptr;
}
inline bool GraphOptions::has_optimizer_options() const {
  return _internal_has_optimizer_options();
}
inline void GraphOptions::clear_optimizer_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.optimizer_options_ != nullptr) {
    delete _impl_.optimizer_options_;
  }
  _impl_.optimizer_options_ = nullptr;
}
inline const ::tensorflow::OptimizerOptions& GraphOptions::_internal_optimizer_options() const {
  const ::tensorflow::OptimizerOptions* p = _impl_.optimizer_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::OptimizerOptions&>(
      ::tensorflow::_OptimizerOptions_default_instance_);
}
inline const ::tensorflow::OptimizerOptions& GraphOptions::optimizer_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.optimizer_options)
  return _internal_optimizer_options();
}
inline void GraphOptions::unsafe_arena_set_allocated_optimizer_options(
    ::tensorflow::OptimizerOptions* optimizer_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.optimizer_options_);
  }
  _impl_.optimizer_options_ = optimizer_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.GraphOptions.optimizer_options)
}
inline ::tensorflow::OptimizerOptions* GraphOptions::release_optimizer_options() {

  ::tensorflow::OptimizerOptions* temp = _impl_.optimizer_options_;
  _impl_.optimizer_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::OptimizerOptions* GraphOptions::unsafe_arena_release_optimizer_options() {
  // @@protoc_insertion_point(field_release:tensorflow.GraphOptions.optimizer_options)

  ::tensorflow::OptimizerOptions* temp = _impl_.optimizer_options_;
  _impl_.optimizer_options_ = nullptr;
  return temp;
}
inline ::tensorflow::OptimizerOptions* GraphOptions::_internal_mutable_optimizer_options() {

  if (_impl_.optimizer_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::OptimizerOptions>(GetArenaForAllocation());
    _impl_.optimizer_options_ = p;
  }
  return _impl_.optimizer_options_;
}
inline ::tensorflow::OptimizerOptions* GraphOptions::mutable_optimizer_options() {
  ::tensorflow::OptimizerOptions* _msg = _internal_mutable_optimizer_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.GraphOptions.optimizer_options)
  return _msg;
}
inline void GraphOptions::set_allocated_optimizer_options(::tensorflow::OptimizerOptions* optimizer_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.optimizer_options_;
  }
  if (optimizer_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(optimizer_options);
    if (message_arena != submessage_arena) {
      optimizer_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, optimizer_options, submessage_arena);
    }

  } else {

  }
  _impl_.optimizer_options_ = optimizer_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.GraphOptions.optimizer_options)
}

// int64 build_cost_model = 4;
inline void GraphOptions::clear_build_cost_model() {
  _impl_.build_cost_model_ = ::int64_t{0};
}
inline ::int64_t GraphOptions::_internal_build_cost_model() const {
  return _impl_.build_cost_model_;
}
inline ::int64_t GraphOptions::build_cost_model() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.build_cost_model)
  return _internal_build_cost_model();
}
inline void GraphOptions::_internal_set_build_cost_model(::int64_t value) {

  _impl_.build_cost_model_ = value;
}
inline void GraphOptions::set_build_cost_model(::int64_t value) {
  _internal_set_build_cost_model(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.build_cost_model)
}

// int64 build_cost_model_after = 9;
inline void GraphOptions::clear_build_cost_model_after() {
  _impl_.build_cost_model_after_ = ::int64_t{0};
}
inline ::int64_t GraphOptions::_internal_build_cost_model_after() const {
  return _impl_.build_cost_model_after_;
}
inline ::int64_t GraphOptions::build_cost_model_after() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.build_cost_model_after)
  return _internal_build_cost_model_after();
}
inline void GraphOptions::_internal_set_build_cost_model_after(::int64_t value) {

  _impl_.build_cost_model_after_ = value;
}
inline void GraphOptions::set_build_cost_model_after(::int64_t value) {
  _internal_set_build_cost_model_after(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.build_cost_model_after)
}

// bool infer_shapes = 5;
inline void GraphOptions::clear_infer_shapes() {
  _impl_.infer_shapes_ = false;
}
inline bool GraphOptions::_internal_infer_shapes() const {
  return _impl_.infer_shapes_;
}
inline bool GraphOptions::infer_shapes() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.infer_shapes)
  return _internal_infer_shapes();
}
inline void GraphOptions::_internal_set_infer_shapes(bool value) {

  _impl_.infer_shapes_ = value;
}
inline void GraphOptions::set_infer_shapes(bool value) {
  _internal_set_infer_shapes(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.infer_shapes)
}

// bool place_pruned_graph = 6;
inline void GraphOptions::clear_place_pruned_graph() {
  _impl_.place_pruned_graph_ = false;
}
inline bool GraphOptions::_internal_place_pruned_graph() const {
  return _impl_.place_pruned_graph_;
}
inline bool GraphOptions::place_pruned_graph() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.place_pruned_graph)
  return _internal_place_pruned_graph();
}
inline void GraphOptions::_internal_set_place_pruned_graph(bool value) {

  _impl_.place_pruned_graph_ = value;
}
inline void GraphOptions::set_place_pruned_graph(bool value) {
  _internal_set_place_pruned_graph(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.place_pruned_graph)
}

// bool enable_bfloat16_sendrecv = 7;
inline void GraphOptions::clear_enable_bfloat16_sendrecv() {
  _impl_.enable_bfloat16_sendrecv_ = false;
}
inline bool GraphOptions::_internal_enable_bfloat16_sendrecv() const {
  return _impl_.enable_bfloat16_sendrecv_;
}
inline bool GraphOptions::enable_bfloat16_sendrecv() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.enable_bfloat16_sendrecv)
  return _internal_enable_bfloat16_sendrecv();
}
inline void GraphOptions::_internal_set_enable_bfloat16_sendrecv(bool value) {

  _impl_.enable_bfloat16_sendrecv_ = value;
}
inline void GraphOptions::set_enable_bfloat16_sendrecv(bool value) {
  _internal_set_enable_bfloat16_sendrecv(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.enable_bfloat16_sendrecv)
}

// int32 timeline_step = 8;
inline void GraphOptions::clear_timeline_step() {
  _impl_.timeline_step_ = 0;
}
inline ::int32_t GraphOptions::_internal_timeline_step() const {
  return _impl_.timeline_step_;
}
inline ::int32_t GraphOptions::timeline_step() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.timeline_step)
  return _internal_timeline_step();
}
inline void GraphOptions::_internal_set_timeline_step(::int32_t value) {

  _impl_.timeline_step_ = value;
}
inline void GraphOptions::set_timeline_step(::int32_t value) {
  _internal_set_timeline_step(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.timeline_step)
}

// .tensorflow.RewriterConfig rewrite_options = 10;
inline bool GraphOptions::_internal_has_rewrite_options() const {
  return this != internal_default_instance() && _impl_.rewrite_options_ != nullptr;
}
inline bool GraphOptions::has_rewrite_options() const {
  return _internal_has_rewrite_options();
}
inline const ::tensorflow::RewriterConfig& GraphOptions::_internal_rewrite_options() const {
  const ::tensorflow::RewriterConfig* p = _impl_.rewrite_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::RewriterConfig&>(
      ::tensorflow::_RewriterConfig_default_instance_);
}
inline const ::tensorflow::RewriterConfig& GraphOptions::rewrite_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.rewrite_options)
  return _internal_rewrite_options();
}
inline void GraphOptions::unsafe_arena_set_allocated_rewrite_options(
    ::tensorflow::RewriterConfig* rewrite_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.rewrite_options_);
  }
  _impl_.rewrite_options_ = rewrite_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.GraphOptions.rewrite_options)
}
inline ::tensorflow::RewriterConfig* GraphOptions::release_rewrite_options() {

  ::tensorflow::RewriterConfig* temp = _impl_.rewrite_options_;
  _impl_.rewrite_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::RewriterConfig* GraphOptions::unsafe_arena_release_rewrite_options() {
  // @@protoc_insertion_point(field_release:tensorflow.GraphOptions.rewrite_options)

  ::tensorflow::RewriterConfig* temp = _impl_.rewrite_options_;
  _impl_.rewrite_options_ = nullptr;
  return temp;
}
inline ::tensorflow::RewriterConfig* GraphOptions::_internal_mutable_rewrite_options() {

  if (_impl_.rewrite_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::RewriterConfig>(GetArenaForAllocation());
    _impl_.rewrite_options_ = p;
  }
  return _impl_.rewrite_options_;
}
inline ::tensorflow::RewriterConfig* GraphOptions::mutable_rewrite_options() {
  ::tensorflow::RewriterConfig* _msg = _internal_mutable_rewrite_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.GraphOptions.rewrite_options)
  return _msg;
}
inline void GraphOptions::set_allocated_rewrite_options(::tensorflow::RewriterConfig* rewrite_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.rewrite_options_);
  }
  if (rewrite_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
                reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(rewrite_options));
    if (message_arena != submessage_arena) {
      rewrite_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, rewrite_options, submessage_arena);
    }

  } else {

  }
  _impl_.rewrite_options_ = rewrite_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.GraphOptions.rewrite_options)
}
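
// Usage sketch (editorial example, not generated code): GraphOptions owns its
// OptimizerOptions and RewriterConfig submessages through the same
// mutable/release/set_allocated pattern shown above. `graph_opts` is an
// assumed local object.
//
//   tensorflow::GraphOptions graph_opts;
//   graph_opts.mutable_optimizer_options()->set_do_function_inlining(true);
//   graph_opts.mutable_rewrite_options();  // lazily creates the RewriterConfig
//   bool has_rewrite = graph_opts.has_rewrite_options();  // true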

// -------------------------------------------------------------------

// ThreadPoolOptionProto

// int32 num_threads = 1;
inline void ThreadPoolOptionProto::clear_num_threads() {
  _impl_.num_threads_ = 0;
}
inline ::int32_t ThreadPoolOptionProto::_internal_num_threads() const {
  return _impl_.num_threads_;
}
inline ::int32_t ThreadPoolOptionProto::num_threads() const {
  // @@protoc_insertion_point(field_get:tensorflow.ThreadPoolOptionProto.num_threads)
  return _internal_num_threads();
}
inline void ThreadPoolOptionProto::_internal_set_num_threads(::int32_t value) {

  _impl_.num_threads_ = value;
}
inline void ThreadPoolOptionProto::set_num_threads(::int32_t value) {
  _internal_set_num_threads(value);
  // @@protoc_insertion_point(field_set:tensorflow.ThreadPoolOptionProto.num_threads)
}

// string global_name = 2;
inline void ThreadPoolOptionProto::clear_global_name() {
  _impl_.global_name_.ClearToEmpty();
}
inline const std::string& ThreadPoolOptionProto::global_name() const {
  // @@protoc_insertion_point(field_get:tensorflow.ThreadPoolOptionProto.global_name)
  return _internal_global_name();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void ThreadPoolOptionProto::set_global_name(ArgT0&& arg0, ArgT... args) {

 _impl_.global_name_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.ThreadPoolOptionProto.global_name)
}
inline std::string* ThreadPoolOptionProto::mutable_global_name() {
  std::string* _s = _internal_mutable_global_name();
  // @@protoc_insertion_point(field_mutable:tensorflow.ThreadPoolOptionProto.global_name)
  return _s;
}
inline const std::string& ThreadPoolOptionProto::_internal_global_name() const {
  return _impl_.global_name_.Get();
}
inline void ThreadPoolOptionProto::_internal_set_global_name(const std::string& value) {

  _impl_.global_name_.Set(value, GetArenaForAllocation());
}
inline std::string* ThreadPoolOptionProto::_internal_mutable_global_name() {

  return _impl_.global_name_.Mutable(GetArenaForAllocation());
}
inline std::string* ThreadPoolOptionProto::release_global_name() {
  // @@protoc_insertion_point(field_release:tensorflow.ThreadPoolOptionProto.global_name)
  return _impl_.global_name_.Release();
}
inline void ThreadPoolOptionProto::set_allocated_global_name(std::string* global_name) {
  _impl_.global_name_.SetAllocated(global_name, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.global_name_.IsDefault()) {
    _impl_.global_name_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.ThreadPoolOptionProto.global_name)
}
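
// Usage sketch (editorial example, not generated code; `pool` is an assumed
// local object):
//
//   tensorflow::ThreadPoolOptionProto pool;
//   pool.set_num_threads(8);
//   pool.set_global_name("shared_pool");  // the variadic setter accepts const char*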

// -------------------------------------------------------------------

// RPCOptions

// bool use_rpc_for_inprocess_master = 1;
inline void RPCOptions::clear_use_rpc_for_inprocess_master() {
  _impl_.use_rpc_for_inprocess_master_ = false;
}
inline bool RPCOptions::_internal_use_rpc_for_inprocess_master() const {
  return _impl_.use_rpc_for_inprocess_master_;
}
inline bool RPCOptions::use_rpc_for_inprocess_master() const {
  // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.use_rpc_for_inprocess_master)
  return _internal_use_rpc_for_inprocess_master();
}
inline void RPCOptions::_internal_set_use_rpc_for_inprocess_master(bool value) {

  _impl_.use_rpc_for_inprocess_master_ = value;
}
inline void RPCOptions::set_use_rpc_for_inprocess_master(bool value) {
  _internal_set_use_rpc_for_inprocess_master(value);
  // @@protoc_insertion_point(field_set:tensorflow.RPCOptions.use_rpc_for_inprocess_master)
}

// string compression_algorithm = 2;
inline void RPCOptions::clear_compression_algorithm() {
  _impl_.compression_algorithm_.ClearToEmpty();
}
inline const std::string& RPCOptions::compression_algorithm() const {
  // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.compression_algorithm)
  return _internal_compression_algorithm();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void RPCOptions::set_compression_algorithm(ArgT0&& arg0, ArgT... args) {

 _impl_.compression_algorithm_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.RPCOptions.compression_algorithm)
}
inline std::string* RPCOptions::mutable_compression_algorithm() {
  std::string* _s = _internal_mutable_compression_algorithm();
  // @@protoc_insertion_point(field_mutable:tensorflow.RPCOptions.compression_algorithm)
  return _s;
}
inline const std::string& RPCOptions::_internal_compression_algorithm() const {
  return _impl_.compression_algorithm_.Get();
}
inline void RPCOptions::_internal_set_compression_algorithm(const std::string& value) {

  _impl_.compression_algorithm_.Set(value, GetArenaForAllocation());
}
inline std::string* RPCOptions::_internal_mutable_compression_algorithm() {

  return _impl_.compression_algorithm_.Mutable(GetArenaForAllocation());
}
inline std::string* RPCOptions::release_compression_algorithm() {
  // @@protoc_insertion_point(field_release:tensorflow.RPCOptions.compression_algorithm)
  return _impl_.compression_algorithm_.Release();
}
inline void RPCOptions::set_allocated_compression_algorithm(std::string* compression_algorithm) {
  _impl_.compression_algorithm_.SetAllocated(compression_algorithm, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.compression_algorithm_.IsDefault()) {
    _impl_.compression_algorithm_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.RPCOptions.compression_algorithm)
}

// int32 compression_level = 3;
inline void RPCOptions::clear_compression_level() {
  _impl_.compression_level_ = 0;
}
inline ::int32_t RPCOptions::_internal_compression_level() const {
  return _impl_.compression_level_;
}
inline ::int32_t RPCOptions::compression_level() const {
  // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.compression_level)
  return _internal_compression_level();
}
inline void RPCOptions::_internal_set_compression_level(::int32_t value) {

  _impl_.compression_level_ = value;
}
inline void RPCOptions::set_compression_level(::int32_t value) {
  _internal_set_compression_level(value);
  // @@protoc_insertion_point(field_set:tensorflow.RPCOptions.compression_level)
}

// bool cache_rpc_response = 4;
inline void RPCOptions::clear_cache_rpc_response() {
  _impl_.cache_rpc_response_ = false;
}
inline bool RPCOptions::_internal_cache_rpc_response() const {
  return _impl_.cache_rpc_response_;
}
inline bool RPCOptions::cache_rpc_response() const {
  // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.cache_rpc_response)
  return _internal_cache_rpc_response();
}
inline void RPCOptions::_internal_set_cache_rpc_response(bool value) {

  _impl_.cache_rpc_response_ = value;
}
inline void RPCOptions::set_cache_rpc_response(bool value) {
  _internal_set_cache_rpc_response(value);
  // @@protoc_insertion_point(field_set:tensorflow.RPCOptions.cache_rpc_response)
}

// bool disable_session_connection_sharing = 5;
inline void RPCOptions::clear_disable_session_connection_sharing() {
  _impl_.disable_session_connection_sharing_ = false;
}
inline bool RPCOptions::_internal_disable_session_connection_sharing() const {
  return _impl_.disable_session_connection_sharing_;
}
inline bool RPCOptions::disable_session_connection_sharing() const {
  // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.disable_session_connection_sharing)
  return _internal_disable_session_connection_sharing();
}
inline void RPCOptions::_internal_set_disable_session_connection_sharing(bool value) {

  _impl_.disable_session_connection_sharing_ = value;
}
inline void RPCOptions::set_disable_session_connection_sharing(bool value) {
  _internal_set_disable_session_connection_sharing(value);
  // @@protoc_insertion_point(field_set:tensorflow.RPCOptions.disable_session_connection_sharing)
}

// int32 num_channels_per_target = 6;
inline void RPCOptions::clear_num_channels_per_target() {
  _impl_.num_channels_per_target_ = 0;
}
inline ::int32_t RPCOptions::_internal_num_channels_per_target() const {
  return _impl_.num_channels_per_target_;
}
inline ::int32_t RPCOptions::num_channels_per_target() const {
  // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.num_channels_per_target)
  return _internal_num_channels_per_target();
}
inline void RPCOptions::_internal_set_num_channels_per_target(::int32_t value) {

  _impl_.num_channels_per_target_ = value;
}
inline void RPCOptions::set_num_channels_per_target(::int32_t value) {
  _internal_set_num_channels_per_target(value);
  // @@protoc_insertion_point(field_set:tensorflow.RPCOptions.num_channels_per_target)
}
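
// Usage sketch (editorial example, not generated code; `rpc` is an assumed
// local object and "gzip" an assumed algorithm name):
//
//   tensorflow::RPCOptions rpc;
//   rpc.set_compression_algorithm("gzip");
//   rpc.set_compression_level(2);
//   rpc.set_num_channels_per_target(4);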

// -------------------------------------------------------------------

// SessionMetadata

// string name = 1;
inline void SessionMetadata::clear_name() {
  _impl_.name_.ClearToEmpty();
}
inline const std::string& SessionMetadata::name() const {
  // @@protoc_insertion_point(field_get:tensorflow.SessionMetadata.name)
  return _internal_name();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void SessionMetadata::set_name(ArgT0&& arg0, ArgT... args) {

 _impl_.name_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.SessionMetadata.name)
}
inline std::string* SessionMetadata::mutable_name() {
  std::string* _s = _internal_mutable_name();
  // @@protoc_insertion_point(field_mutable:tensorflow.SessionMetadata.name)
  return _s;
}
inline const std::string& SessionMetadata::_internal_name() const {
  return _impl_.name_.Get();
}
inline void SessionMetadata::_internal_set_name(const std::string& value) {

  _impl_.name_.Set(value, GetArenaForAllocation());
}
inline std::string* SessionMetadata::_internal_mutable_name() {

  return _impl_.name_.Mutable(GetArenaForAllocation());
}
inline std::string* SessionMetadata::release_name() {
  // @@protoc_insertion_point(field_release:tensorflow.SessionMetadata.name)
  return _impl_.name_.Release();
}
inline void SessionMetadata::set_allocated_name(std::string* name) {
  _impl_.name_.SetAllocated(name, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.name_.IsDefault()) {
    _impl_.name_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.SessionMetadata.name)
}

// int64 version = 2;
inline void SessionMetadata::clear_version() {
  _impl_.version_ = ::int64_t{0};
}
inline ::int64_t SessionMetadata::_internal_version() const {
  return _impl_.version_;
}
inline ::int64_t SessionMetadata::version() const {
  // @@protoc_insertion_point(field_get:tensorflow.SessionMetadata.version)
  return _internal_version();
}
inline void SessionMetadata::_internal_set_version(::int64_t value) {

  _impl_.version_ = value;
}
inline void SessionMetadata::set_version(::int64_t value) {
  _internal_set_version(value);
  // @@protoc_insertion_point(field_set:tensorflow.SessionMetadata.version)
}
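
// Usage sketch (editorial example, not generated code; `meta` is an assumed
// local object):
//
//   tensorflow::SessionMetadata meta;
//   meta.set_name("inference_session");
//   meta.set_version(::int64_t{3});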

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// ConfigProto_Experimental

// string collective_group_leader = 1;
inline void ConfigProto_Experimental::clear_collective_group_leader() {
  _impl_.collective_group_leader_.ClearToEmpty();
}
inline const std::string& ConfigProto_Experimental::collective_group_leader() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.collective_group_leader)
  return _internal_collective_group_leader();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void ConfigProto_Experimental::set_collective_group_leader(ArgT0&& arg0, ArgT... args) {

 _impl_.collective_group_leader_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.collective_group_leader)
}
inline std::string* ConfigProto_Experimental::mutable_collective_group_leader() {
  std::string* _s = _internal_mutable_collective_group_leader();
  // @@protoc_insertion_point(field_mutable:tensorflow.ConfigProto.Experimental.collective_group_leader)
  return _s;
}
inline const std::string& ConfigProto_Experimental::_internal_collective_group_leader() const {
  return _impl_.collective_group_leader_.Get();
}
inline void ConfigProto_Experimental::_internal_set_collective_group_leader(const std::string& value) {

  _impl_.collective_group_leader_.Set(value, GetArenaForAllocation());
}
inline std::string* ConfigProto_Experimental::_internal_mutable_collective_group_leader() {

  return _impl_.collective_group_leader_.Mutable(GetArenaForAllocation());
}
inline std::string* ConfigProto_Experimental::release_collective_group_leader() {
  // @@protoc_insertion_point(field_release:tensorflow.ConfigProto.Experimental.collective_group_leader)
  return _impl_.collective_group_leader_.Release();
}
inline void ConfigProto_Experimental::set_allocated_collective_group_leader(std::string* collective_group_leader) {
  _impl_.collective_group_leader_.SetAllocated(collective_group_leader, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.collective_group_leader_.IsDefault()) {
    _impl_.collective_group_leader_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.ConfigProto.Experimental.collective_group_leader)
}

// string executor_type = 3;
inline void ConfigProto_Experimental::clear_executor_type() {
  _impl_.executor_type_.ClearToEmpty();
}
inline const std::string& ConfigProto_Experimental::executor_type() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.executor_type)
  return _internal_executor_type();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void ConfigProto_Experimental::set_executor_type(ArgT0&& arg0, ArgT... args) {

 _impl_.executor_type_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.executor_type)
}
inline std::string* ConfigProto_Experimental::mutable_executor_type() {
  std::string* _s = _internal_mutable_executor_type();
  // @@protoc_insertion_point(field_mutable:tensorflow.ConfigProto.Experimental.executor_type)
  return _s;
}
inline const std::string& ConfigProto_Experimental::_internal_executor_type() const {
  return _impl_.executor_type_.Get();
}
inline void ConfigProto_Experimental::_internal_set_executor_type(const std::string& value) {

  _impl_.executor_type_.Set(value, GetArenaForAllocation());
}
inline std::string* ConfigProto_Experimental::_internal_mutable_executor_type() {

  return _impl_.executor_type_.Mutable(GetArenaForAllocation());
}
inline std::string* ConfigProto_Experimental::release_executor_type() {
  // @@protoc_insertion_point(field_release:tensorflow.ConfigProto.Experimental.executor_type)
  return _impl_.executor_type_.Release();
}
inline void ConfigProto_Experimental::set_allocated_executor_type(std::string* executor_type) {
  _impl_.executor_type_.SetAllocated(executor_type, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.executor_type_.IsDefault()) {
    _impl_.executor_type_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.ConfigProto.Experimental.executor_type)
}

// int32 recv_buf_max_chunk = 4;
inline void ConfigProto_Experimental::clear_recv_buf_max_chunk() {
  _impl_.recv_buf_max_chunk_ = 0;
}
inline ::int32_t ConfigProto_Experimental::_internal_recv_buf_max_chunk() const {
  return _impl_.recv_buf_max_chunk_;
}
inline ::int32_t ConfigProto_Experimental::recv_buf_max_chunk() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.recv_buf_max_chunk)
  return _internal_recv_buf_max_chunk();
}
inline void ConfigProto_Experimental::_internal_set_recv_buf_max_chunk(::int32_t value) {

  _impl_.recv_buf_max_chunk_ = value;
}
inline void ConfigProto_Experimental::set_recv_buf_max_chunk(::int32_t value) {
  _internal_set_recv_buf_max_chunk(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.recv_buf_max_chunk)
}

// bool use_numa_affinity = 5;
inline void ConfigProto_Experimental::clear_use_numa_affinity() {
  _impl_.use_numa_affinity_ = false;
}
inline bool ConfigProto_Experimental::_internal_use_numa_affinity() const {
  return _impl_.use_numa_affinity_;
}
inline bool ConfigProto_Experimental::use_numa_affinity() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.use_numa_affinity)
  return _internal_use_numa_affinity();
}
inline void ConfigProto_Experimental::_internal_set_use_numa_affinity(bool value) {

  _impl_.use_numa_affinity_ = value;
}
inline void ConfigProto_Experimental::set_use_numa_affinity(bool value) {
  _internal_set_use_numa_affinity(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.use_numa_affinity)
}

// bool collective_deterministic_sequential_execution = 6;
inline void ConfigProto_Experimental::clear_collective_deterministic_sequential_execution() {
  _impl_.collective_deterministic_sequential_execution_ = false;
}
inline bool ConfigProto_Experimental::_internal_collective_deterministic_sequential_execution() const {
  return _impl_.collective_deterministic_sequential_execution_;
}
inline bool ConfigProto_Experimental::collective_deterministic_sequential_execution() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.collective_deterministic_sequential_execution)
  return _internal_collective_deterministic_sequential_execution();
}
inline void ConfigProto_Experimental::_internal_set_collective_deterministic_sequential_execution(bool value) {

  _impl_.collective_deterministic_sequential_execution_ = value;
}
inline void ConfigProto_Experimental::set_collective_deterministic_sequential_execution(bool value) {
  _internal_set_collective_deterministic_sequential_execution(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.collective_deterministic_sequential_execution)
}

// bool collective_nccl = 7;
inline void ConfigProto_Experimental::clear_collective_nccl() {
  _impl_.collective_nccl_ = false;
}
inline bool ConfigProto_Experimental::_internal_collective_nccl() const {
  return _impl_.collective_nccl_;
}
inline bool ConfigProto_Experimental::collective_nccl() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.collective_nccl)
  return _internal_collective_nccl();
}
inline void ConfigProto_Experimental::_internal_set_collective_nccl(bool value) {

  _impl_.collective_nccl_ = value;
}
inline void ConfigProto_Experimental::set_collective_nccl(bool value) {
  _internal_set_collective_nccl(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.collective_nccl)
}

// bool share_session_state_in_clusterspec_propagation = 8;
inline void ConfigProto_Experimental::clear_share_session_state_in_clusterspec_propagation() {
  _impl_.share_session_state_in_clusterspec_propagation_ = false;
}
inline bool ConfigProto_Experimental::_internal_share_session_state_in_clusterspec_propagation() const {
  return _impl_.share_session_state_in_clusterspec_propagation_;
}
inline bool ConfigProto_Experimental::share_session_state_in_clusterspec_propagation() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.share_session_state_in_clusterspec_propagation)
  return _internal_share_session_state_in_clusterspec_propagation();
}
inline void ConfigProto_Experimental::_internal_set_share_session_state_in_clusterspec_propagation(bool value) {

  _impl_.share_session_state_in_clusterspec_propagation_ = value;
}
inline void ConfigProto_Experimental::set_share_session_state_in_clusterspec_propagation(bool value) {
  _internal_set_share_session_state_in_clusterspec_propagation(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.share_session_state_in_clusterspec_propagation)
}

// bool disable_thread_spinning = 9;
inline void ConfigProto_Experimental::clear_disable_thread_spinning() {
  _impl_.disable_thread_spinning_ = false;
}
inline bool ConfigProto_Experimental::_internal_disable_thread_spinning() const {
  return _impl_.disable_thread_spinning_;
}
inline bool ConfigProto_Experimental::disable_thread_spinning() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.disable_thread_spinning)
  return _internal_disable_thread_spinning();
}
inline void ConfigProto_Experimental::_internal_set_disable_thread_spinning(bool value) {

  _impl_.disable_thread_spinning_ = value;
}
inline void ConfigProto_Experimental::set_disable_thread_spinning(bool value) {
  _internal_set_disable_thread_spinning(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.disable_thread_spinning)
}

// bool share_cluster_devices_in_session = 10;
inline void ConfigProto_Experimental::clear_share_cluster_devices_in_session() {
  _impl_.share_cluster_devices_in_session_ = false;
}
inline bool ConfigProto_Experimental::_internal_share_cluster_devices_in_session() const {
  return _impl_.share_cluster_devices_in_session_;
}
inline bool ConfigProto_Experimental::share_cluster_devices_in_session() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.share_cluster_devices_in_session)
  return _internal_share_cluster_devices_in_session();
}
inline void ConfigProto_Experimental::_internal_set_share_cluster_devices_in_session(bool value) {

  _impl_.share_cluster_devices_in_session_ = value;
}
inline void ConfigProto_Experimental::set_share_cluster_devices_in_session(bool value) {
  _internal_set_share_cluster_devices_in_session(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.share_cluster_devices_in_session)
}
5903 
5904 // .tensorflow.SessionMetadata session_metadata = 11;
_internal_has_session_metadata()5905 inline bool ConfigProto_Experimental::_internal_has_session_metadata() const {
5906   return this != internal_default_instance() && _impl_.session_metadata_ != nullptr;
5907 }
has_session_metadata()5908 inline bool ConfigProto_Experimental::has_session_metadata() const {
5909   return _internal_has_session_metadata();
5910 }
clear_session_metadata()5911 inline void ConfigProto_Experimental::clear_session_metadata() {
5912   if (GetArenaForAllocation() == nullptr && _impl_.session_metadata_ != nullptr) {
5913     delete _impl_.session_metadata_;
5914   }
5915   _impl_.session_metadata_ = nullptr;
5916 }
_internal_session_metadata()5917 inline const ::tensorflow::SessionMetadata& ConfigProto_Experimental::_internal_session_metadata() const {
5918   const ::tensorflow::SessionMetadata* p = _impl_.session_metadata_;
5919   return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::SessionMetadata&>(
5920       ::tensorflow::_SessionMetadata_default_instance_);
5921 }
session_metadata()5922 inline const ::tensorflow::SessionMetadata& ConfigProto_Experimental::session_metadata() const {
5923   // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.session_metadata)
5924   return _internal_session_metadata();
5925 }
unsafe_arena_set_allocated_session_metadata(::tensorflow::SessionMetadata * session_metadata)5926 inline void ConfigProto_Experimental::unsafe_arena_set_allocated_session_metadata(
5927     ::tensorflow::SessionMetadata* session_metadata) {
5928   if (GetArenaForAllocation() == nullptr) {
5929     delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.session_metadata_);
5930   }
5931   _impl_.session_metadata_ = session_metadata;
5932   // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.Experimental.session_metadata)
5933 }
release_session_metadata()5934 inline ::tensorflow::SessionMetadata* ConfigProto_Experimental::release_session_metadata() {
5935 
5936   ::tensorflow::SessionMetadata* temp = _impl_.session_metadata_;
5937   _impl_.session_metadata_ = nullptr;
5938 #ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
5939   auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
5940   temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
5941   if (GetArenaForAllocation() == nullptr) { delete old; }
5942 #else  // PROTOBUF_FORCE_COPY_IN_RELEASE
5943   if (GetArenaForAllocation() != nullptr) {
5944     temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
5945   }
5946 #endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
5947   return temp;
5948 }
unsafe_arena_release_session_metadata()5949 inline ::tensorflow::SessionMetadata* ConfigProto_Experimental::unsafe_arena_release_session_metadata() {
5950   // @@protoc_insertion_point(field_release:tensorflow.ConfigProto.Experimental.session_metadata)
5951 
5952   ::tensorflow::SessionMetadata* temp = _impl_.session_metadata_;
5953   _impl_.session_metadata_ = nullptr;
5954   return temp;
5955 }
_internal_mutable_session_metadata()5956 inline ::tensorflow::SessionMetadata* ConfigProto_Experimental::_internal_mutable_session_metadata() {
5957 
5958   if (_impl_.session_metadata_ == nullptr) {
5959     auto* p = CreateMaybeMessage<::tensorflow::SessionMetadata>(GetArenaForAllocation());
5960     _impl_.session_metadata_ = p;
5961   }
5962   return _impl_.session_metadata_;
5963 }
mutable_session_metadata()5964 inline ::tensorflow::SessionMetadata* ConfigProto_Experimental::mutable_session_metadata() {
5965   ::tensorflow::SessionMetadata* _msg = _internal_mutable_session_metadata();
5966   // @@protoc_insertion_point(field_mutable:tensorflow.ConfigProto.Experimental.session_metadata)
5967   return _msg;
5968 }
set_allocated_session_metadata(::tensorflow::SessionMetadata * session_metadata)5969 inline void ConfigProto_Experimental::set_allocated_session_metadata(::tensorflow::SessionMetadata* session_metadata) {
5970   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
5971   if (message_arena == nullptr) {
5972     delete _impl_.session_metadata_;
5973   }
5974   if (session_metadata) {
5975     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
5976         ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(session_metadata);
5977     if (message_arena != submessage_arena) {
5978       session_metadata = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
5979           message_arena, session_metadata, submessage_arena);
5980     }
5981 
5982   } else {
5983 
5984   }
5985   _impl_.session_metadata_ = session_metadata;
5986   // @@protoc_insertion_point(field_set_allocated:tensorflow.ConfigProto.Experimental.session_metadata)
5987 }
5988 
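// A minimal usage sketch for the submessage accessors above; illustrative
// only, not part of the generated API. It assumes SessionMetadata's generated
// set_name() setter (declared earlier in this header), and the metadata value
// is hypothetical.
inline void Example_FillSessionMetadata_SketchOnly(ConfigProto_Experimental* experimental) {
  // mutable_session_metadata() lazily creates the submessage on the owning
  // arena (or the heap) and never returns nullptr.
  ::tensorflow::SessionMetadata* metadata = experimental->mutable_session_metadata();
  metadata->set_name("example_session");  // hypothetical value
  // The field now reports as present on this non-default instance.
  GOOGLE_DCHECK(experimental->has_session_metadata());
}
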
// bool optimize_for_static_graph = 12;
inline void ConfigProto_Experimental::clear_optimize_for_static_graph() {
  _impl_.optimize_for_static_graph_ = false;
}
inline bool ConfigProto_Experimental::_internal_optimize_for_static_graph() const {
  return _impl_.optimize_for_static_graph_;
}
inline bool ConfigProto_Experimental::optimize_for_static_graph() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.optimize_for_static_graph)
  return _internal_optimize_for_static_graph();
}
inline void ConfigProto_Experimental::_internal_set_optimize_for_static_graph(bool value) {
  _impl_.optimize_for_static_graph_ = value;
}
inline void ConfigProto_Experimental::set_optimize_for_static_graph(bool value) {
  _internal_set_optimize_for_static_graph(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.optimize_for_static_graph)
}

// bool enable_mlir_bridge = 13;
inline void ConfigProto_Experimental::clear_enable_mlir_bridge() {
  _impl_.enable_mlir_bridge_ = false;
}
inline bool ConfigProto_Experimental::_internal_enable_mlir_bridge() const {
  return _impl_.enable_mlir_bridge_;
}
inline bool ConfigProto_Experimental::enable_mlir_bridge() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.enable_mlir_bridge)
  return _internal_enable_mlir_bridge();
}
inline void ConfigProto_Experimental::_internal_set_enable_mlir_bridge(bool value) {
  _impl_.enable_mlir_bridge_ = value;
}
inline void ConfigProto_Experimental::set_enable_mlir_bridge(bool value) {
  _internal_set_enable_mlir_bridge(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.enable_mlir_bridge)
}

// .tensorflow.ConfigProto.Experimental.MlirBridgeRollout mlir_bridge_rollout = 17;
inline void ConfigProto_Experimental::clear_mlir_bridge_rollout() {
  _impl_.mlir_bridge_rollout_ = 0;
}
inline ::tensorflow::ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::_internal_mlir_bridge_rollout() const {
  return static_cast< ::tensorflow::ConfigProto_Experimental_MlirBridgeRollout >(_impl_.mlir_bridge_rollout_);
}
inline ::tensorflow::ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::mlir_bridge_rollout() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.mlir_bridge_rollout)
  return _internal_mlir_bridge_rollout();
}
inline void ConfigProto_Experimental::_internal_set_mlir_bridge_rollout(::tensorflow::ConfigProto_Experimental_MlirBridgeRollout value) {
  _impl_.mlir_bridge_rollout_ = value;
}
inline void ConfigProto_Experimental::set_mlir_bridge_rollout(::tensorflow::ConfigProto_Experimental_MlirBridgeRollout value) {
  _internal_set_mlir_bridge_rollout(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.mlir_bridge_rollout)
}

// bool enable_mlir_graph_optimization = 16;
inline void ConfigProto_Experimental::clear_enable_mlir_graph_optimization() {
  _impl_.enable_mlir_graph_optimization_ = false;
}
inline bool ConfigProto_Experimental::_internal_enable_mlir_graph_optimization() const {
  return _impl_.enable_mlir_graph_optimization_;
}
inline bool ConfigProto_Experimental::enable_mlir_graph_optimization() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.enable_mlir_graph_optimization)
  return _internal_enable_mlir_graph_optimization();
}
inline void ConfigProto_Experimental::_internal_set_enable_mlir_graph_optimization(bool value) {
  _impl_.enable_mlir_graph_optimization_ = value;
}
inline void ConfigProto_Experimental::set_enable_mlir_graph_optimization(bool value) {
  _internal_set_enable_mlir_graph_optimization(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.enable_mlir_graph_optimization)
}

// bool disable_output_partition_graphs = 14;
inline void ConfigProto_Experimental::clear_disable_output_partition_graphs() {
  _impl_.disable_output_partition_graphs_ = false;
}
inline bool ConfigProto_Experimental::_internal_disable_output_partition_graphs() const {
  return _impl_.disable_output_partition_graphs_;
}
inline bool ConfigProto_Experimental::disable_output_partition_graphs() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.disable_output_partition_graphs)
  return _internal_disable_output_partition_graphs();
}
inline void ConfigProto_Experimental::_internal_set_disable_output_partition_graphs(bool value) {
  _impl_.disable_output_partition_graphs_ = value;
}
inline void ConfigProto_Experimental::set_disable_output_partition_graphs(bool value) {
  _internal_set_disable_output_partition_graphs(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.disable_output_partition_graphs)
}

// int64 xla_fusion_autotuner_thresh = 15;
inline void ConfigProto_Experimental::clear_xla_fusion_autotuner_thresh() {
  _impl_.xla_fusion_autotuner_thresh_ = ::int64_t{0};
}
inline ::int64_t ConfigProto_Experimental::_internal_xla_fusion_autotuner_thresh() const {
  return _impl_.xla_fusion_autotuner_thresh_;
}
inline ::int64_t ConfigProto_Experimental::xla_fusion_autotuner_thresh() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.xla_fusion_autotuner_thresh)
  return _internal_xla_fusion_autotuner_thresh();
}
inline void ConfigProto_Experimental::_internal_set_xla_fusion_autotuner_thresh(::int64_t value) {
  _impl_.xla_fusion_autotuner_thresh_ = value;
}
inline void ConfigProto_Experimental::set_xla_fusion_autotuner_thresh(::int64_t value) {
  _internal_set_xla_fusion_autotuner_thresh(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.xla_fusion_autotuner_thresh)
}

// bool use_tfrt = 18;
inline void ConfigProto_Experimental::clear_use_tfrt() {
  _impl_.use_tfrt_ = false;
}
inline bool ConfigProto_Experimental::_internal_use_tfrt() const {
  return _impl_.use_tfrt_;
}
inline bool ConfigProto_Experimental::use_tfrt() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.use_tfrt)
  return _internal_use_tfrt();
}
inline void ConfigProto_Experimental::_internal_set_use_tfrt(bool value) {
  _impl_.use_tfrt_ = value;
}
inline void ConfigProto_Experimental::set_use_tfrt(bool value) {
  _internal_set_use_tfrt(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.use_tfrt)
}

// bool disable_functional_ops_lowering = 21;
inline void ConfigProto_Experimental::clear_disable_functional_ops_lowering() {
  _impl_.disable_functional_ops_lowering_ = false;
}
inline bool ConfigProto_Experimental::_internal_disable_functional_ops_lowering() const {
  return _impl_.disable_functional_ops_lowering_;
}
inline bool ConfigProto_Experimental::disable_functional_ops_lowering() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.disable_functional_ops_lowering)
  return _internal_disable_functional_ops_lowering();
}
inline void ConfigProto_Experimental::_internal_set_disable_functional_ops_lowering(bool value) {
  _impl_.disable_functional_ops_lowering_ = value;
}
inline void ConfigProto_Experimental::set_disable_functional_ops_lowering(bool value) {
  _internal_set_disable_functional_ops_lowering(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.disable_functional_ops_lowering)
}

// bool xla_prefer_single_graph_cluster = 22;
inline void ConfigProto_Experimental::clear_xla_prefer_single_graph_cluster() {
  _impl_.xla_prefer_single_graph_cluster_ = false;
}
inline bool ConfigProto_Experimental::_internal_xla_prefer_single_graph_cluster() const {
  return _impl_.xla_prefer_single_graph_cluster_;
}
inline bool ConfigProto_Experimental::xla_prefer_single_graph_cluster() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.xla_prefer_single_graph_cluster)
  return _internal_xla_prefer_single_graph_cluster();
}
inline void ConfigProto_Experimental::_internal_set_xla_prefer_single_graph_cluster(bool value) {
  _impl_.xla_prefer_single_graph_cluster_ = value;
}
inline void ConfigProto_Experimental::set_xla_prefer_single_graph_cluster(bool value) {
  _internal_set_xla_prefer_single_graph_cluster(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.Experimental.xla_prefer_single_graph_cluster)
}

// .tensorflow.CoordinationServiceConfig coordination_config = 23;
inline bool ConfigProto_Experimental::_internal_has_coordination_config() const {
  return this != internal_default_instance() && _impl_.coordination_config_ != nullptr;
}
inline bool ConfigProto_Experimental::has_coordination_config() const {
  return _internal_has_coordination_config();
}
inline const ::tensorflow::CoordinationServiceConfig& ConfigProto_Experimental::_internal_coordination_config() const {
  const ::tensorflow::CoordinationServiceConfig* p = _impl_.coordination_config_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::CoordinationServiceConfig&>(
      ::tensorflow::_CoordinationServiceConfig_default_instance_);
}
inline const ::tensorflow::CoordinationServiceConfig& ConfigProto_Experimental::coordination_config() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.Experimental.coordination_config)
  return _internal_coordination_config();
}
inline void ConfigProto_Experimental::unsafe_arena_set_allocated_coordination_config(
    ::tensorflow::CoordinationServiceConfig* coordination_config) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.coordination_config_);
  }
  _impl_.coordination_config_ = coordination_config;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.Experimental.coordination_config)
}
inline ::tensorflow::CoordinationServiceConfig* ConfigProto_Experimental::release_coordination_config() {
  ::tensorflow::CoordinationServiceConfig* temp = _impl_.coordination_config_;
  _impl_.coordination_config_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::CoordinationServiceConfig* ConfigProto_Experimental::unsafe_arena_release_coordination_config() {
  // @@protoc_insertion_point(field_release:tensorflow.ConfigProto.Experimental.coordination_config)
  ::tensorflow::CoordinationServiceConfig* temp = _impl_.coordination_config_;
  _impl_.coordination_config_ = nullptr;
  return temp;
}
inline ::tensorflow::CoordinationServiceConfig* ConfigProto_Experimental::_internal_mutable_coordination_config() {
  if (_impl_.coordination_config_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::CoordinationServiceConfig>(GetArenaForAllocation());
    _impl_.coordination_config_ = p;
  }
  return _impl_.coordination_config_;
}
inline ::tensorflow::CoordinationServiceConfig* ConfigProto_Experimental::mutable_coordination_config() {
  ::tensorflow::CoordinationServiceConfig* _msg = _internal_mutable_coordination_config();
  // @@protoc_insertion_point(field_mutable:tensorflow.ConfigProto.Experimental.coordination_config)
  return _msg;
}
inline void ConfigProto_Experimental::set_allocated_coordination_config(::tensorflow::CoordinationServiceConfig* coordination_config) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.coordination_config_);
  }
  if (coordination_config) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
                reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(coordination_config));
    if (message_arena != submessage_arena) {
      coordination_config = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, coordination_config, submessage_arena);
    }
  }
  _impl_.coordination_config_ = coordination_config;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.ConfigProto.Experimental.coordination_config)
}

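// An ownership sketch, illustrative only (not generated code): for message
// fields such as coordination_config, release_* hands the caller an owned
// pointer (copying off an arena, as the bodies above show) and set_allocated_*
// adopts one, so the pair transfers a submessage between parent messages.
inline void Example_MoveCoordinationConfig_SketchOnly(
    ConfigProto_Experimental* from, ConfigProto_Experimental* to) {
  // With no arenas involved this is a plain pointer handoff; with arenas the
  // generated bodies above insert the required copies.
  to->set_allocated_coordination_config(from->release_coordination_config());
}
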
// -------------------------------------------------------------------

// ConfigProto

// map<string, int32> device_count = 1;
inline int ConfigProto::_internal_device_count_size() const {
  return _impl_.device_count_.size();
}
inline int ConfigProto::device_count_size() const {
  return _internal_device_count_size();
}
inline void ConfigProto::clear_device_count() {
  _impl_.device_count_.Clear();
}
inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >&
ConfigProto::_internal_device_count() const {
  return _impl_.device_count_.GetMap();
}
inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >&
ConfigProto::device_count() const {
  // @@protoc_insertion_point(field_map:tensorflow.ConfigProto.device_count)
  return _internal_device_count();
}
inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >*
ConfigProto::_internal_mutable_device_count() {
  return _impl_.device_count_.MutableMap();
}
inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >*
ConfigProto::mutable_device_count() {
  // @@protoc_insertion_point(field_mutable_map:tensorflow.ConfigProto.device_count)
  return _internal_mutable_device_count();
}

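// A short sketch of the map accessors above, illustrative only (not generated
// code); the device names and counts are hypothetical.
inline void Example_LimitDeviceCounts_SketchOnly(ConfigProto* config) {
  // mutable_device_count() exposes the underlying protobuf Map; operator[]
  // inserts a missing key with a zero value before the assignment.
  (*config->mutable_device_count())["CPU"] = 4;
  (*config->mutable_device_count())["GPU"] = 0;
  GOOGLE_DCHECK_EQ(config->device_count_size(), 2);
}
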
// int32 intra_op_parallelism_threads = 2;
inline void ConfigProto::clear_intra_op_parallelism_threads() {
  _impl_.intra_op_parallelism_threads_ = 0;
}
inline ::int32_t ConfigProto::_internal_intra_op_parallelism_threads() const {
  return _impl_.intra_op_parallelism_threads_;
}
inline ::int32_t ConfigProto::intra_op_parallelism_threads() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.intra_op_parallelism_threads)
  return _internal_intra_op_parallelism_threads();
}
inline void ConfigProto::_internal_set_intra_op_parallelism_threads(::int32_t value) {
  _impl_.intra_op_parallelism_threads_ = value;
}
inline void ConfigProto::set_intra_op_parallelism_threads(::int32_t value) {
  _internal_set_intra_op_parallelism_threads(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.intra_op_parallelism_threads)
}

// int32 inter_op_parallelism_threads = 5;
inline void ConfigProto::clear_inter_op_parallelism_threads() {
  _impl_.inter_op_parallelism_threads_ = 0;
}
inline ::int32_t ConfigProto::_internal_inter_op_parallelism_threads() const {
  return _impl_.inter_op_parallelism_threads_;
}
inline ::int32_t ConfigProto::inter_op_parallelism_threads() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.inter_op_parallelism_threads)
  return _internal_inter_op_parallelism_threads();
}
inline void ConfigProto::_internal_set_inter_op_parallelism_threads(::int32_t value) {
  _impl_.inter_op_parallelism_threads_ = value;
}
inline void ConfigProto::set_inter_op_parallelism_threads(::int32_t value) {
  _internal_set_inter_op_parallelism_threads(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.inter_op_parallelism_threads)
}

// bool use_per_session_threads = 9;
inline void ConfigProto::clear_use_per_session_threads() {
  _impl_.use_per_session_threads_ = false;
}
inline bool ConfigProto::_internal_use_per_session_threads() const {
  return _impl_.use_per_session_threads_;
}
inline bool ConfigProto::use_per_session_threads() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.use_per_session_threads)
  return _internal_use_per_session_threads();
}
inline void ConfigProto::_internal_set_use_per_session_threads(bool value) {
  _impl_.use_per_session_threads_ = value;
}
inline void ConfigProto::set_use_per_session_threads(bool value) {
  _internal_set_use_per_session_threads(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.use_per_session_threads)
}

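// A usage sketch, illustrative only (not generated code): the scalar setters
// above are plain stores into _impl_, so configuring threading is just a
// sequence of set_* calls; the thread counts are hypothetical.
inline void Example_ConfigureThreading_SketchOnly(ConfigProto* config) {
  config->set_intra_op_parallelism_threads(8);  // parallelism within one op
  config->set_inter_op_parallelism_threads(2);  // concurrently scheduled ops
  config->set_use_per_session_threads(true);    // session-private thread pool
}
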
// repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
inline int ConfigProto::_internal_session_inter_op_thread_pool_size() const {
  return _impl_.session_inter_op_thread_pool_.size();
}
inline int ConfigProto::session_inter_op_thread_pool_size() const {
  return _internal_session_inter_op_thread_pool_size();
}
inline void ConfigProto::clear_session_inter_op_thread_pool() {
  _impl_.session_inter_op_thread_pool_.Clear();
}
inline ::tensorflow::ThreadPoolOptionProto* ConfigProto::mutable_session_inter_op_thread_pool(int index) {
  // @@protoc_insertion_point(field_mutable:tensorflow.ConfigProto.session_inter_op_thread_pool)
  return _impl_.session_inter_op_thread_pool_.Mutable(index);
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::ThreadPoolOptionProto >*
ConfigProto::mutable_session_inter_op_thread_pool() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.ConfigProto.session_inter_op_thread_pool)
  return &_impl_.session_inter_op_thread_pool_;
}
inline const ::tensorflow::ThreadPoolOptionProto& ConfigProto::_internal_session_inter_op_thread_pool(int index) const {
  return _impl_.session_inter_op_thread_pool_.Get(index);
}
inline const ::tensorflow::ThreadPoolOptionProto& ConfigProto::session_inter_op_thread_pool(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.session_inter_op_thread_pool)
  return _internal_session_inter_op_thread_pool(index);
}
inline ::tensorflow::ThreadPoolOptionProto* ConfigProto::_internal_add_session_inter_op_thread_pool() {
  return _impl_.session_inter_op_thread_pool_.Add();
}
inline ::tensorflow::ThreadPoolOptionProto* ConfigProto::add_session_inter_op_thread_pool() {
  ::tensorflow::ThreadPoolOptionProto* _add = _internal_add_session_inter_op_thread_pool();
  // @@protoc_insertion_point(field_add:tensorflow.ConfigProto.session_inter_op_thread_pool)
  return _add;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::ThreadPoolOptionProto >&
ConfigProto::session_inter_op_thread_pool() const {
  // @@protoc_insertion_point(field_list:tensorflow.ConfigProto.session_inter_op_thread_pool)
  return _impl_.session_inter_op_thread_pool_;
}

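// A sketch of the repeated-message accessors above, illustrative only (not
// generated code). It assumes ThreadPoolOptionProto's generated
// set_num_threads() setter (declared earlier in this header); the count is
// hypothetical.
inline void Example_AddThreadPool_SketchOnly(ConfigProto* config) {
  // add_session_inter_op_thread_pool() appends a default-constructed entry
  // and returns a mutable pointer to it.
  ::tensorflow::ThreadPoolOptionProto* pool = config->add_session_inter_op_thread_pool();
  pool->set_num_threads(4);
  GOOGLE_DCHECK_EQ(config->session_inter_op_thread_pool_size(), 1);
}
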
// int32 placement_period = 3;
inline void ConfigProto::clear_placement_period() {
  _impl_.placement_period_ = 0;
}
inline ::int32_t ConfigProto::_internal_placement_period() const {
  return _impl_.placement_period_;
}
inline ::int32_t ConfigProto::placement_period() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.placement_period)
  return _internal_placement_period();
}
inline void ConfigProto::_internal_set_placement_period(::int32_t value) {
  _impl_.placement_period_ = value;
}
inline void ConfigProto::set_placement_period(::int32_t value) {
  _internal_set_placement_period(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.placement_period)
}

// repeated string device_filters = 4;
inline int ConfigProto::_internal_device_filters_size() const {
  return _impl_.device_filters_.size();
}
inline int ConfigProto::device_filters_size() const {
  return _internal_device_filters_size();
}
inline void ConfigProto::clear_device_filters() {
  _impl_.device_filters_.Clear();
}
inline std::string* ConfigProto::add_device_filters() {
  std::string* _s = _internal_add_device_filters();
  // @@protoc_insertion_point(field_add_mutable:tensorflow.ConfigProto.device_filters)
  return _s;
}
inline const std::string& ConfigProto::_internal_device_filters(int index) const {
  return _impl_.device_filters_.Get(index);
}
inline const std::string& ConfigProto::device_filters(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.device_filters)
  return _internal_device_filters(index);
}
inline std::string* ConfigProto::mutable_device_filters(int index) {
  // @@protoc_insertion_point(field_mutable:tensorflow.ConfigProto.device_filters)
  return _impl_.device_filters_.Mutable(index);
}
inline void ConfigProto::set_device_filters(int index, const std::string& value) {
  _impl_.device_filters_.Mutable(index)->assign(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.device_filters)
}
inline void ConfigProto::set_device_filters(int index, std::string&& value) {
  _impl_.device_filters_.Mutable(index)->assign(std::move(value));
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.device_filters)
}
inline void ConfigProto::set_device_filters(int index, const char* value) {
  GOOGLE_DCHECK(value != nullptr);
  _impl_.device_filters_.Mutable(index)->assign(value);
  // @@protoc_insertion_point(field_set_char:tensorflow.ConfigProto.device_filters)
}
inline void ConfigProto::set_device_filters(int index, const char* value, size_t size) {
  _impl_.device_filters_.Mutable(index)->assign(
    reinterpret_cast<const char*>(value), size);
  // @@protoc_insertion_point(field_set_pointer:tensorflow.ConfigProto.device_filters)
}
inline std::string* ConfigProto::_internal_add_device_filters() {
  return _impl_.device_filters_.Add();
}
inline void ConfigProto::add_device_filters(const std::string& value) {
  _impl_.device_filters_.Add()->assign(value);
  // @@protoc_insertion_point(field_add:tensorflow.ConfigProto.device_filters)
}
inline void ConfigProto::add_device_filters(std::string&& value) {
  _impl_.device_filters_.Add(std::move(value));
  // @@protoc_insertion_point(field_add:tensorflow.ConfigProto.device_filters)
}
inline void ConfigProto::add_device_filters(const char* value) {
  GOOGLE_DCHECK(value != nullptr);
  _impl_.device_filters_.Add()->assign(value);
  // @@protoc_insertion_point(field_add_char:tensorflow.ConfigProto.device_filters)
}
inline void ConfigProto::add_device_filters(const char* value, size_t size) {
  _impl_.device_filters_.Add()->assign(reinterpret_cast<const char*>(value), size);
  // @@protoc_insertion_point(field_add_pointer:tensorflow.ConfigProto.device_filters)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
ConfigProto::device_filters() const {
  // @@protoc_insertion_point(field_list:tensorflow.ConfigProto.device_filters)
  return _impl_.device_filters_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
ConfigProto::mutable_device_filters() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.ConfigProto.device_filters)
  return &_impl_.device_filters_;
}

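// A sketch of the repeated-string overloads above, illustrative only (not
// generated code); the filter strings are hypothetical.
inline void Example_SetDeviceFilters_SketchOnly(ConfigProto* config) {
  config->add_device_filters("/job:worker");           // const char* overload
  config->add_device_filters(std::string("/job:ps"));  // std::string&& overload
  // The indexed setter reuses the existing std::string slot in place.
  config->set_device_filters(0, "/job:worker/replica:0");
}
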
// .tensorflow.GPUOptions gpu_options = 6;
inline bool ConfigProto::_internal_has_gpu_options() const {
  return this != internal_default_instance() && _impl_.gpu_options_ != nullptr;
}
inline bool ConfigProto::has_gpu_options() const {
  return _internal_has_gpu_options();
}
inline void ConfigProto::clear_gpu_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.gpu_options_ != nullptr) {
    delete _impl_.gpu_options_;
  }
  _impl_.gpu_options_ = nullptr;
}
inline const ::tensorflow::GPUOptions& ConfigProto::_internal_gpu_options() const {
  const ::tensorflow::GPUOptions* p = _impl_.gpu_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::GPUOptions&>(
      ::tensorflow::_GPUOptions_default_instance_);
}
inline const ::tensorflow::GPUOptions& ConfigProto::gpu_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.gpu_options)
  return _internal_gpu_options();
}
inline void ConfigProto::unsafe_arena_set_allocated_gpu_options(
    ::tensorflow::GPUOptions* gpu_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.gpu_options_);
  }
  _impl_.gpu_options_ = gpu_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.gpu_options)
}
inline ::tensorflow::GPUOptions* ConfigProto::release_gpu_options() {
  ::tensorflow::GPUOptions* temp = _impl_.gpu_options_;
  _impl_.gpu_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::GPUOptions* ConfigProto::unsafe_arena_release_gpu_options() {
  // @@protoc_insertion_point(field_release:tensorflow.ConfigProto.gpu_options)
  ::tensorflow::GPUOptions* temp = _impl_.gpu_options_;
  _impl_.gpu_options_ = nullptr;
  return temp;
}
inline ::tensorflow::GPUOptions* ConfigProto::_internal_mutable_gpu_options() {
  if (_impl_.gpu_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::GPUOptions>(GetArenaForAllocation());
    _impl_.gpu_options_ = p;
  }
  return _impl_.gpu_options_;
}
inline ::tensorflow::GPUOptions* ConfigProto::mutable_gpu_options() {
  ::tensorflow::GPUOptions* _msg = _internal_mutable_gpu_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.ConfigProto.gpu_options)
  return _msg;
}
inline void ConfigProto::set_allocated_gpu_options(::tensorflow::GPUOptions* gpu_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.gpu_options_;
  }
  if (gpu_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(gpu_options);
    if (message_arena != submessage_arena) {
      gpu_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, gpu_options, submessage_arena);
    }
  }
  _impl_.gpu_options_ = gpu_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.ConfigProto.gpu_options)
}

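// An ownership sketch for set_allocated_gpu_options(), illustrative only (not
// generated code): a heap-allocated submessage is adopted directly when this
// ConfigProto is heap-allocated, and copied onto the arena otherwise, exactly
// as the body above spells out. It assumes GPUOptions' generated
// set_allow_growth() setter (declared earlier in this header).
inline void Example_AdoptGpuOptions_SketchOnly(ConfigProto* config) {
  auto* gpu = new ::tensorflow::GPUOptions();
  gpu->set_allow_growth(true);             // hypothetical choice
  config->set_allocated_gpu_options(gpu);  // config now owns (or has copied) gpu
}
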
// bool allow_soft_placement = 7;
inline void ConfigProto::clear_allow_soft_placement() {
  _impl_.allow_soft_placement_ = false;
}
inline bool ConfigProto::_internal_allow_soft_placement() const {
  return _impl_.allow_soft_placement_;
}
inline bool ConfigProto::allow_soft_placement() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.allow_soft_placement)
  return _internal_allow_soft_placement();
}
inline void ConfigProto::_internal_set_allow_soft_placement(bool value) {
  _impl_.allow_soft_placement_ = value;
}
inline void ConfigProto::set_allow_soft_placement(bool value) {
  _internal_set_allow_soft_placement(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.allow_soft_placement)
}

// bool log_device_placement = 8;
inline void ConfigProto::clear_log_device_placement() {
  _impl_.log_device_placement_ = false;
}
inline bool ConfigProto::_internal_log_device_placement() const {
  return _impl_.log_device_placement_;
}
inline bool ConfigProto::log_device_placement() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.log_device_placement)
  return _internal_log_device_placement();
}
inline void ConfigProto::_internal_set_log_device_placement(bool value) {
  _impl_.log_device_placement_ = value;
}
inline void ConfigProto::set_log_device_placement(bool value) {
  _internal_set_log_device_placement(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.log_device_placement)
}

// .tensorflow.GraphOptions graph_options = 10;
inline bool ConfigProto::_internal_has_graph_options() const {
  return this != internal_default_instance() && _impl_.graph_options_ != nullptr;
}
inline bool ConfigProto::has_graph_options() const {
  return _internal_has_graph_options();
}
inline void ConfigProto::clear_graph_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.graph_options_ != nullptr) {
    delete _impl_.graph_options_;
  }
  _impl_.graph_options_ = nullptr;
}
inline const ::tensorflow::GraphOptions& ConfigProto::_internal_graph_options() const {
  const ::tensorflow::GraphOptions* p = _impl_.graph_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::GraphOptions&>(
      ::tensorflow::_GraphOptions_default_instance_);
}
inline const ::tensorflow::GraphOptions& ConfigProto::graph_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.graph_options)
  return _internal_graph_options();
}
inline void ConfigProto::unsafe_arena_set_allocated_graph_options(
    ::tensorflow::GraphOptions* graph_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.graph_options_);
  }
  _impl_.graph_options_ = graph_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.graph_options)
}
inline ::tensorflow::GraphOptions* ConfigProto::release_graph_options() {
  ::tensorflow::GraphOptions* temp = _impl_.graph_options_;
  _impl_.graph_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::GraphOptions* ConfigProto::unsafe_arena_release_graph_options() {
  // @@protoc_insertion_point(field_release:tensorflow.ConfigProto.graph_options)
  ::tensorflow::GraphOptions* temp = _impl_.graph_options_;
  _impl_.graph_options_ = nullptr;
  return temp;
}
inline ::tensorflow::GraphOptions* ConfigProto::_internal_mutable_graph_options() {
  if (_impl_.graph_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::GraphOptions>(GetArenaForAllocation());
    _impl_.graph_options_ = p;
  }
  return _impl_.graph_options_;
}
inline ::tensorflow::GraphOptions* ConfigProto::mutable_graph_options() {
  ::tensorflow::GraphOptions* _msg = _internal_mutable_graph_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.ConfigProto.graph_options)
  return _msg;
}
inline void ConfigProto::set_allocated_graph_options(::tensorflow::GraphOptions* graph_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.graph_options_;
  }
  if (graph_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(graph_options);
    if (message_arena != submessage_arena) {
      graph_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, graph_options, submessage_arena);
    }
  }
  _impl_.graph_options_ = graph_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.ConfigProto.graph_options)
}

// int64 operation_timeout_in_ms = 11;
inline void ConfigProto::clear_operation_timeout_in_ms() {
  _impl_.operation_timeout_in_ms_ = ::int64_t{0};
}
inline ::int64_t ConfigProto::_internal_operation_timeout_in_ms() const {
  return _impl_.operation_timeout_in_ms_;
}
inline ::int64_t ConfigProto::operation_timeout_in_ms() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.operation_timeout_in_ms)
  return _internal_operation_timeout_in_ms();
}
inline void ConfigProto::_internal_set_operation_timeout_in_ms(::int64_t value) {
  _impl_.operation_timeout_in_ms_ = value;
}
inline void ConfigProto::set_operation_timeout_in_ms(::int64_t value) {
  _internal_set_operation_timeout_in_ms(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.operation_timeout_in_ms)
}

// .tensorflow.RPCOptions rpc_options = 13;
inline bool ConfigProto::_internal_has_rpc_options() const {
  return this != internal_default_instance() && _impl_.rpc_options_ != nullptr;
}
inline bool ConfigProto::has_rpc_options() const {
  return _internal_has_rpc_options();
}
inline void ConfigProto::clear_rpc_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.rpc_options_ != nullptr) {
    delete _impl_.rpc_options_;
  }
  _impl_.rpc_options_ = nullptr;
}
inline const ::tensorflow::RPCOptions& ConfigProto::_internal_rpc_options() const {
  const ::tensorflow::RPCOptions* p = _impl_.rpc_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::RPCOptions&>(
      ::tensorflow::_RPCOptions_default_instance_);
}
inline const ::tensorflow::RPCOptions& ConfigProto::rpc_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.rpc_options)
  return _internal_rpc_options();
}
inline void ConfigProto::unsafe_arena_set_allocated_rpc_options(
    ::tensorflow::RPCOptions* rpc_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.rpc_options_);
  }
  _impl_.rpc_options_ = rpc_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.rpc_options)
}
inline ::tensorflow::RPCOptions* ConfigProto::release_rpc_options() {
  ::tensorflow::RPCOptions* temp = _impl_.rpc_options_;
  _impl_.rpc_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::RPCOptions* ConfigProto::unsafe_arena_release_rpc_options() {
  // @@protoc_insertion_point(field_release:tensorflow.ConfigProto.rpc_options)
  ::tensorflow::RPCOptions* temp = _impl_.rpc_options_;
  _impl_.rpc_options_ = nullptr;
  return temp;
}
inline ::tensorflow::RPCOptions* ConfigProto::_internal_mutable_rpc_options() {
  if (_impl_.rpc_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::RPCOptions>(GetArenaForAllocation());
    _impl_.rpc_options_ = p;
  }
  return _impl_.rpc_options_;
}
inline ::tensorflow::RPCOptions* ConfigProto::mutable_rpc_options() {
  ::tensorflow::RPCOptions* _msg = _internal_mutable_rpc_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.ConfigProto.rpc_options)
  return _msg;
}
inline void ConfigProto::set_allocated_rpc_options(::tensorflow::RPCOptions* rpc_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.rpc_options_;
  }
  if (rpc_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(rpc_options);
    if (message_arena != submessage_arena) {
      rpc_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, rpc_options, submessage_arena);
    }
  }
  _impl_.rpc_options_ = rpc_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.ConfigProto.rpc_options)
}

// .tensorflow.ClusterDef cluster_def = 14;
inline bool ConfigProto::_internal_has_cluster_def() const {
  return this != internal_default_instance() && _impl_.cluster_def_ != nullptr;
}
inline bool ConfigProto::has_cluster_def() const {
  return _internal_has_cluster_def();
}
inline const ::tensorflow::ClusterDef& ConfigProto::_internal_cluster_def() const {
  const ::tensorflow::ClusterDef* p = _impl_.cluster_def_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::ClusterDef&>(
      ::tensorflow::_ClusterDef_default_instance_);
}
inline const ::tensorflow::ClusterDef& ConfigProto::cluster_def() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.cluster_def)
  return _internal_cluster_def();
}
inline void ConfigProto::unsafe_arena_set_allocated_cluster_def(
    ::tensorflow::ClusterDef* cluster_def) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.cluster_def_);
  }
  _impl_.cluster_def_ = cluster_def;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.cluster_def)
}
inline ::tensorflow::ClusterDef* ConfigProto::release_cluster_def() {
  ::tensorflow::ClusterDef* temp = _impl_.cluster_def_;
  _impl_.cluster_def_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::ClusterDef* ConfigProto::unsafe_arena_release_cluster_def() {
  // @@protoc_insertion_point(field_release:tensorflow.ConfigProto.cluster_def)
  ::tensorflow::ClusterDef* temp = _impl_.cluster_def_;
  _impl_.cluster_def_ = nullptr;
  return temp;
}
inline ::tensorflow::ClusterDef* ConfigProto::_internal_mutable_cluster_def() {
  if (_impl_.cluster_def_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::ClusterDef>(GetArenaForAllocation());
    _impl_.cluster_def_ = p;
  }
  return _impl_.cluster_def_;
}
inline ::tensorflow::ClusterDef* ConfigProto::mutable_cluster_def() {
  ::tensorflow::ClusterDef* _msg = _internal_mutable_cluster_def();
  // @@protoc_insertion_point(field_mutable:tensorflow.ConfigProto.cluster_def)
  return _msg;
}
inline void ConfigProto::set_allocated_cluster_def(::tensorflow::ClusterDef* cluster_def) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.cluster_def_);
  }
  if (cluster_def) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
                reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(cluster_def));
    if (message_arena != submessage_arena) {
      cluster_def = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, cluster_def, submessage_arena);
    }
  }
  _impl_.cluster_def_ = cluster_def;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.ConfigProto.cluster_def)
}

// bool isolate_session_state = 15;
inline void ConfigProto::clear_isolate_session_state() {
  _impl_.isolate_session_state_ = false;
}
inline bool ConfigProto::_internal_isolate_session_state() const {
  return _impl_.isolate_session_state_;
}
inline bool ConfigProto::isolate_session_state() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.isolate_session_state)
  return _internal_isolate_session_state();
}
inline void ConfigProto::_internal_set_isolate_session_state(bool value) {
  _impl_.isolate_session_state_ = value;
}
inline void ConfigProto::set_isolate_session_state(bool value) {
  _internal_set_isolate_session_state(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.isolate_session_state)
}

// bool share_cluster_devices_in_session = 17;
inline void ConfigProto::clear_share_cluster_devices_in_session() {
  _impl_.share_cluster_devices_in_session_ = false;
}
inline bool ConfigProto::_internal_share_cluster_devices_in_session() const {
  return _impl_.share_cluster_devices_in_session_;
}
inline bool ConfigProto::share_cluster_devices_in_session() const {
  // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.share_cluster_devices_in_session)
  return _internal_share_cluster_devices_in_session();
}
inline void ConfigProto::_internal_set_share_cluster_devices_in_session(bool value) {
  _impl_.share_cluster_devices_in_session_ = value;
}
inline void ConfigProto::set_share_cluster_devices_in_session(bool value) {
  _internal_set_share_cluster_devices_in_session(value);
  // @@protoc_insertion_point(field_set:tensorflow.ConfigProto.share_cluster_devices_in_session)
}

6912 // .tensorflow.ConfigProto.Experimental experimental = 16;
_internal_has_experimental()6913 inline bool ConfigProto::_internal_has_experimental() const {
6914   return this != internal_default_instance() && _impl_.experimental_ != nullptr;
6915 }
has_experimental()6916 inline bool ConfigProto::has_experimental() const {
6917   return _internal_has_experimental();
6918 }
clear_experimental()6919 inline void ConfigProto::clear_experimental() {
6920   if (GetArenaForAllocation() == nullptr && _impl_.experimental_ != nullptr) {
6921     delete _impl_.experimental_;
6922   }
6923   _impl_.experimental_ = nullptr;
6924 }
_internal_experimental()6925 inline const ::tensorflow::ConfigProto_Experimental& ConfigProto::_internal_experimental() const {
6926   const ::tensorflow::ConfigProto_Experimental* p = _impl_.experimental_;
6927   return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::ConfigProto_Experimental&>(
6928       ::tensorflow::_ConfigProto_Experimental_default_instance_);
6929 }
experimental()6930 inline const ::tensorflow::ConfigProto_Experimental& ConfigProto::experimental() const {
6931   // @@protoc_insertion_point(field_get:tensorflow.ConfigProto.experimental)
6932   return _internal_experimental();
6933 }
unsafe_arena_set_allocated_experimental(::tensorflow::ConfigProto_Experimental * experimental)6934 inline void ConfigProto::unsafe_arena_set_allocated_experimental(
6935     ::tensorflow::ConfigProto_Experimental* experimental) {
6936   if (GetArenaForAllocation() == nullptr) {
6937     delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.experimental_);
6938   }
6939   _impl_.experimental_ = experimental;
6940   // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.experimental)
6941 }
release_experimental()6942 inline ::tensorflow::ConfigProto_Experimental* ConfigProto::release_experimental() {
6943 
6944   ::tensorflow::ConfigProto_Experimental* temp = _impl_.experimental_;
6945   _impl_.experimental_ = nullptr;
6946 #ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
6947   auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
6948   temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
6949   if (GetArenaForAllocation() == nullptr) { delete old; }
6950 #else  // PROTOBUF_FORCE_COPY_IN_RELEASE
6951   if (GetArenaForAllocation() != nullptr) {
6952     temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
6953   }
6954 #endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
6955   return temp;
6956 }
unsafe_arena_release_experimental()6957 inline ::tensorflow::ConfigProto_Experimental* ConfigProto::unsafe_arena_release_experimental() {
6958   // @@protoc_insertion_point(field_release:tensorflow.ConfigProto.experimental)
6959 
6960   ::tensorflow::ConfigProto_Experimental* temp = _impl_.experimental_;
6961   _impl_.experimental_ = nullptr;
6962   return temp;
6963 }
_internal_mutable_experimental()6964 inline ::tensorflow::ConfigProto_Experimental* ConfigProto::_internal_mutable_experimental() {
6965 
6966   if (_impl_.experimental_ == nullptr) {
6967     auto* p = CreateMaybeMessage<::tensorflow::ConfigProto_Experimental>(GetArenaForAllocation());
6968     _impl_.experimental_ = p;
6969   }
6970   return _impl_.experimental_;
6971 }
mutable_experimental()6972 inline ::tensorflow::ConfigProto_Experimental* ConfigProto::mutable_experimental() {
6973   ::tensorflow::ConfigProto_Experimental* _msg = _internal_mutable_experimental();
6974   // @@protoc_insertion_point(field_mutable:tensorflow.ConfigProto.experimental)
6975   return _msg;
6976 }
set_allocated_experimental(::tensorflow::ConfigProto_Experimental * experimental)6977 inline void ConfigProto::set_allocated_experimental(::tensorflow::ConfigProto_Experimental* experimental) {
6978   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
6979   if (message_arena == nullptr) {
6980     delete _impl_.experimental_;
6981   }
6982   if (experimental) {
6983     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
6984         ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(experimental);
6985     if (message_arena != submessage_arena) {
6986       experimental = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
6987           message_arena, experimental, submessage_arena);
6988     }
6989 
6990   } else {
6991 
6992   }
6993   _impl_.experimental_ = experimental;
6994   // @@protoc_insertion_point(field_set_allocated:tensorflow.ConfigProto.experimental)
6995 }
6996 
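// Editor's note -- usage sketch, not emitted by protoc. The singular
// submessage accessors above follow the standard generated-API ownership
// contract; a minimal off-arena example:
//
//   tensorflow::ConfigProto config;
//   config.mutable_cluster_def();                 // lazily creates the submessage
//   bool present = config.has_cluster_def();      // true after mutable_* access
//   tensorflow::ClusterDef* owned = config.release_cluster_def();
//   config.set_allocated_cluster_def(owned);      // config owns the pointer again
//
// release_*() hands ownership to the caller only off-arena; on an arena it
// may return a copy (see the PROTOBUF_FORCE_COPY_IN_RELEASE branches above),
// and the unsafe_arena_* variants skip ownership handling altogether.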
// -------------------------------------------------------------------

// RunOptions_Experimental_RunHandlerPoolOptions

// int64 priority = 1;
inline void RunOptions_Experimental_RunHandlerPoolOptions::clear_priority() {
  _impl_.priority_ = ::int64_t{0};
}
inline ::int64_t RunOptions_Experimental_RunHandlerPoolOptions::_internal_priority() const {
  return _impl_.priority_;
}
inline ::int64_t RunOptions_Experimental_RunHandlerPoolOptions::priority() const {
  // @@protoc_insertion_point(field_get:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions.priority)
  return _internal_priority();
}
inline void RunOptions_Experimental_RunHandlerPoolOptions::_internal_set_priority(::int64_t value) {

  _impl_.priority_ = value;
}
inline void RunOptions_Experimental_RunHandlerPoolOptions::set_priority(::int64_t value) {
  _internal_set_priority(value);
  // @@protoc_insertion_point(field_set:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions.priority)
}

// -------------------------------------------------------------------

// RunOptions_Experimental

// int64 collective_graph_key = 1;
inline void RunOptions_Experimental::clear_collective_graph_key() {
  _impl_.collective_graph_key_ = ::int64_t{0};
}
inline ::int64_t RunOptions_Experimental::_internal_collective_graph_key() const {
  return _impl_.collective_graph_key_;
}
inline ::int64_t RunOptions_Experimental::collective_graph_key() const {
  // @@protoc_insertion_point(field_get:tensorflow.RunOptions.Experimental.collective_graph_key)
  return _internal_collective_graph_key();
}
inline void RunOptions_Experimental::_internal_set_collective_graph_key(::int64_t value) {

  _impl_.collective_graph_key_ = value;
}
inline void RunOptions_Experimental::set_collective_graph_key(::int64_t value) {
  _internal_set_collective_graph_key(value);
  // @@protoc_insertion_point(field_set:tensorflow.RunOptions.Experimental.collective_graph_key)
}

// bool use_run_handler_pool = 2;
inline void RunOptions_Experimental::clear_use_run_handler_pool() {
  _impl_.use_run_handler_pool_ = false;
}
inline bool RunOptions_Experimental::_internal_use_run_handler_pool() const {
  return _impl_.use_run_handler_pool_;
}
inline bool RunOptions_Experimental::use_run_handler_pool() const {
  // @@protoc_insertion_point(field_get:tensorflow.RunOptions.Experimental.use_run_handler_pool)
  return _internal_use_run_handler_pool();
}
inline void RunOptions_Experimental::_internal_set_use_run_handler_pool(bool value) {

  _impl_.use_run_handler_pool_ = value;
}
inline void RunOptions_Experimental::set_use_run_handler_pool(bool value) {
  _internal_set_use_run_handler_pool(value);
  // @@protoc_insertion_point(field_set:tensorflow.RunOptions.Experimental.use_run_handler_pool)
}

// .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;
inline bool RunOptions_Experimental::_internal_has_run_handler_pool_options() const {
  return this != internal_default_instance() && _impl_.run_handler_pool_options_ != nullptr;
}
inline bool RunOptions_Experimental::has_run_handler_pool_options() const {
  return _internal_has_run_handler_pool_options();
}
inline void RunOptions_Experimental::clear_run_handler_pool_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.run_handler_pool_options_ != nullptr) {
    delete _impl_.run_handler_pool_options_;
  }
  _impl_.run_handler_pool_options_ = nullptr;
}
inline const ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions& RunOptions_Experimental::_internal_run_handler_pool_options() const {
  const ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* p = _impl_.run_handler_pool_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions&>(
      ::tensorflow::_RunOptions_Experimental_RunHandlerPoolOptions_default_instance_);
}
inline const ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions& RunOptions_Experimental::run_handler_pool_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.RunOptions.Experimental.run_handler_pool_options)
  return _internal_run_handler_pool_options();
}
inline void RunOptions_Experimental::unsafe_arena_set_allocated_run_handler_pool_options(
    ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* run_handler_pool_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.run_handler_pool_options_);
  }
  _impl_.run_handler_pool_options_ = run_handler_pool_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunOptions.Experimental.run_handler_pool_options)
}
inline ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* RunOptions_Experimental::release_run_handler_pool_options() {

  ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* temp = _impl_.run_handler_pool_options_;
  _impl_.run_handler_pool_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* RunOptions_Experimental::unsafe_arena_release_run_handler_pool_options() {
  // @@protoc_insertion_point(field_release:tensorflow.RunOptions.Experimental.run_handler_pool_options)

  ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* temp = _impl_.run_handler_pool_options_;
  _impl_.run_handler_pool_options_ = nullptr;
  return temp;
}
inline ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* RunOptions_Experimental::_internal_mutable_run_handler_pool_options() {

  if (_impl_.run_handler_pool_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions>(GetArenaForAllocation());
    _impl_.run_handler_pool_options_ = p;
  }
  return _impl_.run_handler_pool_options_;
}
inline ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* RunOptions_Experimental::mutable_run_handler_pool_options() {
  ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* _msg = _internal_mutable_run_handler_pool_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.RunOptions.Experimental.run_handler_pool_options)
  return _msg;
}
inline void RunOptions_Experimental::set_allocated_run_handler_pool_options(::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* run_handler_pool_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.run_handler_pool_options_;
  }
  if (run_handler_pool_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(run_handler_pool_options);
    if (message_arena != submessage_arena) {
      run_handler_pool_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, run_handler_pool_options, submessage_arena);
    }

  } else {

  }
  _impl_.run_handler_pool_options_ = run_handler_pool_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.RunOptions.Experimental.run_handler_pool_options)
}

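// Editor's note -- usage sketch, not emitted by protoc. Nested singular
// messages compose through mutable_* chains, each link creating its
// submessage on first use, so the chain is safe on a fresh message:
//
//   tensorflow::RunOptions opts;
//   opts.mutable_experimental()->set_use_run_handler_pool(true);
//   opts.mutable_experimental()
//       ->mutable_run_handler_pool_options()
//       ->set_priority(2);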
// -------------------------------------------------------------------

// RunOptions

// .tensorflow.RunOptions.TraceLevel trace_level = 1;
inline void RunOptions::clear_trace_level() {
  _impl_.trace_level_ = 0;
}
inline ::tensorflow::RunOptions_TraceLevel RunOptions::_internal_trace_level() const {
  return static_cast< ::tensorflow::RunOptions_TraceLevel >(_impl_.trace_level_);
}
inline ::tensorflow::RunOptions_TraceLevel RunOptions::trace_level() const {
  // @@protoc_insertion_point(field_get:tensorflow.RunOptions.trace_level)
  return _internal_trace_level();
}
inline void RunOptions::_internal_set_trace_level(::tensorflow::RunOptions_TraceLevel value) {

  _impl_.trace_level_ = value;
}
inline void RunOptions::set_trace_level(::tensorflow::RunOptions_TraceLevel value) {
  _internal_set_trace_level(value);
  // @@protoc_insertion_point(field_set:tensorflow.RunOptions.trace_level)
}

// int64 timeout_in_ms = 2;
inline void RunOptions::clear_timeout_in_ms() {
  _impl_.timeout_in_ms_ = ::int64_t{0};
}
inline ::int64_t RunOptions::_internal_timeout_in_ms() const {
  return _impl_.timeout_in_ms_;
}
inline ::int64_t RunOptions::timeout_in_ms() const {
  // @@protoc_insertion_point(field_get:tensorflow.RunOptions.timeout_in_ms)
  return _internal_timeout_in_ms();
}
inline void RunOptions::_internal_set_timeout_in_ms(::int64_t value) {

  _impl_.timeout_in_ms_ = value;
}
inline void RunOptions::set_timeout_in_ms(::int64_t value) {
  _internal_set_timeout_in_ms(value);
  // @@protoc_insertion_point(field_set:tensorflow.RunOptions.timeout_in_ms)
}

// int32 inter_op_thread_pool = 3;
inline void RunOptions::clear_inter_op_thread_pool() {
  _impl_.inter_op_thread_pool_ = 0;
}
inline ::int32_t RunOptions::_internal_inter_op_thread_pool() const {
  return _impl_.inter_op_thread_pool_;
}
inline ::int32_t RunOptions::inter_op_thread_pool() const {
  // @@protoc_insertion_point(field_get:tensorflow.RunOptions.inter_op_thread_pool)
  return _internal_inter_op_thread_pool();
}
inline void RunOptions::_internal_set_inter_op_thread_pool(::int32_t value) {

  _impl_.inter_op_thread_pool_ = value;
}
inline void RunOptions::set_inter_op_thread_pool(::int32_t value) {
  _internal_set_inter_op_thread_pool(value);
  // @@protoc_insertion_point(field_set:tensorflow.RunOptions.inter_op_thread_pool)
}

// bool output_partition_graphs = 5;
inline void RunOptions::clear_output_partition_graphs() {
  _impl_.output_partition_graphs_ = false;
}
inline bool RunOptions::_internal_output_partition_graphs() const {
  return _impl_.output_partition_graphs_;
}
inline bool RunOptions::output_partition_graphs() const {
  // @@protoc_insertion_point(field_get:tensorflow.RunOptions.output_partition_graphs)
  return _internal_output_partition_graphs();
}
inline void RunOptions::_internal_set_output_partition_graphs(bool value) {

  _impl_.output_partition_graphs_ = value;
}
inline void RunOptions::set_output_partition_graphs(bool value) {
  _internal_set_output_partition_graphs(value);
  // @@protoc_insertion_point(field_set:tensorflow.RunOptions.output_partition_graphs)
}

// .tensorflow.DebugOptions debug_options = 6;
inline bool RunOptions::_internal_has_debug_options() const {
  return this != internal_default_instance() && _impl_.debug_options_ != nullptr;
}
inline bool RunOptions::has_debug_options() const {
  return _internal_has_debug_options();
}
inline const ::tensorflow::DebugOptions& RunOptions::_internal_debug_options() const {
  const ::tensorflow::DebugOptions* p = _impl_.debug_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::DebugOptions&>(
      ::tensorflow::_DebugOptions_default_instance_);
}
inline const ::tensorflow::DebugOptions& RunOptions::debug_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.RunOptions.debug_options)
  return _internal_debug_options();
}
inline void RunOptions::unsafe_arena_set_allocated_debug_options(
    ::tensorflow::DebugOptions* debug_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.debug_options_);
  }
  _impl_.debug_options_ = debug_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunOptions.debug_options)
}
inline ::tensorflow::DebugOptions* RunOptions::release_debug_options() {

  ::tensorflow::DebugOptions* temp = _impl_.debug_options_;
  _impl_.debug_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::DebugOptions* RunOptions::unsafe_arena_release_debug_options() {
  // @@protoc_insertion_point(field_release:tensorflow.RunOptions.debug_options)

  ::tensorflow::DebugOptions* temp = _impl_.debug_options_;
  _impl_.debug_options_ = nullptr;
  return temp;
}
inline ::tensorflow::DebugOptions* RunOptions::_internal_mutable_debug_options() {

  if (_impl_.debug_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::DebugOptions>(GetArenaForAllocation());
    _impl_.debug_options_ = p;
  }
  return _impl_.debug_options_;
}
inline ::tensorflow::DebugOptions* RunOptions::mutable_debug_options() {
  ::tensorflow::DebugOptions* _msg = _internal_mutable_debug_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.RunOptions.debug_options)
  return _msg;
}
inline void RunOptions::set_allocated_debug_options(::tensorflow::DebugOptions* debug_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.debug_options_);
  }
  if (debug_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
                reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(debug_options));
    if (message_arena != submessage_arena) {
      debug_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, debug_options, submessage_arena);
    }

  } else {

  }
  _impl_.debug_options_ = debug_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.RunOptions.debug_options)
}

// bool report_tensor_allocations_upon_oom = 7;
inline void RunOptions::clear_report_tensor_allocations_upon_oom() {
  _impl_.report_tensor_allocations_upon_oom_ = false;
}
inline bool RunOptions::_internal_report_tensor_allocations_upon_oom() const {
  return _impl_.report_tensor_allocations_upon_oom_;
}
inline bool RunOptions::report_tensor_allocations_upon_oom() const {
  // @@protoc_insertion_point(field_get:tensorflow.RunOptions.report_tensor_allocations_upon_oom)
  return _internal_report_tensor_allocations_upon_oom();
}
inline void RunOptions::_internal_set_report_tensor_allocations_upon_oom(bool value) {

  _impl_.report_tensor_allocations_upon_oom_ = value;
}
inline void RunOptions::set_report_tensor_allocations_upon_oom(bool value) {
  _internal_set_report_tensor_allocations_upon_oom(value);
  // @@protoc_insertion_point(field_set:tensorflow.RunOptions.report_tensor_allocations_upon_oom)
}

// .tensorflow.RunOptions.Experimental experimental = 8;
inline bool RunOptions::_internal_has_experimental() const {
  return this != internal_default_instance() && _impl_.experimental_ != nullptr;
}
inline bool RunOptions::has_experimental() const {
  return _internal_has_experimental();
}
inline void RunOptions::clear_experimental() {
  if (GetArenaForAllocation() == nullptr && _impl_.experimental_ != nullptr) {
    delete _impl_.experimental_;
  }
  _impl_.experimental_ = nullptr;
}
inline const ::tensorflow::RunOptions_Experimental& RunOptions::_internal_experimental() const {
  const ::tensorflow::RunOptions_Experimental* p = _impl_.experimental_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::RunOptions_Experimental&>(
      ::tensorflow::_RunOptions_Experimental_default_instance_);
}
inline const ::tensorflow::RunOptions_Experimental& RunOptions::experimental() const {
  // @@protoc_insertion_point(field_get:tensorflow.RunOptions.experimental)
  return _internal_experimental();
}
inline void RunOptions::unsafe_arena_set_allocated_experimental(
    ::tensorflow::RunOptions_Experimental* experimental) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.experimental_);
  }
  _impl_.experimental_ = experimental;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunOptions.experimental)
}
inline ::tensorflow::RunOptions_Experimental* RunOptions::release_experimental() {

  ::tensorflow::RunOptions_Experimental* temp = _impl_.experimental_;
  _impl_.experimental_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::RunOptions_Experimental* RunOptions::unsafe_arena_release_experimental() {
  // @@protoc_insertion_point(field_release:tensorflow.RunOptions.experimental)

  ::tensorflow::RunOptions_Experimental* temp = _impl_.experimental_;
  _impl_.experimental_ = nullptr;
  return temp;
}
inline ::tensorflow::RunOptions_Experimental* RunOptions::_internal_mutable_experimental() {

  if (_impl_.experimental_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::RunOptions_Experimental>(GetArenaForAllocation());
    _impl_.experimental_ = p;
  }
  return _impl_.experimental_;
}
inline ::tensorflow::RunOptions_Experimental* RunOptions::mutable_experimental() {
  ::tensorflow::RunOptions_Experimental* _msg = _internal_mutable_experimental();
  // @@protoc_insertion_point(field_mutable:tensorflow.RunOptions.experimental)
  return _msg;
}
inline void RunOptions::set_allocated_experimental(::tensorflow::RunOptions_Experimental* experimental) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.experimental_;
  }
  if (experimental) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(experimental);
    if (message_arena != submessage_arena) {
      experimental = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, experimental, submessage_arena);
    }

  } else {

  }
  _impl_.experimental_ = experimental;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.RunOptions.experimental)
}

// -------------------------------------------------------------------

// RunMetadata_FunctionGraphs

// repeated .tensorflow.GraphDef partition_graphs = 1;
inline int RunMetadata_FunctionGraphs::_internal_partition_graphs_size() const {
  return _impl_.partition_graphs_.size();
}
inline int RunMetadata_FunctionGraphs::partition_graphs_size() const {
  return _internal_partition_graphs_size();
}
inline ::tensorflow::GraphDef* RunMetadata_FunctionGraphs::mutable_partition_graphs(int index) {
  // @@protoc_insertion_point(field_mutable:tensorflow.RunMetadata.FunctionGraphs.partition_graphs)
  return _impl_.partition_graphs_.Mutable(index);
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef >*
RunMetadata_FunctionGraphs::mutable_partition_graphs() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.RunMetadata.FunctionGraphs.partition_graphs)
  return &_impl_.partition_graphs_;
}
inline const ::tensorflow::GraphDef& RunMetadata_FunctionGraphs::_internal_partition_graphs(int index) const {
  return _impl_.partition_graphs_.Get(index);
}
inline const ::tensorflow::GraphDef& RunMetadata_FunctionGraphs::partition_graphs(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.RunMetadata.FunctionGraphs.partition_graphs)
  return _internal_partition_graphs(index);
}
inline ::tensorflow::GraphDef* RunMetadata_FunctionGraphs::_internal_add_partition_graphs() {
  return _impl_.partition_graphs_.Add();
}
inline ::tensorflow::GraphDef* RunMetadata_FunctionGraphs::add_partition_graphs() {
  ::tensorflow::GraphDef* _add = _internal_add_partition_graphs();
  // @@protoc_insertion_point(field_add:tensorflow.RunMetadata.FunctionGraphs.partition_graphs)
  return _add;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef >&
RunMetadata_FunctionGraphs::partition_graphs() const {
  // @@protoc_insertion_point(field_list:tensorflow.RunMetadata.FunctionGraphs.partition_graphs)
  return _impl_.partition_graphs_;
}

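// Editor's note -- usage sketch, not emitted by protoc. Repeated message
// fields expose size/index/add accessors plus the RepeatedPtrField view,
// which supports range-based iteration:
//
//   tensorflow::RunMetadata_FunctionGraphs fg;
//   tensorflow::GraphDef* g = fg.add_partition_graphs();  // appends one element
//   // ... populate *g ...
//   for (const tensorflow::GraphDef& graph : fg.partition_graphs()) {
//     // read-only pass over all fg.partition_graphs_size() elements
//   }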
7460 // .tensorflow.GraphDef pre_optimization_graph = 2;
_internal_has_pre_optimization_graph()7461 inline bool RunMetadata_FunctionGraphs::_internal_has_pre_optimization_graph() const {
7462   return this != internal_default_instance() && _impl_.pre_optimization_graph_ != nullptr;
7463 }
has_pre_optimization_graph()7464 inline bool RunMetadata_FunctionGraphs::has_pre_optimization_graph() const {
7465   return _internal_has_pre_optimization_graph();
7466 }
_internal_pre_optimization_graph()7467 inline const ::tensorflow::GraphDef& RunMetadata_FunctionGraphs::_internal_pre_optimization_graph() const {
7468   const ::tensorflow::GraphDef* p = _impl_.pre_optimization_graph_;
7469   return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::GraphDef&>(
7470       ::tensorflow::_GraphDef_default_instance_);
7471 }
pre_optimization_graph()7472 inline const ::tensorflow::GraphDef& RunMetadata_FunctionGraphs::pre_optimization_graph() const {
7473   // @@protoc_insertion_point(field_get:tensorflow.RunMetadata.FunctionGraphs.pre_optimization_graph)
7474   return _internal_pre_optimization_graph();
7475 }
unsafe_arena_set_allocated_pre_optimization_graph(::tensorflow::GraphDef * pre_optimization_graph)7476 inline void RunMetadata_FunctionGraphs::unsafe_arena_set_allocated_pre_optimization_graph(
7477     ::tensorflow::GraphDef* pre_optimization_graph) {
7478   if (GetArenaForAllocation() == nullptr) {
7479     delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.pre_optimization_graph_);
7480   }
7481   _impl_.pre_optimization_graph_ = pre_optimization_graph;
7482   // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunMetadata.FunctionGraphs.pre_optimization_graph)
7483 }
release_pre_optimization_graph()7484 inline ::tensorflow::GraphDef* RunMetadata_FunctionGraphs::release_pre_optimization_graph() {
7485 
7486   ::tensorflow::GraphDef* temp = _impl_.pre_optimization_graph_;
7487   _impl_.pre_optimization_graph_ = nullptr;
7488 #ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
7489   auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
7490   temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
7491   if (GetArenaForAllocation() == nullptr) { delete old; }
7492 #else  // PROTOBUF_FORCE_COPY_IN_RELEASE
7493   if (GetArenaForAllocation() != nullptr) {
7494     temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
7495   }
7496 #endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
7497   return temp;
7498 }
unsafe_arena_release_pre_optimization_graph()7499 inline ::tensorflow::GraphDef* RunMetadata_FunctionGraphs::unsafe_arena_release_pre_optimization_graph() {
7500   // @@protoc_insertion_point(field_release:tensorflow.RunMetadata.FunctionGraphs.pre_optimization_graph)
7501 
7502   ::tensorflow::GraphDef* temp = _impl_.pre_optimization_graph_;
7503   _impl_.pre_optimization_graph_ = nullptr;
7504   return temp;
7505 }
_internal_mutable_pre_optimization_graph()7506 inline ::tensorflow::GraphDef* RunMetadata_FunctionGraphs::_internal_mutable_pre_optimization_graph() {
7507 
7508   if (_impl_.pre_optimization_graph_ == nullptr) {
7509     auto* p = CreateMaybeMessage<::tensorflow::GraphDef>(GetArenaForAllocation());
7510     _impl_.pre_optimization_graph_ = p;
7511   }
7512   return _impl_.pre_optimization_graph_;
7513 }
mutable_pre_optimization_graph()7514 inline ::tensorflow::GraphDef* RunMetadata_FunctionGraphs::mutable_pre_optimization_graph() {
7515   ::tensorflow::GraphDef* _msg = _internal_mutable_pre_optimization_graph();
7516   // @@protoc_insertion_point(field_mutable:tensorflow.RunMetadata.FunctionGraphs.pre_optimization_graph)
7517   return _msg;
7518 }
set_allocated_pre_optimization_graph(::tensorflow::GraphDef * pre_optimization_graph)7519 inline void RunMetadata_FunctionGraphs::set_allocated_pre_optimization_graph(::tensorflow::GraphDef* pre_optimization_graph) {
7520   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
7521   if (message_arena == nullptr) {
7522     delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.pre_optimization_graph_);
7523   }
7524   if (pre_optimization_graph) {
7525     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
7526         ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
7527                 reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(pre_optimization_graph));
7528     if (message_arena != submessage_arena) {
7529       pre_optimization_graph = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
7530           message_arena, pre_optimization_graph, submessage_arena);
7531     }
7532 
7533   } else {
7534 
7535   }
7536   _impl_.pre_optimization_graph_ = pre_optimization_graph;
7537   // @@protoc_insertion_point(field_set_allocated:tensorflow.RunMetadata.FunctionGraphs.pre_optimization_graph)
7538 }
7539 
7540 // .tensorflow.GraphDef post_optimization_graph = 3;
_internal_has_post_optimization_graph()7541 inline bool RunMetadata_FunctionGraphs::_internal_has_post_optimization_graph() const {
7542   return this != internal_default_instance() && _impl_.post_optimization_graph_ != nullptr;
7543 }
has_post_optimization_graph()7544 inline bool RunMetadata_FunctionGraphs::has_post_optimization_graph() const {
7545   return _internal_has_post_optimization_graph();
7546 }
_internal_post_optimization_graph()7547 inline const ::tensorflow::GraphDef& RunMetadata_FunctionGraphs::_internal_post_optimization_graph() const {
7548   const ::tensorflow::GraphDef* p = _impl_.post_optimization_graph_;
7549   return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::GraphDef&>(
7550       ::tensorflow::_GraphDef_default_instance_);
7551 }
post_optimization_graph()7552 inline const ::tensorflow::GraphDef& RunMetadata_FunctionGraphs::post_optimization_graph() const {
7553   // @@protoc_insertion_point(field_get:tensorflow.RunMetadata.FunctionGraphs.post_optimization_graph)
7554   return _internal_post_optimization_graph();
7555 }
unsafe_arena_set_allocated_post_optimization_graph(::tensorflow::GraphDef * post_optimization_graph)7556 inline void RunMetadata_FunctionGraphs::unsafe_arena_set_allocated_post_optimization_graph(
7557     ::tensorflow::GraphDef* post_optimization_graph) {
7558   if (GetArenaForAllocation() == nullptr) {
7559     delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.post_optimization_graph_);
7560   }
7561   _impl_.post_optimization_graph_ = post_optimization_graph;
7562   // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunMetadata.FunctionGraphs.post_optimization_graph)
7563 }
release_post_optimization_graph()7564 inline ::tensorflow::GraphDef* RunMetadata_FunctionGraphs::release_post_optimization_graph() {
7565 
7566   ::tensorflow::GraphDef* temp = _impl_.post_optimization_graph_;
7567   _impl_.post_optimization_graph_ = nullptr;
7568 #ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
7569   auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
7570   temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
7571   if (GetArenaForAllocation() == nullptr) { delete old; }
7572 #else  // PROTOBUF_FORCE_COPY_IN_RELEASE
7573   if (GetArenaForAllocation() != nullptr) {
7574     temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
7575   }
7576 #endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
7577   return temp;
7578 }
unsafe_arena_release_post_optimization_graph()7579 inline ::tensorflow::GraphDef* RunMetadata_FunctionGraphs::unsafe_arena_release_post_optimization_graph() {
7580   // @@protoc_insertion_point(field_release:tensorflow.RunMetadata.FunctionGraphs.post_optimization_graph)
7581 
7582   ::tensorflow::GraphDef* temp = _impl_.post_optimization_graph_;
7583   _impl_.post_optimization_graph_ = nullptr;
7584   return temp;
7585 }
_internal_mutable_post_optimization_graph()7586 inline ::tensorflow::GraphDef* RunMetadata_FunctionGraphs::_internal_mutable_post_optimization_graph() {
7587 
7588   if (_impl_.post_optimization_graph_ == nullptr) {
7589     auto* p = CreateMaybeMessage<::tensorflow::GraphDef>(GetArenaForAllocation());
7590     _impl_.post_optimization_graph_ = p;
7591   }
7592   return _impl_.post_optimization_graph_;
7593 }
mutable_post_optimization_graph()7594 inline ::tensorflow::GraphDef* RunMetadata_FunctionGraphs::mutable_post_optimization_graph() {
7595   ::tensorflow::GraphDef* _msg = _internal_mutable_post_optimization_graph();
7596   // @@protoc_insertion_point(field_mutable:tensorflow.RunMetadata.FunctionGraphs.post_optimization_graph)
7597   return _msg;
7598 }
set_allocated_post_optimization_graph(::tensorflow::GraphDef * post_optimization_graph)7599 inline void RunMetadata_FunctionGraphs::set_allocated_post_optimization_graph(::tensorflow::GraphDef* post_optimization_graph) {
7600   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
7601   if (message_arena == nullptr) {
7602     delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.post_optimization_graph_);
7603   }
7604   if (post_optimization_graph) {
7605     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
7606         ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
7607                 reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(post_optimization_graph));
7608     if (message_arena != submessage_arena) {
7609       post_optimization_graph = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
7610           message_arena, post_optimization_graph, submessage_arena);
7611     }
7612 
7613   } else {
7614 
7615   }
7616   _impl_.post_optimization_graph_ = post_optimization_graph;
7617   // @@protoc_insertion_point(field_set_allocated:tensorflow.RunMetadata.FunctionGraphs.post_optimization_graph)
7618 }
7619 
7620 // -------------------------------------------------------------------
7621 
7622 // RunMetadata
7623 
7624 // .tensorflow.StepStats step_stats = 1;
_internal_has_step_stats()7625 inline bool RunMetadata::_internal_has_step_stats() const {
7626   return this != internal_default_instance() && _impl_.step_stats_ != nullptr;
7627 }
has_step_stats()7628 inline bool RunMetadata::has_step_stats() const {
7629   return _internal_has_step_stats();
7630 }
_internal_step_stats()7631 inline const ::tensorflow::StepStats& RunMetadata::_internal_step_stats() const {
7632   const ::tensorflow::StepStats* p = _impl_.step_stats_;
7633   return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::StepStats&>(
7634       ::tensorflow::_StepStats_default_instance_);
7635 }
step_stats()7636 inline const ::tensorflow::StepStats& RunMetadata::step_stats() const {
7637   // @@protoc_insertion_point(field_get:tensorflow.RunMetadata.step_stats)
7638   return _internal_step_stats();
7639 }
unsafe_arena_set_allocated_step_stats(::tensorflow::StepStats * step_stats)7640 inline void RunMetadata::unsafe_arena_set_allocated_step_stats(
7641     ::tensorflow::StepStats* step_stats) {
7642   if (GetArenaForAllocation() == nullptr) {
7643     delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.step_stats_);
7644   }
7645   _impl_.step_stats_ = step_stats;
7646   // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunMetadata.step_stats)
7647 }
release_step_stats()7648 inline ::tensorflow::StepStats* RunMetadata::release_step_stats() {
7649 
7650   ::tensorflow::StepStats* temp = _impl_.step_stats_;
7651   _impl_.step_stats_ = nullptr;
7652 #ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
7653   auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
7654   temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
7655   if (GetArenaForAllocation() == nullptr) { delete old; }
7656 #else  // PROTOBUF_FORCE_COPY_IN_RELEASE
7657   if (GetArenaForAllocation() != nullptr) {
7658     temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
7659   }
7660 #endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
7661   return temp;
7662 }
unsafe_arena_release_step_stats()7663 inline ::tensorflow::StepStats* RunMetadata::unsafe_arena_release_step_stats() {
7664   // @@protoc_insertion_point(field_release:tensorflow.RunMetadata.step_stats)
7665 
7666   ::tensorflow::StepStats* temp = _impl_.step_stats_;
7667   _impl_.step_stats_ = nullptr;
7668   return temp;
7669 }
_internal_mutable_step_stats()7670 inline ::tensorflow::StepStats* RunMetadata::_internal_mutable_step_stats() {
7671 
7672   if (_impl_.step_stats_ == nullptr) {
7673     auto* p = CreateMaybeMessage<::tensorflow::StepStats>(GetArenaForAllocation());
7674     _impl_.step_stats_ = p;
7675   }
7676   return _impl_.step_stats_;
7677 }
mutable_step_stats()7678 inline ::tensorflow::StepStats* RunMetadata::mutable_step_stats() {
7679   ::tensorflow::StepStats* _msg = _internal_mutable_step_stats();
7680   // @@protoc_insertion_point(field_mutable:tensorflow.RunMetadata.step_stats)
7681   return _msg;
7682 }
set_allocated_step_stats(::tensorflow::StepStats * step_stats)7683 inline void RunMetadata::set_allocated_step_stats(::tensorflow::StepStats* step_stats) {
7684   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
7685   if (message_arena == nullptr) {
7686     delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.step_stats_);
7687   }
7688   if (step_stats) {
7689     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
7690         ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
7691                 reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(step_stats));
7692     if (message_arena != submessage_arena) {
7693       step_stats = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
7694           message_arena, step_stats, submessage_arena);
7695     }
7696 
7697   } else {
7698 
7699   }
7700   _impl_.step_stats_ = step_stats;
7701   // @@protoc_insertion_point(field_set_allocated:tensorflow.RunMetadata.step_stats)
7702 }
7703 
7704 // .tensorflow.CostGraphDef cost_graph = 2;
_internal_has_cost_graph()7705 inline bool RunMetadata::_internal_has_cost_graph() const {
7706   return this != internal_default_instance() && _impl_.cost_graph_ != nullptr;
7707 }
has_cost_graph()7708 inline bool RunMetadata::has_cost_graph() const {
7709   return _internal_has_cost_graph();
7710 }
_internal_cost_graph()7711 inline const ::tensorflow::CostGraphDef& RunMetadata::_internal_cost_graph() const {
7712   const ::tensorflow::CostGraphDef* p = _impl_.cost_graph_;
7713   return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::CostGraphDef&>(
7714       ::tensorflow::_CostGraphDef_default_instance_);
7715 }
cost_graph()7716 inline const ::tensorflow::CostGraphDef& RunMetadata::cost_graph() const {
7717   // @@protoc_insertion_point(field_get:tensorflow.RunMetadata.cost_graph)
7718   return _internal_cost_graph();
7719 }
unsafe_arena_set_allocated_cost_graph(::tensorflow::CostGraphDef * cost_graph)7720 inline void RunMetadata::unsafe_arena_set_allocated_cost_graph(
7721     ::tensorflow::CostGraphDef* cost_graph) {
7722   if (GetArenaForAllocation() == nullptr) {
7723     delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.cost_graph_);
7724   }
7725   _impl_.cost_graph_ = cost_graph;
7726   // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunMetadata.cost_graph)
7727 }
release_cost_graph()7728 inline ::tensorflow::CostGraphDef* RunMetadata::release_cost_graph() {
7729 
7730   ::tensorflow::CostGraphDef* temp = _impl_.cost_graph_;
7731   _impl_.cost_graph_ = nullptr;
7732 #ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
7733   auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
7734   temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
7735   if (GetArenaForAllocation() == nullptr) { delete old; }
7736 #else  // PROTOBUF_FORCE_COPY_IN_RELEASE
7737   if (GetArenaForAllocation() != nullptr) {
7738     temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
7739   }
7740 #endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
7741   return temp;
7742 }
unsafe_arena_release_cost_graph()7743 inline ::tensorflow::CostGraphDef* RunMetadata::unsafe_arena_release_cost_graph() {
7744   // @@protoc_insertion_point(field_release:tensorflow.RunMetadata.cost_graph)
7745 
7746   ::tensorflow::CostGraphDef* temp = _impl_.cost_graph_;
7747   _impl_.cost_graph_ = nullptr;
7748   return temp;
7749 }
_internal_mutable_cost_graph()7750 inline ::tensorflow::CostGraphDef* RunMetadata::_internal_mutable_cost_graph() {
7751 
7752   if (_impl_.cost_graph_ == nullptr) {
7753     auto* p = CreateMaybeMessage<::tensorflow::CostGraphDef>(GetArenaForAllocation());
7754     _impl_.cost_graph_ = p;
7755   }
7756   return _impl_.cost_graph_;
7757 }
mutable_cost_graph()7758 inline ::tensorflow::CostGraphDef* RunMetadata::mutable_cost_graph() {
7759   ::tensorflow::CostGraphDef* _msg = _internal_mutable_cost_graph();
7760   // @@protoc_insertion_point(field_mutable:tensorflow.RunMetadata.cost_graph)
7761   return _msg;
7762 }
set_allocated_cost_graph(::tensorflow::CostGraphDef * cost_graph)7763 inline void RunMetadata::set_allocated_cost_graph(::tensorflow::CostGraphDef* cost_graph) {
7764   ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
7765   if (message_arena == nullptr) {
7766     delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.cost_graph_);
7767   }
7768   if (cost_graph) {
7769     ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
7770         ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
7771                 reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(cost_graph));
7772     if (message_arena != submessage_arena) {
7773       cost_graph = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
7774           message_arena, cost_graph, submessage_arena);
7775     }
7776 
7777   } else {
7778 
7779   }
7780   _impl_.cost_graph_ = cost_graph;
7781   // @@protoc_insertion_point(field_set_allocated:tensorflow.RunMetadata.cost_graph)
7782 }
7783 
7784 // repeated .tensorflow.GraphDef partition_graphs = 3;
_internal_partition_graphs_size()7785 inline int RunMetadata::_internal_partition_graphs_size() const {
7786   return _impl_.partition_graphs_.size();
7787 }
partition_graphs_size()7788 inline int RunMetadata::partition_graphs_size() const {
7789   return _internal_partition_graphs_size();
7790 }
mutable_partition_graphs(int index)7791 inline ::tensorflow::GraphDef* RunMetadata::mutable_partition_graphs(int index) {
7792   // @@protoc_insertion_point(field_mutable:tensorflow.RunMetadata.partition_graphs)
7793   return _impl_.partition_graphs_.Mutable(index);
7794 }
7795 inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef >*
mutable_partition_graphs()7796 RunMetadata::mutable_partition_graphs() {
7797   // @@protoc_insertion_point(field_mutable_list:tensorflow.RunMetadata.partition_graphs)
7798   return &_impl_.partition_graphs_;
7799 }
_internal_partition_graphs(int index)7800 inline const ::tensorflow::GraphDef& RunMetadata::_internal_partition_graphs(int index) const {
7801   return _impl_.partition_graphs_.Get(index);
7802 }
partition_graphs(int index)7803 inline const ::tensorflow::GraphDef& RunMetadata::partition_graphs(int index) const {
7804   // @@protoc_insertion_point(field_get:tensorflow.RunMetadata.partition_graphs)
7805   return _internal_partition_graphs(index);
7806 }
_internal_add_partition_graphs()7807 inline ::tensorflow::GraphDef* RunMetadata::_internal_add_partition_graphs() {
7808   return _impl_.partition_graphs_.Add();
7809 }
add_partition_graphs()7810 inline ::tensorflow::GraphDef* RunMetadata::add_partition_graphs() {
7811   ::tensorflow::GraphDef* _add = _internal_add_partition_graphs();
7812   // @@protoc_insertion_point(field_add:tensorflow.RunMetadata.partition_graphs)
7813   return _add;
7814 }
7815 inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef >&
partition_graphs()7816 RunMetadata::partition_graphs() const {
7817   // @@protoc_insertion_point(field_list:tensorflow.RunMetadata.partition_graphs)
7818   return _impl_.partition_graphs_;
7819 }
7820 
7821 // repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;
_internal_function_graphs_size()7822 inline int RunMetadata::_internal_function_graphs_size() const {
7823   return _impl_.function_graphs_.size();
7824 }
function_graphs_size()7825 inline int RunMetadata::function_graphs_size() const {
7826   return _internal_function_graphs_size();
7827 }
clear_function_graphs()7828 inline void RunMetadata::clear_function_graphs() {
7829   _impl_.function_graphs_.Clear();
7830 }
mutable_function_graphs(int index)7831 inline ::tensorflow::RunMetadata_FunctionGraphs* RunMetadata::mutable_function_graphs(int index) {
7832   // @@protoc_insertion_point(field_mutable:tensorflow.RunMetadata.function_graphs)
7833   return _impl_.function_graphs_.Mutable(index);
7834 }
7835 inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::RunMetadata_FunctionGraphs >*
mutable_function_graphs()7836 RunMetadata::mutable_function_graphs() {
7837   // @@protoc_insertion_point(field_mutable_list:tensorflow.RunMetadata.function_graphs)
7838   return &_impl_.function_graphs_;
7839 }
_internal_function_graphs(int index)7840 inline const ::tensorflow::RunMetadata_FunctionGraphs& RunMetadata::_internal_function_graphs(int index) const {
7841   return _impl_.function_graphs_.Get(index);
7842 }
function_graphs(int index)7843 inline const ::tensorflow::RunMetadata_FunctionGraphs& RunMetadata::function_graphs(int index) const {
7844   // @@protoc_insertion_point(field_get:tensorflow.RunMetadata.function_graphs)
7845   return _internal_function_graphs(index);
7846 }
inline ::tensorflow::RunMetadata_FunctionGraphs* RunMetadata::_internal_add_function_graphs() {
  return _impl_.function_graphs_.Add();
}
inline ::tensorflow::RunMetadata_FunctionGraphs* RunMetadata::add_function_graphs() {
  ::tensorflow::RunMetadata_FunctionGraphs* _add = _internal_add_function_graphs();
  // @@protoc_insertion_point(field_add:tensorflow.RunMetadata.function_graphs)
  return _add;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::RunMetadata_FunctionGraphs >&
RunMetadata::function_graphs() const {
  // @@protoc_insertion_point(field_list:tensorflow.RunMetadata.function_graphs)
  return _impl_.function_graphs_;
}

// .tensorflow.SessionMetadata session_metadata = 5;
inline bool RunMetadata::_internal_has_session_metadata() const {
  return this != internal_default_instance() && _impl_.session_metadata_ != nullptr;
}
inline bool RunMetadata::has_session_metadata() const {
  return _internal_has_session_metadata();
}
inline void RunMetadata::clear_session_metadata() {
  if (GetArenaForAllocation() == nullptr && _impl_.session_metadata_ != nullptr) {
    delete _impl_.session_metadata_;
  }
  _impl_.session_metadata_ = nullptr;
}
inline const ::tensorflow::SessionMetadata& RunMetadata::_internal_session_metadata() const {
  const ::tensorflow::SessionMetadata* p = _impl_.session_metadata_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::SessionMetadata&>(
      ::tensorflow::_SessionMetadata_default_instance_);
}
inline const ::tensorflow::SessionMetadata& RunMetadata::session_metadata() const {
  // @@protoc_insertion_point(field_get:tensorflow.RunMetadata.session_metadata)
  return _internal_session_metadata();
}
inline void RunMetadata::unsafe_arena_set_allocated_session_metadata(
    ::tensorflow::SessionMetadata* session_metadata) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.session_metadata_);
  }
  _impl_.session_metadata_ = session_metadata;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunMetadata.session_metadata)
}
inline ::tensorflow::SessionMetadata* RunMetadata::release_session_metadata() {

  ::tensorflow::SessionMetadata* temp = _impl_.session_metadata_;
  _impl_.session_metadata_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::SessionMetadata* RunMetadata::unsafe_arena_release_session_metadata() {
  // @@protoc_insertion_point(field_release:tensorflow.RunMetadata.session_metadata)

  ::tensorflow::SessionMetadata* temp = _impl_.session_metadata_;
  _impl_.session_metadata_ = nullptr;
  return temp;
}
inline ::tensorflow::SessionMetadata* RunMetadata::_internal_mutable_session_metadata() {

  if (_impl_.session_metadata_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::SessionMetadata>(GetArenaForAllocation());
    _impl_.session_metadata_ = p;
  }
  return _impl_.session_metadata_;
}
inline ::tensorflow::SessionMetadata* RunMetadata::mutable_session_metadata() {
  ::tensorflow::SessionMetadata* _msg = _internal_mutable_session_metadata();
  // @@protoc_insertion_point(field_mutable:tensorflow.RunMetadata.session_metadata)
  return _msg;
}
inline void RunMetadata::set_allocated_session_metadata(::tensorflow::SessionMetadata* session_metadata) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.session_metadata_;
  }
  if (session_metadata) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(session_metadata);
    if (message_arena != submessage_arena) {
      session_metadata = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, session_metadata, submessage_arena);
    }

  } else {

  }
  _impl_.session_metadata_ = session_metadata;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.RunMetadata.session_metadata)
}
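
// Editor's illustrative sketch (not protoc output): how the
// RunMetadata::session_metadata accessors above are typically used.
// The function name and the session name "example_session" are
// assumptions for illustration only; SessionMetadata is assumed to
// expose a set_name() accessor for its string name field.
inline void Example_FillSessionMetadata(::tensorflow::RunMetadata* run_metadata) {
  if (!run_metadata->has_session_metadata()) {
    // mutable_session_metadata() lazily creates the submessage on the
    // owning arena (or on the heap when there is no arena).
    run_metadata->mutable_session_metadata()->set_name("example_session");
  }
  // clear_session_metadata() deletes the submessage only for heap-allocated
  // messages; arena-owned submessages are reclaimed with the arena.
  run_metadata->clear_session_metadata();
}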

// -------------------------------------------------------------------

// TensorConnection

// string from_tensor = 1;
inline void TensorConnection::clear_from_tensor() {
  _impl_.from_tensor_.ClearToEmpty();
}
inline const std::string& TensorConnection::from_tensor() const {
  // @@protoc_insertion_point(field_get:tensorflow.TensorConnection.from_tensor)
  return _internal_from_tensor();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void TensorConnection::set_from_tensor(ArgT0&& arg0, ArgT... args) {

 _impl_.from_tensor_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.TensorConnection.from_tensor)
}
inline std::string* TensorConnection::mutable_from_tensor() {
  std::string* _s = _internal_mutable_from_tensor();
  // @@protoc_insertion_point(field_mutable:tensorflow.TensorConnection.from_tensor)
  return _s;
}
inline const std::string& TensorConnection::_internal_from_tensor() const {
  return _impl_.from_tensor_.Get();
}
inline void TensorConnection::_internal_set_from_tensor(const std::string& value) {

  _impl_.from_tensor_.Set(value, GetArenaForAllocation());
}
inline std::string* TensorConnection::_internal_mutable_from_tensor() {

  return _impl_.from_tensor_.Mutable(GetArenaForAllocation());
}
inline std::string* TensorConnection::release_from_tensor() {
  // @@protoc_insertion_point(field_release:tensorflow.TensorConnection.from_tensor)
  return _impl_.from_tensor_.Release();
}
inline void TensorConnection::set_allocated_from_tensor(std::string* from_tensor) {
  _impl_.from_tensor_.SetAllocated(from_tensor, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.from_tensor_.IsDefault()) {
    _impl_.from_tensor_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.TensorConnection.from_tensor)
}

// string to_tensor = 2;
inline void TensorConnection::clear_to_tensor() {
  _impl_.to_tensor_.ClearToEmpty();
}
inline const std::string& TensorConnection::to_tensor() const {
  // @@protoc_insertion_point(field_get:tensorflow.TensorConnection.to_tensor)
  return _internal_to_tensor();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void TensorConnection::set_to_tensor(ArgT0&& arg0, ArgT... args) {

 _impl_.to_tensor_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.TensorConnection.to_tensor)
}
inline std::string* TensorConnection::mutable_to_tensor() {
  std::string* _s = _internal_mutable_to_tensor();
  // @@protoc_insertion_point(field_mutable:tensorflow.TensorConnection.to_tensor)
  return _s;
}
inline const std::string& TensorConnection::_internal_to_tensor() const {
  return _impl_.to_tensor_.Get();
}
inline void TensorConnection::_internal_set_to_tensor(const std::string& value) {

  _impl_.to_tensor_.Set(value, GetArenaForAllocation());
}
inline std::string* TensorConnection::_internal_mutable_to_tensor() {

  return _impl_.to_tensor_.Mutable(GetArenaForAllocation());
}
inline std::string* TensorConnection::release_to_tensor() {
  // @@protoc_insertion_point(field_release:tensorflow.TensorConnection.to_tensor)
  return _impl_.to_tensor_.Release();
}
inline void TensorConnection::set_allocated_to_tensor(std::string* to_tensor) {
  _impl_.to_tensor_.SetAllocated(to_tensor, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.to_tensor_.IsDefault()) {
    _impl_.to_tensor_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.TensorConnection.to_tensor)
}
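
// Editor's illustrative sketch (not protoc output): a TensorConnection maps
// the value of one tensor onto another when building a callable. The tensor
// names "a:0" and "b:0" are placeholders, not taken from any real graph.
inline void Example_ConnectTensors(::tensorflow::TensorConnection* connection) {
  connection->set_from_tensor("a:0");  // tensor whose value is forwarded
  connection->set_to_tensor("b:0");    // tensor that receives the value
}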

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// CallableOptions

// repeated string feed = 1;
inline int CallableOptions::_internal_feed_size() const {
  return _impl_.feed_.size();
}
inline int CallableOptions::feed_size() const {
  return _internal_feed_size();
}
inline void CallableOptions::clear_feed() {
  _impl_.feed_.Clear();
}
inline std::string* CallableOptions::add_feed() {
  std::string* _s = _internal_add_feed();
  // @@protoc_insertion_point(field_add_mutable:tensorflow.CallableOptions.feed)
  return _s;
}
inline const std::string& CallableOptions::_internal_feed(int index) const {
  return _impl_.feed_.Get(index);
}
inline const std::string& CallableOptions::feed(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.CallableOptions.feed)
  return _internal_feed(index);
}
inline std::string* CallableOptions::mutable_feed(int index) {
  // @@protoc_insertion_point(field_mutable:tensorflow.CallableOptions.feed)
  return _impl_.feed_.Mutable(index);
}
inline void CallableOptions::set_feed(int index, const std::string& value) {
  _impl_.feed_.Mutable(index)->assign(value);
  // @@protoc_insertion_point(field_set:tensorflow.CallableOptions.feed)
}
inline void CallableOptions::set_feed(int index, std::string&& value) {
  _impl_.feed_.Mutable(index)->assign(std::move(value));
  // @@protoc_insertion_point(field_set:tensorflow.CallableOptions.feed)
}
inline void CallableOptions::set_feed(int index, const char* value) {
  GOOGLE_DCHECK(value != nullptr);
  _impl_.feed_.Mutable(index)->assign(value);
  // @@protoc_insertion_point(field_set_char:tensorflow.CallableOptions.feed)
}
inline void CallableOptions::set_feed(int index, const char* value, size_t size) {
  _impl_.feed_.Mutable(index)->assign(
    reinterpret_cast<const char*>(value), size);
  // @@protoc_insertion_point(field_set_pointer:tensorflow.CallableOptions.feed)
}
inline std::string* CallableOptions::_internal_add_feed() {
  return _impl_.feed_.Add();
}
inline void CallableOptions::add_feed(const std::string& value) {
  _impl_.feed_.Add()->assign(value);
  // @@protoc_insertion_point(field_add:tensorflow.CallableOptions.feed)
}
inline void CallableOptions::add_feed(std::string&& value) {
  _impl_.feed_.Add(std::move(value));
  // @@protoc_insertion_point(field_add:tensorflow.CallableOptions.feed)
}
inline void CallableOptions::add_feed(const char* value) {
  GOOGLE_DCHECK(value != nullptr);
  _impl_.feed_.Add()->assign(value);
  // @@protoc_insertion_point(field_add_char:tensorflow.CallableOptions.feed)
}
inline void CallableOptions::add_feed(const char* value, size_t size) {
  _impl_.feed_.Add()->assign(reinterpret_cast<const char*>(value), size);
  // @@protoc_insertion_point(field_add_pointer:tensorflow.CallableOptions.feed)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
CallableOptions::feed() const {
  // @@protoc_insertion_point(field_list:tensorflow.CallableOptions.feed)
  return _impl_.feed_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
CallableOptions::mutable_feed() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.CallableOptions.feed)
  return &_impl_.feed_;
}
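
// Editor's illustrative sketch (not protoc output): the repeated-string
// accessors above support adding by copy or move, and the const getter
// returns a RepeatedPtrField that iterates like a standard container.
// The function name and feed names are placeholders. The fetch and target
// fields below expose the identical accessor surface.
inline void Example_AddFeeds(::tensorflow::CallableOptions* options) {
  options->add_feed("x:0");               // copies from const char*
  options->add_feed(std::string("y:0"));  // move overload
  for (const std::string& name : options->feed()) {
    (void)name;  // range-for over the RepeatedPtrField
  }
}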

// repeated string fetch = 2;
inline int CallableOptions::_internal_fetch_size() const {
  return _impl_.fetch_.size();
}
inline int CallableOptions::fetch_size() const {
  return _internal_fetch_size();
}
inline void CallableOptions::clear_fetch() {
  _impl_.fetch_.Clear();
}
inline std::string* CallableOptions::add_fetch() {
  std::string* _s = _internal_add_fetch();
  // @@protoc_insertion_point(field_add_mutable:tensorflow.CallableOptions.fetch)
  return _s;
}
inline const std::string& CallableOptions::_internal_fetch(int index) const {
  return _impl_.fetch_.Get(index);
}
inline const std::string& CallableOptions::fetch(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.CallableOptions.fetch)
  return _internal_fetch(index);
}
inline std::string* CallableOptions::mutable_fetch(int index) {
  // @@protoc_insertion_point(field_mutable:tensorflow.CallableOptions.fetch)
  return _impl_.fetch_.Mutable(index);
}
inline void CallableOptions::set_fetch(int index, const std::string& value) {
  _impl_.fetch_.Mutable(index)->assign(value);
  // @@protoc_insertion_point(field_set:tensorflow.CallableOptions.fetch)
}
inline void CallableOptions::set_fetch(int index, std::string&& value) {
  _impl_.fetch_.Mutable(index)->assign(std::move(value));
  // @@protoc_insertion_point(field_set:tensorflow.CallableOptions.fetch)
}
inline void CallableOptions::set_fetch(int index, const char* value) {
  GOOGLE_DCHECK(value != nullptr);
  _impl_.fetch_.Mutable(index)->assign(value);
  // @@protoc_insertion_point(field_set_char:tensorflow.CallableOptions.fetch)
}
inline void CallableOptions::set_fetch(int index, const char* value, size_t size) {
  _impl_.fetch_.Mutable(index)->assign(
    reinterpret_cast<const char*>(value), size);
  // @@protoc_insertion_point(field_set_pointer:tensorflow.CallableOptions.fetch)
}
inline std::string* CallableOptions::_internal_add_fetch() {
  return _impl_.fetch_.Add();
}
inline void CallableOptions::add_fetch(const std::string& value) {
  _impl_.fetch_.Add()->assign(value);
  // @@protoc_insertion_point(field_add:tensorflow.CallableOptions.fetch)
}
inline void CallableOptions::add_fetch(std::string&& value) {
  _impl_.fetch_.Add(std::move(value));
  // @@protoc_insertion_point(field_add:tensorflow.CallableOptions.fetch)
}
inline void CallableOptions::add_fetch(const char* value) {
  GOOGLE_DCHECK(value != nullptr);
  _impl_.fetch_.Add()->assign(value);
  // @@protoc_insertion_point(field_add_char:tensorflow.CallableOptions.fetch)
}
inline void CallableOptions::add_fetch(const char* value, size_t size) {
  _impl_.fetch_.Add()->assign(reinterpret_cast<const char*>(value), size);
  // @@protoc_insertion_point(field_add_pointer:tensorflow.CallableOptions.fetch)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
CallableOptions::fetch() const {
  // @@protoc_insertion_point(field_list:tensorflow.CallableOptions.fetch)
  return _impl_.fetch_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
CallableOptions::mutable_fetch() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.CallableOptions.fetch)
  return &_impl_.fetch_;
}

// repeated string target = 3;
inline int CallableOptions::_internal_target_size() const {
  return _impl_.target_.size();
}
inline int CallableOptions::target_size() const {
  return _internal_target_size();
}
inline void CallableOptions::clear_target() {
  _impl_.target_.Clear();
}
inline std::string* CallableOptions::add_target() {
  std::string* _s = _internal_add_target();
  // @@protoc_insertion_point(field_add_mutable:tensorflow.CallableOptions.target)
  return _s;
}
inline const std::string& CallableOptions::_internal_target(int index) const {
  return _impl_.target_.Get(index);
}
inline const std::string& CallableOptions::target(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.CallableOptions.target)
  return _internal_target(index);
}
inline std::string* CallableOptions::mutable_target(int index) {
  // @@protoc_insertion_point(field_mutable:tensorflow.CallableOptions.target)
  return _impl_.target_.Mutable(index);
}
inline void CallableOptions::set_target(int index, const std::string& value) {
  _impl_.target_.Mutable(index)->assign(value);
  // @@protoc_insertion_point(field_set:tensorflow.CallableOptions.target)
}
inline void CallableOptions::set_target(int index, std::string&& value) {
  _impl_.target_.Mutable(index)->assign(std::move(value));
  // @@protoc_insertion_point(field_set:tensorflow.CallableOptions.target)
}
inline void CallableOptions::set_target(int index, const char* value) {
  GOOGLE_DCHECK(value != nullptr);
  _impl_.target_.Mutable(index)->assign(value);
  // @@protoc_insertion_point(field_set_char:tensorflow.CallableOptions.target)
}
inline void CallableOptions::set_target(int index, const char* value, size_t size) {
  _impl_.target_.Mutable(index)->assign(
    reinterpret_cast<const char*>(value), size);
  // @@protoc_insertion_point(field_set_pointer:tensorflow.CallableOptions.target)
}
inline std::string* CallableOptions::_internal_add_target() {
  return _impl_.target_.Add();
}
inline void CallableOptions::add_target(const std::string& value) {
  _impl_.target_.Add()->assign(value);
  // @@protoc_insertion_point(field_add:tensorflow.CallableOptions.target)
}
inline void CallableOptions::add_target(std::string&& value) {
  _impl_.target_.Add(std::move(value));
  // @@protoc_insertion_point(field_add:tensorflow.CallableOptions.target)
}
inline void CallableOptions::add_target(const char* value) {
  GOOGLE_DCHECK(value != nullptr);
  _impl_.target_.Add()->assign(value);
  // @@protoc_insertion_point(field_add_char:tensorflow.CallableOptions.target)
}
inline void CallableOptions::add_target(const char* value, size_t size) {
  _impl_.target_.Add()->assign(reinterpret_cast<const char*>(value), size);
  // @@protoc_insertion_point(field_add_pointer:tensorflow.CallableOptions.target)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
CallableOptions::target() const {
  // @@protoc_insertion_point(field_list:tensorflow.CallableOptions.target)
  return _impl_.target_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
CallableOptions::mutable_target() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.CallableOptions.target)
  return &_impl_.target_;
}

// .tensorflow.RunOptions run_options = 4;
inline bool CallableOptions::_internal_has_run_options() const {
  return this != internal_default_instance() && _impl_.run_options_ != nullptr;
}
inline bool CallableOptions::has_run_options() const {
  return _internal_has_run_options();
}
inline void CallableOptions::clear_run_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.run_options_ != nullptr) {
    delete _impl_.run_options_;
  }
  _impl_.run_options_ = nullptr;
}
inline const ::tensorflow::RunOptions& CallableOptions::_internal_run_options() const {
  const ::tensorflow::RunOptions* p = _impl_.run_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::RunOptions&>(
      ::tensorflow::_RunOptions_default_instance_);
}
inline const ::tensorflow::RunOptions& CallableOptions::run_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.CallableOptions.run_options)
  return _internal_run_options();
}
inline void CallableOptions::unsafe_arena_set_allocated_run_options(
    ::tensorflow::RunOptions* run_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.run_options_);
  }
  _impl_.run_options_ = run_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.CallableOptions.run_options)
}
inline ::tensorflow::RunOptions* CallableOptions::release_run_options() {

  ::tensorflow::RunOptions* temp = _impl_.run_options_;
  _impl_.run_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old =  reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::RunOptions* CallableOptions::unsafe_arena_release_run_options() {
  // @@protoc_insertion_point(field_release:tensorflow.CallableOptions.run_options)

  ::tensorflow::RunOptions* temp = _impl_.run_options_;
  _impl_.run_options_ = nullptr;
  return temp;
}
inline ::tensorflow::RunOptions* CallableOptions::_internal_mutable_run_options() {

  if (_impl_.run_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::RunOptions>(GetArenaForAllocation());
    _impl_.run_options_ = p;
  }
  return _impl_.run_options_;
}
inline ::tensorflow::RunOptions* CallableOptions::mutable_run_options() {
  ::tensorflow::RunOptions* _msg = _internal_mutable_run_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.CallableOptions.run_options)
  return _msg;
}
inline void CallableOptions::set_allocated_run_options(::tensorflow::RunOptions* run_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.run_options_;
  }
  if (run_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(run_options);
    if (message_arena != submessage_arena) {
      run_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, run_options, submessage_arena);
    }

  } else {

  }
  _impl_.run_options_ = run_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.CallableOptions.run_options)
}
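
// Editor's illustrative sketch (not protoc output): ownership transfer with
// the set_allocated/release pair above. When the containing message is not
// arena-allocated, set_allocated_run_options() takes ownership of the heap
// pointer and release_run_options() hands it back to the caller. The function
// name is a placeholder.
inline void Example_TransferRunOptions(::tensorflow::CallableOptions* options) {
  auto* run_options = new ::tensorflow::RunOptions();
  options->set_allocated_run_options(run_options);  // options now owns it
  delete options->release_run_options();            // reclaim and free it
}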

// repeated .tensorflow.TensorConnection tensor_connection = 5;
inline int CallableOptions::_internal_tensor_connection_size() const {
  return _impl_.tensor_connection_.size();
}
inline int CallableOptions::tensor_connection_size() const {
  return _internal_tensor_connection_size();
}
inline void CallableOptions::clear_tensor_connection() {
  _impl_.tensor_connection_.Clear();
}
inline ::tensorflow::TensorConnection* CallableOptions::mutable_tensor_connection(int index) {
  // @@protoc_insertion_point(field_mutable:tensorflow.CallableOptions.tensor_connection)
  return _impl_.tensor_connection_.Mutable(index);
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::TensorConnection >*
CallableOptions::mutable_tensor_connection() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.CallableOptions.tensor_connection)
  return &_impl_.tensor_connection_;
}
inline const ::tensorflow::TensorConnection& CallableOptions::_internal_tensor_connection(int index) const {
  return _impl_.tensor_connection_.Get(index);
}
inline const ::tensorflow::TensorConnection& CallableOptions::tensor_connection(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.CallableOptions.tensor_connection)
  return _internal_tensor_connection(index);
}
inline ::tensorflow::TensorConnection* CallableOptions::_internal_add_tensor_connection() {
  return _impl_.tensor_connection_.Add();
}
inline ::tensorflow::TensorConnection* CallableOptions::add_tensor_connection() {
  ::tensorflow::TensorConnection* _add = _internal_add_tensor_connection();
  // @@protoc_insertion_point(field_add:tensorflow.CallableOptions.tensor_connection)
  return _add;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::TensorConnection >&
CallableOptions::tensor_connection() const {
  // @@protoc_insertion_point(field_list:tensorflow.CallableOptions.tensor_connection)
  return _impl_.tensor_connection_;
}
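
// Editor's illustrative sketch (not protoc output): for a repeated message
// field, add_tensor_connection() appends a default-constructed element and
// returns a mutable pointer to it. The function name and tensor names are
// placeholders.
inline void Example_AddTensorConnection(::tensorflow::CallableOptions* options) {
  ::tensorflow::TensorConnection* connection = options->add_tensor_connection();
  connection->set_from_tensor("a:0");
  connection->set_to_tensor("b:0");
}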

// map<string, string> feed_devices = 6;
inline int CallableOptions::_internal_feed_devices_size() const {
  return _impl_.feed_devices_.size();
}
inline int CallableOptions::feed_devices_size() const {
  return _internal_feed_devices_size();
}
inline void CallableOptions::clear_feed_devices() {
  _impl_.feed_devices_.Clear();
}
inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >&
CallableOptions::_internal_feed_devices() const {
  return _impl_.feed_devices_.GetMap();
}
inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >&
CallableOptions::feed_devices() const {
  // @@protoc_insertion_point(field_map:tensorflow.CallableOptions.feed_devices)
  return _internal_feed_devices();
}
inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >*
CallableOptions::_internal_mutable_feed_devices() {
  return _impl_.feed_devices_.MutableMap();
}
inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >*
CallableOptions::mutable_feed_devices() {
  // @@protoc_insertion_point(field_mutable_map:tensorflow.CallableOptions.feed_devices)
  return _internal_mutable_feed_devices();
}

// map<string, string> fetch_devices = 7;
inline int CallableOptions::_internal_fetch_devices_size() const {
  return _impl_.fetch_devices_.size();
}
inline int CallableOptions::fetch_devices_size() const {
  return _internal_fetch_devices_size();
}
inline void CallableOptions::clear_fetch_devices() {
  _impl_.fetch_devices_.Clear();
}
inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >&
CallableOptions::_internal_fetch_devices() const {
  return _impl_.fetch_devices_.GetMap();
}
inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >&
CallableOptions::fetch_devices() const {
  // @@protoc_insertion_point(field_map:tensorflow.CallableOptions.fetch_devices)
  return _internal_fetch_devices();
}
inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >*
CallableOptions::_internal_mutable_fetch_devices() {
  return _impl_.fetch_devices_.MutableMap();
}
inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >*
CallableOptions::mutable_fetch_devices() {
  // @@protoc_insertion_point(field_mutable_map:tensorflow.CallableOptions.fetch_devices)
  return _internal_mutable_fetch_devices();
}
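
// Editor's illustrative sketch (not protoc output): the map accessors above
// expose a google::protobuf::Map, which supports operator[], find(), and
// end() much like std::unordered_map. The tensor and device names are
// placeholders for illustration only.
inline void Example_SetFeedDevice(::tensorflow::CallableOptions* options) {
  (*options->mutable_feed_devices())["x:0"] = "/device:GPU:0";
  auto it = options->feed_devices().find("x:0");
  if (it != options->feed_devices().end()) {
    (void)it->second;  // "/device:GPU:0"
  }
}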

// bool fetch_skip_sync = 8;
inline void CallableOptions::clear_fetch_skip_sync() {
  _impl_.fetch_skip_sync_ = false;
}
inline bool CallableOptions::_internal_fetch_skip_sync() const {
  return _impl_.fetch_skip_sync_;
}
inline bool CallableOptions::fetch_skip_sync() const {
  // @@protoc_insertion_point(field_get:tensorflow.CallableOptions.fetch_skip_sync)
  return _internal_fetch_skip_sync();
}
inline void CallableOptions::_internal_set_fetch_skip_sync(bool value) {

  _impl_.fetch_skip_sync_ = value;
}
inline void CallableOptions::set_fetch_skip_sync(bool value) {
  _internal_set_fetch_skip_sync(value);
  // @@protoc_insertion_point(field_set:tensorflow.CallableOptions.fetch_skip_sync)
}

#ifdef __GNUC__
  #pragma GCC diagnostic pop
#endif  // __GNUC__
// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------


// @@protoc_insertion_point(namespace_scope)

}  // namespace tensorflow

PROTOBUF_NAMESPACE_OPEN

template <> struct is_proto_enum< ::tensorflow::OptimizerOptions_Level> : ::std::true_type {};
template <> struct is_proto_enum< ::tensorflow::OptimizerOptions_GlobalJitLevel> : ::std::true_type {};
template <> struct is_proto_enum< ::tensorflow::ConfigProto_Experimental_MlirBridgeRollout> : ::std::true_type {};
template <> struct is_proto_enum< ::tensorflow::RunOptions_TraceLevel> : ::std::true_type {};

PROTOBUF_NAMESPACE_CLOSE

// @@protoc_insertion_point(global_scope)

#include <google/protobuf/port_undef.inc>
#endif  // GOOGLE_PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto