// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto

#ifndef GOOGLE_PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto
#define GOOGLE_PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto

#include <cstdint>
#include <limits>
#include <string>

#include <google/protobuf/port_def.inc>
#if PROTOBUF_VERSION < 3021000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3021012 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif

#include <google/protobuf/port_undef.inc>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata_lite.h>
#include <google/protobuf/message_lite.h>
#include <google/protobuf/repeated_field.h>  // IWYU pragma: export
#include <google/protobuf/extension_set.h>  // IWYU pragma: export
#include <google/protobuf/map.h>  // IWYU pragma: export
#include <google/protobuf/map_entry_lite.h>
#include <google/protobuf/map_field_lite.h>
#include <google/protobuf/generated_enum_util.h>
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/coordination_config.pb.h"
#include "tensorflow/core/protobuf/debug.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
#define PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto
PROTOBUF_NAMESPACE_OPEN
namespace internal {
class AnyMetadata;
}  // namespace internal
PROTOBUF_NAMESPACE_CLOSE

// Internal implementation detail -- do not use these members.
struct TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto {
  static const ::uint32_t offsets[];
};
namespace tensorflow {
class CallableOptions;
struct CallableOptionsDefaultTypeInternal;
extern CallableOptionsDefaultTypeInternal _CallableOptions_default_instance_;
class CallableOptions_FeedDevicesEntry_DoNotUse;
struct CallableOptions_FeedDevicesEntry_DoNotUseDefaultTypeInternal;
extern CallableOptions_FeedDevicesEntry_DoNotUseDefaultTypeInternal _CallableOptions_FeedDevicesEntry_DoNotUse_default_instance_;
class CallableOptions_FetchDevicesEntry_DoNotUse;
struct CallableOptions_FetchDevicesEntry_DoNotUseDefaultTypeInternal;
extern CallableOptions_FetchDevicesEntry_DoNotUseDefaultTypeInternal _CallableOptions_FetchDevicesEntry_DoNotUse_default_instance_;
class ConfigProto;
struct ConfigProtoDefaultTypeInternal;
extern ConfigProtoDefaultTypeInternal _ConfigProto_default_instance_;
class ConfigProto_DeviceCountEntry_DoNotUse;
struct ConfigProto_DeviceCountEntry_DoNotUseDefaultTypeInternal;
extern ConfigProto_DeviceCountEntry_DoNotUseDefaultTypeInternal _ConfigProto_DeviceCountEntry_DoNotUse_default_instance_;
class ConfigProto_Experimental;
struct ConfigProto_ExperimentalDefaultTypeInternal;
extern ConfigProto_ExperimentalDefaultTypeInternal _ConfigProto_Experimental_default_instance_;
class GPUOptions;
struct GPUOptionsDefaultTypeInternal;
extern GPUOptionsDefaultTypeInternal _GPUOptions_default_instance_;
class GPUOptions_Experimental;
struct GPUOptions_ExperimentalDefaultTypeInternal;
extern GPUOptions_ExperimentalDefaultTypeInternal _GPUOptions_Experimental_default_instance_;
class GPUOptions_Experimental_VirtualDevices;
struct GPUOptions_Experimental_VirtualDevicesDefaultTypeInternal;
extern GPUOptions_Experimental_VirtualDevicesDefaultTypeInternal _GPUOptions_Experimental_VirtualDevices_default_instance_;
class GraphOptions;
struct GraphOptionsDefaultTypeInternal;
extern GraphOptionsDefaultTypeInternal _GraphOptions_default_instance_;
class OptimizerOptions;
struct OptimizerOptionsDefaultTypeInternal;
extern OptimizerOptionsDefaultTypeInternal _OptimizerOptions_default_instance_;
class RPCOptions;
struct RPCOptionsDefaultTypeInternal;
extern RPCOptionsDefaultTypeInternal _RPCOptions_default_instance_;
class RunMetadata;
struct RunMetadataDefaultTypeInternal;
extern RunMetadataDefaultTypeInternal _RunMetadata_default_instance_;
class RunMetadata_FunctionGraphs;
struct RunMetadata_FunctionGraphsDefaultTypeInternal;
extern RunMetadata_FunctionGraphsDefaultTypeInternal _RunMetadata_FunctionGraphs_default_instance_;
class RunOptions;
struct RunOptionsDefaultTypeInternal;
extern RunOptionsDefaultTypeInternal _RunOptions_default_instance_;
class RunOptions_Experimental;
struct RunOptions_ExperimentalDefaultTypeInternal;
extern RunOptions_ExperimentalDefaultTypeInternal _RunOptions_Experimental_default_instance_;
class RunOptions_Experimental_RunHandlerPoolOptions;
struct RunOptions_Experimental_RunHandlerPoolOptionsDefaultTypeInternal;
extern RunOptions_Experimental_RunHandlerPoolOptionsDefaultTypeInternal _RunOptions_Experimental_RunHandlerPoolOptions_default_instance_;
class SessionMetadata;
struct SessionMetadataDefaultTypeInternal;
extern SessionMetadataDefaultTypeInternal _SessionMetadata_default_instance_;
class TensorConnection;
struct TensorConnectionDefaultTypeInternal;
extern TensorConnectionDefaultTypeInternal _TensorConnection_default_instance_;
class ThreadPoolOptionProto;
struct ThreadPoolOptionProtoDefaultTypeInternal;
extern ThreadPoolOptionProtoDefaultTypeInternal _ThreadPoolOptionProto_default_instance_;
}  // namespace tensorflow
PROTOBUF_NAMESPACE_OPEN
template<> ::tensorflow::CallableOptions* Arena::CreateMaybeMessage<::tensorflow::CallableOptions>(Arena*);
template<> ::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse* Arena::CreateMaybeMessage<::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse>(Arena*);
template<> ::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse* Arena::CreateMaybeMessage<::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse>(Arena*);
template<> ::tensorflow::ConfigProto* Arena::CreateMaybeMessage<::tensorflow::ConfigProto>(Arena*);
template<> ::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse* Arena::CreateMaybeMessage<::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse>(Arena*);
template<> ::tensorflow::ConfigProto_Experimental* Arena::CreateMaybeMessage<::tensorflow::ConfigProto_Experimental>(Arena*);
template<> ::tensorflow::GPUOptions* Arena::CreateMaybeMessage<::tensorflow::GPUOptions>(Arena*);
template<> ::tensorflow::GPUOptions_Experimental* Arena::CreateMaybeMessage<::tensorflow::GPUOptions_Experimental>(Arena*);
template<> ::tensorflow::GPUOptions_Experimental_VirtualDevices* Arena::CreateMaybeMessage<::tensorflow::GPUOptions_Experimental_VirtualDevices>(Arena*);
template<> ::tensorflow::GraphOptions* Arena::CreateMaybeMessage<::tensorflow::GraphOptions>(Arena*);
template<> ::tensorflow::OptimizerOptions* Arena::CreateMaybeMessage<::tensorflow::OptimizerOptions>(Arena*);
template<> ::tensorflow::RPCOptions* Arena::CreateMaybeMessage<::tensorflow::RPCOptions>(Arena*);
template<> ::tensorflow::RunMetadata* Arena::CreateMaybeMessage<::tensorflow::RunMetadata>(Arena*);
template<> ::tensorflow::RunMetadata_FunctionGraphs* Arena::CreateMaybeMessage<::tensorflow::RunMetadata_FunctionGraphs>(Arena*);
template<> ::tensorflow::RunOptions* Arena::CreateMaybeMessage<::tensorflow::RunOptions>(Arena*);
template<> ::tensorflow::RunOptions_Experimental* Arena::CreateMaybeMessage<::tensorflow::RunOptions_Experimental>(Arena*);
template<> ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* Arena::CreateMaybeMessage<::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions>(Arena*);
template<> ::tensorflow::SessionMetadata* Arena::CreateMaybeMessage<::tensorflow::SessionMetadata>(Arena*);
template<> ::tensorflow::TensorConnection* Arena::CreateMaybeMessage<::tensorflow::TensorConnection>(Arena*);
template<> ::tensorflow::ThreadPoolOptionProto* Arena::CreateMaybeMessage<::tensorflow::ThreadPoolOptionProto>(Arena*);
PROTOBUF_NAMESPACE_CLOSE
namespace tensorflow {

enum OptimizerOptions_Level : int {
  OptimizerOptions_Level_L1 = 0,
  OptimizerOptions_Level_L0 = -1,
  OptimizerOptions_Level_OptimizerOptions_Level_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::min(),
  OptimizerOptions_Level_OptimizerOptions_Level_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::max()
};
bool OptimizerOptions_Level_IsValid(int value);
constexpr OptimizerOptions_Level OptimizerOptions_Level_Level_MIN = OptimizerOptions_Level_L0;
constexpr OptimizerOptions_Level OptimizerOptions_Level_Level_MAX = OptimizerOptions_Level_L1;
constexpr int OptimizerOptions_Level_Level_ARRAYSIZE = OptimizerOptions_Level_Level_MAX + 1;

const std::string& OptimizerOptions_Level_Name(OptimizerOptions_Level value);
template<typename T>
inline const std::string& OptimizerOptions_Level_Name(T enum_t_value) {
  static_assert(::std::is_same<T, OptimizerOptions_Level>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function OptimizerOptions_Level_Name.");
  return OptimizerOptions_Level_Name(static_cast<OptimizerOptions_Level>(enum_t_value));
}
bool OptimizerOptions_Level_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, OptimizerOptions_Level* value);
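// Usage sketch (comment only; not emitted by protoc): the paired _Name/_Parse
// helpers above round-trip between enum values and their .proto names. This
// assumes the matching generated config.pb.cc is linked in.
//
//   ::tensorflow::OptimizerOptions_Level level;
//   if (::tensorflow::OptimizerOptions_Level_Parse("L1", &level)) {
//     const std::string& name =
//         ::tensorflow::OptimizerOptions_Level_Name(level);  // "L1"
//   }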
enum OptimizerOptions_GlobalJitLevel : int {
  OptimizerOptions_GlobalJitLevel_DEFAULT = 0,
  OptimizerOptions_GlobalJitLevel_OFF = -1,
  OptimizerOptions_GlobalJitLevel_ON_1 = 1,
  OptimizerOptions_GlobalJitLevel_ON_2 = 2,
  OptimizerOptions_GlobalJitLevel_OptimizerOptions_GlobalJitLevel_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::min(),
  OptimizerOptions_GlobalJitLevel_OptimizerOptions_GlobalJitLevel_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::max()
};
bool OptimizerOptions_GlobalJitLevel_IsValid(int value);
constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions_GlobalJitLevel_GlobalJitLevel_MIN = OptimizerOptions_GlobalJitLevel_OFF;
constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions_GlobalJitLevel_GlobalJitLevel_MAX = OptimizerOptions_GlobalJitLevel_ON_2;
constexpr int OptimizerOptions_GlobalJitLevel_GlobalJitLevel_ARRAYSIZE = OptimizerOptions_GlobalJitLevel_GlobalJitLevel_MAX + 1;

const std::string& OptimizerOptions_GlobalJitLevel_Name(OptimizerOptions_GlobalJitLevel value);
template<typename T>
inline const std::string& OptimizerOptions_GlobalJitLevel_Name(T enum_t_value) {
  static_assert(::std::is_same<T, OptimizerOptions_GlobalJitLevel>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function OptimizerOptions_GlobalJitLevel_Name.");
  return OptimizerOptions_GlobalJitLevel_Name(static_cast<OptimizerOptions_GlobalJitLevel>(enum_t_value));
}
bool OptimizerOptions_GlobalJitLevel_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, OptimizerOptions_GlobalJitLevel* value);
enum ConfigProto_Experimental_MlirBridgeRollout : int {
  ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_UNSPECIFIED = 0,
  ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_ENABLED = 1,
  ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_DISABLED = 2,
  ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED = 3,
  ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED = 4,
  ConfigProto_Experimental_MlirBridgeRollout_ConfigProto_Experimental_MlirBridgeRollout_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::min(),
  ConfigProto_Experimental_MlirBridgeRollout_ConfigProto_Experimental_MlirBridgeRollout_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::max()
};
bool ConfigProto_Experimental_MlirBridgeRollout_IsValid(int value);
constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_MIN = ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_UNSPECIFIED;
constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_MAX = ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED;
constexpr int ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_ARRAYSIZE = ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_MAX + 1;

const std::string& ConfigProto_Experimental_MlirBridgeRollout_Name(ConfigProto_Experimental_MlirBridgeRollout value);
template<typename T>
inline const std::string& ConfigProto_Experimental_MlirBridgeRollout_Name(T enum_t_value) {
  static_assert(::std::is_same<T, ConfigProto_Experimental_MlirBridgeRollout>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function ConfigProto_Experimental_MlirBridgeRollout_Name.");
  return ConfigProto_Experimental_MlirBridgeRollout_Name(static_cast<ConfigProto_Experimental_MlirBridgeRollout>(enum_t_value));
}
bool ConfigProto_Experimental_MlirBridgeRollout_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ConfigProto_Experimental_MlirBridgeRollout* value);
enum RunOptions_TraceLevel : int {
  RunOptions_TraceLevel_NO_TRACE = 0,
  RunOptions_TraceLevel_SOFTWARE_TRACE = 1,
  RunOptions_TraceLevel_HARDWARE_TRACE = 2,
  RunOptions_TraceLevel_FULL_TRACE = 3,
  RunOptions_TraceLevel_RunOptions_TraceLevel_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::min(),
  RunOptions_TraceLevel_RunOptions_TraceLevel_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::max()
};
bool RunOptions_TraceLevel_IsValid(int value);
constexpr RunOptions_TraceLevel RunOptions_TraceLevel_TraceLevel_MIN = RunOptions_TraceLevel_NO_TRACE;
constexpr RunOptions_TraceLevel RunOptions_TraceLevel_TraceLevel_MAX = RunOptions_TraceLevel_FULL_TRACE;
constexpr int RunOptions_TraceLevel_TraceLevel_ARRAYSIZE = RunOptions_TraceLevel_TraceLevel_MAX + 1;

const std::string& RunOptions_TraceLevel_Name(RunOptions_TraceLevel value);
template<typename T>
inline const std::string& RunOptions_TraceLevel_Name(T enum_t_value) {
  static_assert(::std::is_same<T, RunOptions_TraceLevel>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function RunOptions_TraceLevel_Name.");
  return RunOptions_TraceLevel_Name(static_cast<RunOptions_TraceLevel>(enum_t_value));
}
bool RunOptions_TraceLevel_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, RunOptions_TraceLevel* value);
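// Usage sketch (comment only; not emitted by protoc): _IsValid guards a raw
// int before it is treated as a TraceLevel, e.g. for values read off the wire.
//
//   int raw = 3;
//   if (::tensorflow::RunOptions_TraceLevel_IsValid(raw)) {
//     auto level = static_cast<::tensorflow::RunOptions_TraceLevel>(raw);
//     // level == RunOptions_TraceLevel_FULL_TRACE
//   }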
// ===================================================================

class GPUOptions_Experimental_VirtualDevices final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.GPUOptions.Experimental.VirtualDevices) */ {
 public:
  inline GPUOptions_Experimental_VirtualDevices() : GPUOptions_Experimental_VirtualDevices(nullptr) {}
  ~GPUOptions_Experimental_VirtualDevices() override;
  explicit PROTOBUF_CONSTEXPR GPUOptions_Experimental_VirtualDevices(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  GPUOptions_Experimental_VirtualDevices(const GPUOptions_Experimental_VirtualDevices& from);
  GPUOptions_Experimental_VirtualDevices(GPUOptions_Experimental_VirtualDevices&& from) noexcept
    : GPUOptions_Experimental_VirtualDevices() {
    *this = ::std::move(from);
  }

  inline GPUOptions_Experimental_VirtualDevices& operator=(const GPUOptions_Experimental_VirtualDevices& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline GPUOptions_Experimental_VirtualDevices& operator=(GPUOptions_Experimental_VirtualDevices&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const GPUOptions_Experimental_VirtualDevices& default_instance() {
    return *internal_default_instance();
  }
  static inline const GPUOptions_Experimental_VirtualDevices* internal_default_instance() {
    return reinterpret_cast<const GPUOptions_Experimental_VirtualDevices*>(
               &_GPUOptions_Experimental_VirtualDevices_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    0;

  friend void swap(GPUOptions_Experimental_VirtualDevices& a, GPUOptions_Experimental_VirtualDevices& b) {
    a.Swap(&b);
  }
  inline void Swap(GPUOptions_Experimental_VirtualDevices* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(GPUOptions_Experimental_VirtualDevices* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  GPUOptions_Experimental_VirtualDevices* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<GPUOptions_Experimental_VirtualDevices>(arena);
  }
  GPUOptions_Experimental_VirtualDevices* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const GPUOptions_Experimental_VirtualDevices& from);
  void MergeFrom(const GPUOptions_Experimental_VirtualDevices& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(GPUOptions_Experimental_VirtualDevices* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.GPUOptions.Experimental.VirtualDevices";
  }
 protected:
  explicit GPUOptions_Experimental_VirtualDevices(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kMemoryLimitMbFieldNumber = 1,
    kPriorityFieldNumber = 2,
    kDeviceOrdinalFieldNumber = 3,
  };
  // repeated float memory_limit_mb = 1;
  int memory_limit_mb_size() const;
  private:
  int _internal_memory_limit_mb_size() const;
  public:
  void clear_memory_limit_mb();
  private:
  float _internal_memory_limit_mb(int index) const;
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >&
      _internal_memory_limit_mb() const;
  void _internal_add_memory_limit_mb(float value);
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >*
      _internal_mutable_memory_limit_mb();
  public:
  float memory_limit_mb(int index) const;
  void set_memory_limit_mb(int index, float value);
  void add_memory_limit_mb(float value);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >&
      memory_limit_mb() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >*
      mutable_memory_limit_mb();

  // repeated int32 priority = 2;
  int priority_size() const;
  private:
  int _internal_priority_size() const;
  public:
  void clear_priority();
  private:
  ::int32_t _internal_priority(int index) const;
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
      _internal_priority() const;
  void _internal_add_priority(::int32_t value);
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
      _internal_mutable_priority();
  public:
  ::int32_t priority(int index) const;
  void set_priority(int index, ::int32_t value);
  void add_priority(::int32_t value);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
      priority() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
      mutable_priority();

  // repeated int32 device_ordinal = 3;
  int device_ordinal_size() const;
  private:
  int _internal_device_ordinal_size() const;
  public:
  void clear_device_ordinal();
  private:
  ::int32_t _internal_device_ordinal(int index) const;
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
      _internal_device_ordinal() const;
  void _internal_add_device_ordinal(::int32_t value);
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
      _internal_mutable_device_ordinal();
  public:
  ::int32_t device_ordinal(int index) const;
  void set_device_ordinal(int index, ::int32_t value);
  void add_device_ordinal(::int32_t value);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
      device_ordinal() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
      mutable_device_ordinal();

  // @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental.VirtualDevices)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::RepeatedField< float > memory_limit_mb_;
    ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t > priority_;
    mutable std::atomic<int> _priority_cached_byte_size_;
    ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t > device_ordinal_;
    mutable std::atomic<int> _device_ordinal_cached_byte_size_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
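// Usage sketch (comment only; not emitted by protoc): the three repeated
// fields above act as parallel arrays, one entry per virtual device carved
// out of a physical GPU.
//
//   ::tensorflow::GPUOptions_Experimental_VirtualDevices vdevs;
//   vdevs.add_memory_limit_mb(1024.0f);  // virtual device 0: 1 GiB
//   vdevs.add_memory_limit_mb(2048.0f);  // virtual device 1: 2 GiB
//   vdevs.add_priority(0);
//   vdevs.add_priority(0);
//   // vdevs.memory_limit_mb_size() == 2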
// -------------------------------------------------------------------

class GPUOptions_Experimental final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.GPUOptions.Experimental) */ {
 public:
  inline GPUOptions_Experimental() : GPUOptions_Experimental(nullptr) {}
  ~GPUOptions_Experimental() override;
  explicit PROTOBUF_CONSTEXPR GPUOptions_Experimental(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  GPUOptions_Experimental(const GPUOptions_Experimental& from);
  GPUOptions_Experimental(GPUOptions_Experimental&& from) noexcept
    : GPUOptions_Experimental() {
    *this = ::std::move(from);
  }

  inline GPUOptions_Experimental& operator=(const GPUOptions_Experimental& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline GPUOptions_Experimental& operator=(GPUOptions_Experimental&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const GPUOptions_Experimental& default_instance() {
    return *internal_default_instance();
  }
  static inline const GPUOptions_Experimental* internal_default_instance() {
    return reinterpret_cast<const GPUOptions_Experimental*>(
               &_GPUOptions_Experimental_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    1;

  friend void swap(GPUOptions_Experimental& a, GPUOptions_Experimental& b) {
    a.Swap(&b);
  }
  inline void Swap(GPUOptions_Experimental* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(GPUOptions_Experimental* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  GPUOptions_Experimental* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<GPUOptions_Experimental>(arena);
  }
  GPUOptions_Experimental* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const GPUOptions_Experimental& from);
  void MergeFrom(const GPUOptions_Experimental& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(GPUOptions_Experimental* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.GPUOptions.Experimental";
  }
 protected:
  explicit GPUOptions_Experimental(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  typedef GPUOptions_Experimental_VirtualDevices VirtualDevices;

  // accessors -------------------------------------------------------

  enum : int {
    kVirtualDevicesFieldNumber = 1,
    kCollectiveRingOrderFieldNumber = 4,
    kNumDevToDevCopyStreamsFieldNumber = 3,
    kKernelTrackerMaxIntervalFieldNumber = 7,
    kUseUnifiedMemoryFieldNumber = 2,
    kTimestampedAllocatorFieldNumber = 5,
    kUseCudaMallocAsyncFieldNumber = 11,
    kDisallowRetryOnAllocationFailureFieldNumber = 12,
    kKernelTrackerMaxBytesFieldNumber = 8,
    kInternalFragmentationFractionFieldNumber = 10,
    kKernelTrackerMaxPendingFieldNumber = 9,
  };
  // repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
  int virtual_devices_size() const;
  private:
  int _internal_virtual_devices_size() const;
  public:
  void clear_virtual_devices();
  ::tensorflow::GPUOptions_Experimental_VirtualDevices* mutable_virtual_devices(int index);
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GPUOptions_Experimental_VirtualDevices >*
      mutable_virtual_devices();
  private:
  const ::tensorflow::GPUOptions_Experimental_VirtualDevices& _internal_virtual_devices(int index) const;
  ::tensorflow::GPUOptions_Experimental_VirtualDevices* _internal_add_virtual_devices();
  public:
  const ::tensorflow::GPUOptions_Experimental_VirtualDevices& virtual_devices(int index) const;
  ::tensorflow::GPUOptions_Experimental_VirtualDevices* add_virtual_devices();
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GPUOptions_Experimental_VirtualDevices >&
      virtual_devices() const;

  // string collective_ring_order = 4;
  void clear_collective_ring_order();
  const std::string& collective_ring_order() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_collective_ring_order(ArgT0&& arg0, ArgT... args);
  std::string* mutable_collective_ring_order();
  PROTOBUF_NODISCARD std::string* release_collective_ring_order();
  void set_allocated_collective_ring_order(std::string* collective_ring_order);
  private:
  const std::string& _internal_collective_ring_order() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_collective_ring_order(const std::string& value);
  std::string* _internal_mutable_collective_ring_order();
  public:

  // int32 num_dev_to_dev_copy_streams = 3;
  void clear_num_dev_to_dev_copy_streams();
  ::int32_t num_dev_to_dev_copy_streams() const;
  void set_num_dev_to_dev_copy_streams(::int32_t value);
  private:
  ::int32_t _internal_num_dev_to_dev_copy_streams() const;
  void _internal_set_num_dev_to_dev_copy_streams(::int32_t value);
  public:

  // int32 kernel_tracker_max_interval = 7;
  void clear_kernel_tracker_max_interval();
  ::int32_t kernel_tracker_max_interval() const;
  void set_kernel_tracker_max_interval(::int32_t value);
  private:
  ::int32_t _internal_kernel_tracker_max_interval() const;
  void _internal_set_kernel_tracker_max_interval(::int32_t value);
  public:

  // bool use_unified_memory = 2;
  void clear_use_unified_memory();
  bool use_unified_memory() const;
  void set_use_unified_memory(bool value);
  private:
  bool _internal_use_unified_memory() const;
  void _internal_set_use_unified_memory(bool value);
  public:

  // bool timestamped_allocator = 5;
  void clear_timestamped_allocator();
  bool timestamped_allocator() const;
  void set_timestamped_allocator(bool value);
  private:
  bool _internal_timestamped_allocator() const;
  void _internal_set_timestamped_allocator(bool value);
  public:

  // bool use_cuda_malloc_async = 11;
  void clear_use_cuda_malloc_async();
  bool use_cuda_malloc_async() const;
  void set_use_cuda_malloc_async(bool value);
  private:
  bool _internal_use_cuda_malloc_async() const;
  void _internal_set_use_cuda_malloc_async(bool value);
  public:

  // bool disallow_retry_on_allocation_failure = 12;
  void clear_disallow_retry_on_allocation_failure();
  bool disallow_retry_on_allocation_failure() const;
  void set_disallow_retry_on_allocation_failure(bool value);
  private:
  bool _internal_disallow_retry_on_allocation_failure() const;
  void _internal_set_disallow_retry_on_allocation_failure(bool value);
  public:

  // int32 kernel_tracker_max_bytes = 8;
  void clear_kernel_tracker_max_bytes();
  ::int32_t kernel_tracker_max_bytes() const;
  void set_kernel_tracker_max_bytes(::int32_t value);
  private:
  ::int32_t _internal_kernel_tracker_max_bytes() const;
  void _internal_set_kernel_tracker_max_bytes(::int32_t value);
  public:

  // double internal_fragmentation_fraction = 10;
  void clear_internal_fragmentation_fraction();
  double internal_fragmentation_fraction() const;
  void set_internal_fragmentation_fraction(double value);
  private:
  double _internal_internal_fragmentation_fraction() const;
  void _internal_set_internal_fragmentation_fraction(double value);
  public:

  // int32 kernel_tracker_max_pending = 9;
  void clear_kernel_tracker_max_pending();
  ::int32_t kernel_tracker_max_pending() const;
  void set_kernel_tracker_max_pending(::int32_t value);
  private:
  ::int32_t _internal_kernel_tracker_max_pending() const;
  void _internal_set_kernel_tracker_max_pending(::int32_t value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GPUOptions_Experimental_VirtualDevices > virtual_devices_;
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr collective_ring_order_;
    ::int32_t num_dev_to_dev_copy_streams_;
    ::int32_t kernel_tracker_max_interval_;
    bool use_unified_memory_;
    bool timestamped_allocator_;
    bool use_cuda_malloc_async_;
    bool disallow_retry_on_allocation_failure_;
    ::int32_t kernel_tracker_max_bytes_;
    double internal_fragmentation_fraction_;
    ::int32_t kernel_tracker_max_pending_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
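// Usage sketch (comment only; not emitted by protoc): scalar fields follow
// the usual set_/clear_ pattern, and add_virtual_devices() appends to the
// repeated VirtualDevices field and returns the new mutable element.
//
//   ::tensorflow::GPUOptions_Experimental exp;
//   exp.set_use_cuda_malloc_async(true);
//   exp.set_num_dev_to_dev_copy_streams(2);
//   ::tensorflow::GPUOptions_Experimental_VirtualDevices* vdev =
//       exp.add_virtual_devices();
//   vdev->add_memory_limit_mb(512.0f);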
// -------------------------------------------------------------------

class GPUOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.GPUOptions) */ {
 public:
  inline GPUOptions() : GPUOptions(nullptr) {}
  ~GPUOptions() override;
  explicit PROTOBUF_CONSTEXPR GPUOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  GPUOptions(const GPUOptions& from);
  GPUOptions(GPUOptions&& from) noexcept
    : GPUOptions() {
    *this = ::std::move(from);
  }

  inline GPUOptions& operator=(const GPUOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline GPUOptions& operator=(GPUOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const GPUOptions& default_instance() {
    return *internal_default_instance();
  }
  static inline const GPUOptions* internal_default_instance() {
    return reinterpret_cast<const GPUOptions*>(
               &_GPUOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    2;

  friend void swap(GPUOptions& a, GPUOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(GPUOptions* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(GPUOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  GPUOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<GPUOptions>(arena);
  }
  GPUOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const GPUOptions& from);
  void MergeFrom(const GPUOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(GPUOptions* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.GPUOptions";
  }
 protected:
  explicit GPUOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  typedef GPUOptions_Experimental Experimental;

  // accessors -------------------------------------------------------

  enum : int {
    kAllocatorTypeFieldNumber = 2,
    kVisibleDeviceListFieldNumber = 5,
    kExperimentalFieldNumber = 9,
    kPerProcessGpuMemoryFractionFieldNumber = 1,
    kDeferredDeletionBytesFieldNumber = 3,
    kPollingActiveDelayUsecsFieldNumber = 6,
    kAllowGrowthFieldNumber = 4,
    kForceGpuCompatibleFieldNumber = 8,
    kPollingInactiveDelayMsecsFieldNumber = 7,
  };
  // string allocator_type = 2;
  void clear_allocator_type();
  const std::string& allocator_type() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_allocator_type(ArgT0&& arg0, ArgT... args);
  std::string* mutable_allocator_type();
  PROTOBUF_NODISCARD std::string* release_allocator_type();
  void set_allocated_allocator_type(std::string* allocator_type);
  private:
  const std::string& _internal_allocator_type() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_allocator_type(const std::string& value);
  std::string* _internal_mutable_allocator_type();
  public:

  // string visible_device_list = 5;
  void clear_visible_device_list();
  const std::string& visible_device_list() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_visible_device_list(ArgT0&& arg0, ArgT... args);
  std::string* mutable_visible_device_list();
  PROTOBUF_NODISCARD std::string* release_visible_device_list();
  void set_allocated_visible_device_list(std::string* visible_device_list);
  private:
  const std::string& _internal_visible_device_list() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_visible_device_list(const std::string& value);
  std::string* _internal_mutable_visible_device_list();
  public:

  // .tensorflow.GPUOptions.Experimental experimental = 9;
  bool has_experimental() const;
  private:
  bool _internal_has_experimental() const;
  public:
  void clear_experimental();
  const ::tensorflow::GPUOptions_Experimental& experimental() const;
  PROTOBUF_NODISCARD ::tensorflow::GPUOptions_Experimental* release_experimental();
  ::tensorflow::GPUOptions_Experimental* mutable_experimental();
  void set_allocated_experimental(::tensorflow::GPUOptions_Experimental* experimental);
  private:
  const ::tensorflow::GPUOptions_Experimental& _internal_experimental() const;
  ::tensorflow::GPUOptions_Experimental* _internal_mutable_experimental();
  public:
  void unsafe_arena_set_allocated_experimental(
      ::tensorflow::GPUOptions_Experimental* experimental);
  ::tensorflow::GPUOptions_Experimental* unsafe_arena_release_experimental();

  // double per_process_gpu_memory_fraction = 1;
  void clear_per_process_gpu_memory_fraction();
  double per_process_gpu_memory_fraction() const;
  void set_per_process_gpu_memory_fraction(double value);
  private:
  double _internal_per_process_gpu_memory_fraction() const;
  void _internal_set_per_process_gpu_memory_fraction(double value);
  public:

  // int64 deferred_deletion_bytes = 3;
  void clear_deferred_deletion_bytes();
  ::int64_t deferred_deletion_bytes() const;
  void set_deferred_deletion_bytes(::int64_t value);
  private:
  ::int64_t _internal_deferred_deletion_bytes() const;
  void _internal_set_deferred_deletion_bytes(::int64_t value);
  public:

  // int32 polling_active_delay_usecs = 6;
  void clear_polling_active_delay_usecs();
  ::int32_t polling_active_delay_usecs() const;
  void set_polling_active_delay_usecs(::int32_t value);
  private:
  ::int32_t _internal_polling_active_delay_usecs() const;
  void _internal_set_polling_active_delay_usecs(::int32_t value);
  public:

  // bool allow_growth = 4;
  void clear_allow_growth();
  bool allow_growth() const;
  void set_allow_growth(bool value);
  private:
  bool _internal_allow_growth() const;
  void _internal_set_allow_growth(bool value);
  public:

  // bool force_gpu_compatible = 8;
  void clear_force_gpu_compatible();
  bool force_gpu_compatible() const;
  void set_force_gpu_compatible(bool value);
  private:
  bool _internal_force_gpu_compatible() const;
  void _internal_set_force_gpu_compatible(bool value);
  public:

  // int32 polling_inactive_delay_msecs = 7;
  void clear_polling_inactive_delay_msecs();
  ::int32_t polling_inactive_delay_msecs() const;
  void set_polling_inactive_delay_msecs(::int32_t value);
  private:
  ::int32_t _internal_polling_inactive_delay_msecs() const;
  void _internal_set_polling_inactive_delay_msecs(::int32_t value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.GPUOptions)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr allocator_type_;
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr visible_device_list_;
    ::tensorflow::GPUOptions_Experimental* experimental_;
    double per_process_gpu_memory_fraction_;
    ::int64_t deferred_deletion_bytes_;
    ::int32_t polling_active_delay_usecs_;
    bool allow_growth_;
    bool force_gpu_compatible_;
    ::int32_t polling_inactive_delay_msecs_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
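// Usage sketch (comment only; not emitted by protoc): typical top-level GPU
// configuration; mutable_experimental() lazily allocates the Experimental
// submessage (on the owning arena, if any).
//
//   ::tensorflow::GPUOptions gpu;
//   gpu.set_allow_growth(true);
//   gpu.set_per_process_gpu_memory_fraction(0.5);
//   gpu.set_visible_device_list("0,1");
//   gpu.mutable_experimental()->set_timestamped_allocator(true);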
// -------------------------------------------------------------------

class OptimizerOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.OptimizerOptions) */ {
 public:
  inline OptimizerOptions() : OptimizerOptions(nullptr) {}
  ~OptimizerOptions() override;
  explicit PROTOBUF_CONSTEXPR OptimizerOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  OptimizerOptions(const OptimizerOptions& from);
  OptimizerOptions(OptimizerOptions&& from) noexcept
    : OptimizerOptions() {
    *this = ::std::move(from);
  }

  inline OptimizerOptions& operator=(const OptimizerOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline OptimizerOptions& operator=(OptimizerOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const OptimizerOptions& default_instance() {
    return *internal_default_instance();
  }
  static inline const OptimizerOptions* internal_default_instance() {
    return reinterpret_cast<const OptimizerOptions*>(
               &_OptimizerOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    3;

  friend void swap(OptimizerOptions& a, OptimizerOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(OptimizerOptions* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(OptimizerOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  OptimizerOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<OptimizerOptions>(arena);
  }
  OptimizerOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const OptimizerOptions& from);
  void MergeFrom(const OptimizerOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(OptimizerOptions* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.OptimizerOptions";
  }
 protected:
  explicit OptimizerOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  typedef OptimizerOptions_Level Level;
  static constexpr Level L1 =
    OptimizerOptions_Level_L1;
  static constexpr Level L0 =
    OptimizerOptions_Level_L0;
  static inline bool Level_IsValid(int value) {
    return OptimizerOptions_Level_IsValid(value);
  }
  static constexpr Level Level_MIN =
    OptimizerOptions_Level_Level_MIN;
  static constexpr Level Level_MAX =
    OptimizerOptions_Level_Level_MAX;
  static constexpr int Level_ARRAYSIZE =
    OptimizerOptions_Level_Level_ARRAYSIZE;
  template<typename T>
  static inline const std::string& Level_Name(T enum_t_value) {
    static_assert(::std::is_same<T, Level>::value ||
      ::std::is_integral<T>::value,
      "Incorrect type passed to function Level_Name.");
    return OptimizerOptions_Level_Name(enum_t_value);
  }
  static inline bool Level_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
      Level* value) {
    return OptimizerOptions_Level_Parse(name, value);
  }

  typedef OptimizerOptions_GlobalJitLevel GlobalJitLevel;
  static constexpr GlobalJitLevel DEFAULT =
    OptimizerOptions_GlobalJitLevel_DEFAULT;
  static constexpr GlobalJitLevel OFF =
    OptimizerOptions_GlobalJitLevel_OFF;
  static constexpr GlobalJitLevel ON_1 =
    OptimizerOptions_GlobalJitLevel_ON_1;
  static constexpr GlobalJitLevel ON_2 =
    OptimizerOptions_GlobalJitLevel_ON_2;
  static inline bool GlobalJitLevel_IsValid(int value) {
    return OptimizerOptions_GlobalJitLevel_IsValid(value);
  }
  static constexpr GlobalJitLevel GlobalJitLevel_MIN =
    OptimizerOptions_GlobalJitLevel_GlobalJitLevel_MIN;
  static constexpr GlobalJitLevel GlobalJitLevel_MAX =
    OptimizerOptions_GlobalJitLevel_GlobalJitLevel_MAX;
  static constexpr int GlobalJitLevel_ARRAYSIZE =
    OptimizerOptions_GlobalJitLevel_GlobalJitLevel_ARRAYSIZE;
  template<typename T>
  static inline const std::string& GlobalJitLevel_Name(T enum_t_value) {
    static_assert(::std::is_same<T, GlobalJitLevel>::value ||
      ::std::is_integral<T>::value,
      "Incorrect type passed to function GlobalJitLevel_Name.");
    return OptimizerOptions_GlobalJitLevel_Name(enum_t_value);
  }
  static inline bool GlobalJitLevel_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
      GlobalJitLevel* value) {
    return OptimizerOptions_GlobalJitLevel_Parse(name, value);
  }

  // accessors -------------------------------------------------------

  enum : int {
    kOptLevelFieldNumber = 3,
    kDoCommonSubexpressionEliminationFieldNumber = 1,
    kDoConstantFoldingFieldNumber = 2,
    kDoFunctionInliningFieldNumber = 4,
    kCpuGlobalJitFieldNumber = 7,
    kMaxFoldedConstantInBytesFieldNumber = 6,
    kGlobalJitLevelFieldNumber = 5,
  };
  // .tensorflow.OptimizerOptions.Level opt_level = 3;
  void clear_opt_level();
  ::tensorflow::OptimizerOptions_Level opt_level() const;
  void set_opt_level(::tensorflow::OptimizerOptions_Level value);
  private:
  ::tensorflow::OptimizerOptions_Level _internal_opt_level() const;
  void _internal_set_opt_level(::tensorflow::OptimizerOptions_Level value);
  public:

  // bool do_common_subexpression_elimination = 1;
  void clear_do_common_subexpression_elimination();
  bool do_common_subexpression_elimination() const;
  void set_do_common_subexpression_elimination(bool value);
  private:
  bool _internal_do_common_subexpression_elimination() const;
  void _internal_set_do_common_subexpression_elimination(bool value);
  public:

  // bool do_constant_folding = 2;
  void clear_do_constant_folding();
  bool do_constant_folding() const;
  void set_do_constant_folding(bool value);
  private:
  bool _internal_do_constant_folding() const;
  void _internal_set_do_constant_folding(bool value);
  public:

  // bool do_function_inlining = 4;
  void clear_do_function_inlining();
  bool do_function_inlining() const;
  void set_do_function_inlining(bool value);
  private:
  bool _internal_do_function_inlining() const;
  void _internal_set_do_function_inlining(bool value);
  public:

  // bool cpu_global_jit = 7;
  void clear_cpu_global_jit();
  bool cpu_global_jit() const;
  void set_cpu_global_jit(bool value);
  private:
  bool _internal_cpu_global_jit() const;
  void _internal_set_cpu_global_jit(bool value);
  public:

  // int64 max_folded_constant_in_bytes = 6;
  void clear_max_folded_constant_in_bytes();
  ::int64_t max_folded_constant_in_bytes() const;
  void set_max_folded_constant_in_bytes(::int64_t value);
  private:
  ::int64_t _internal_max_folded_constant_in_bytes() const;
  void _internal_set_max_folded_constant_in_bytes(::int64_t value);
  public:

  // .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
  void clear_global_jit_level();
  ::tensorflow::OptimizerOptions_GlobalJitLevel global_jit_level() const;
  void set_global_jit_level(::tensorflow::OptimizerOptions_GlobalJitLevel value);
  private:
  ::tensorflow::OptimizerOptions_GlobalJitLevel _internal_global_jit_level() const;
  void _internal_set_global_jit_level(::tensorflow::OptimizerOptions_GlobalJitLevel value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.OptimizerOptions)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    int opt_level_;
    bool do_common_subexpression_elimination_;
    bool do_constant_folding_;
    bool do_function_inlining_;
    bool cpu_global_jit_;
    ::int64_t max_folded_constant_in_bytes_;
    int global_jit_level_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
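// Usage sketch (comment only; not emitted by protoc): the nested-type aliases
// above let callers write OptimizerOptions::L1 and OptimizerOptions::ON_1
// instead of the mangled file-level enumerator names.
//
//   ::tensorflow::OptimizerOptions opts;
//   opts.set_opt_level(::tensorflow::OptimizerOptions::L1);
//   opts.set_global_jit_level(::tensorflow::OptimizerOptions::ON_1);
//   opts.set_do_constant_folding(true);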
// -------------------------------------------------------------------

class GraphOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.GraphOptions) */ {
 public:
  inline GraphOptions() : GraphOptions(nullptr) {}
  ~GraphOptions() override;
  explicit PROTOBUF_CONSTEXPR GraphOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  GraphOptions(const GraphOptions& from);
  GraphOptions(GraphOptions&& from) noexcept
    : GraphOptions() {
    *this = ::std::move(from);
  }

  inline GraphOptions& operator=(const GraphOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline GraphOptions& operator=(GraphOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
#endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const GraphOptions& default_instance() {
    return *internal_default_instance();
  }
  static inline const GraphOptions* internal_default_instance() {
    return reinterpret_cast<const GraphOptions*>(
               &_GraphOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    4;

  friend void swap(GraphOptions& a, GraphOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(GraphOptions* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(GraphOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  GraphOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<GraphOptions>(arena);
  }
  GraphOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const GraphOptions& from);
  void MergeFrom(const GraphOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(GraphOptions* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.GraphOptions";
  }
 protected:
  explicit GraphOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                        bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kOptimizerOptionsFieldNumber = 3,
    kRewriteOptionsFieldNumber = 10,
    kBuildCostModelFieldNumber = 4,
    kEnableRecvSchedulingFieldNumber = 2,
    kInferShapesFieldNumber = 5,
    kPlacePrunedGraphFieldNumber = 6,
    kEnableBfloat16SendrecvFieldNumber = 7,
    kTimelineStepFieldNumber = 8,
    kBuildCostModelAfterFieldNumber = 9,
  };
  // .tensorflow.OptimizerOptions optimizer_options = 3;
  bool has_optimizer_options() const;
 private:
  bool _internal_has_optimizer_options() const;
 public:
  void clear_optimizer_options();
  const ::tensorflow::OptimizerOptions& optimizer_options() const;
  PROTOBUF_NODISCARD ::tensorflow::OptimizerOptions* release_optimizer_options();
  ::tensorflow::OptimizerOptions* mutable_optimizer_options();
  void set_allocated_optimizer_options(::tensorflow::OptimizerOptions* optimizer_options);
 private:
  const ::tensorflow::OptimizerOptions& _internal_optimizer_options() const;
  ::tensorflow::OptimizerOptions* _internal_mutable_optimizer_options();
 public:
  void unsafe_arena_set_allocated_optimizer_options(
      ::tensorflow::OptimizerOptions* optimizer_options);
  ::tensorflow::OptimizerOptions* unsafe_arena_release_optimizer_options();

  // .tensorflow.RewriterConfig rewrite_options = 10;
  bool has_rewrite_options() const;
 private:
  bool _internal_has_rewrite_options() const;
 public:
  void clear_rewrite_options();
  const ::tensorflow::RewriterConfig& rewrite_options() const;
  PROTOBUF_NODISCARD ::tensorflow::RewriterConfig* release_rewrite_options();
  ::tensorflow::RewriterConfig* mutable_rewrite_options();
  void set_allocated_rewrite_options(::tensorflow::RewriterConfig* rewrite_options);
 private:
  const ::tensorflow::RewriterConfig& _internal_rewrite_options() const;
  ::tensorflow::RewriterConfig* _internal_mutable_rewrite_options();
 public:
  void unsafe_arena_set_allocated_rewrite_options(
      ::tensorflow::RewriterConfig* rewrite_options);
  ::tensorflow::RewriterConfig* unsafe_arena_release_rewrite_options();

  // int64 build_cost_model = 4;
  void clear_build_cost_model();
  ::int64_t build_cost_model() const;
  void set_build_cost_model(::int64_t value);
 private:
  ::int64_t _internal_build_cost_model() const;
  void _internal_set_build_cost_model(::int64_t value);
 public:

  // bool enable_recv_scheduling = 2;
  void clear_enable_recv_scheduling();
  bool enable_recv_scheduling() const;
  void set_enable_recv_scheduling(bool value);
 private:
  bool _internal_enable_recv_scheduling() const;
  void _internal_set_enable_recv_scheduling(bool value);
 public:

  // bool infer_shapes = 5;
  void clear_infer_shapes();
  bool infer_shapes() const;
  void set_infer_shapes(bool value);
 private:
  bool _internal_infer_shapes() const;
  void _internal_set_infer_shapes(bool value);
 public:

  // bool place_pruned_graph = 6;
  void clear_place_pruned_graph();
  bool place_pruned_graph() const;
  void set_place_pruned_graph(bool value);
 private:
  bool _internal_place_pruned_graph() const;
  void _internal_set_place_pruned_graph(bool value);
 public:

  // bool enable_bfloat16_sendrecv = 7;
  void clear_enable_bfloat16_sendrecv();
  bool enable_bfloat16_sendrecv() const;
  void set_enable_bfloat16_sendrecv(bool value);
 private:
  bool _internal_enable_bfloat16_sendrecv() const;
  void _internal_set_enable_bfloat16_sendrecv(bool value);
 public:

  // int32 timeline_step = 8;
  void clear_timeline_step();
  ::int32_t timeline_step() const;
  void set_timeline_step(::int32_t value);
 private:
  ::int32_t _internal_timeline_step() const;
  void _internal_set_timeline_step(::int32_t value);
 public:

  // int64 build_cost_model_after = 9;
  void clear_build_cost_model_after();
  ::int64_t build_cost_model_after() const;
  void set_build_cost_model_after(::int64_t value);
 private:
  ::int64_t _internal_build_cost_model_after() const;
  void _internal_set_build_cost_model_after(::int64_t value);
 public:

  // @@protoc_insertion_point(class_scope:tensorflow.GraphOptions)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::tensorflow::OptimizerOptions* optimizer_options_;
    ::tensorflow::RewriterConfig* rewrite_options_;
    ::int64_t build_cost_model_;
    bool enable_recv_scheduling_;
    bool infer_shapes_;
    bool place_pruned_graph_;
    bool enable_bfloat16_sendrecv_;
    ::int32_t timeline_step_;
    ::int64_t build_cost_model_after_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
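// Example (editor's sketch, not emitted by protoc): every accessor used below
// is declared in the GraphOptions class above; mutable_optimizer_options()
// lazily creates the submessage, as is standard for generated message fields.
// set_do_constant_folding is assumed from the OptimizerOptions fields shown
// earlier in this header.
//
//   tensorflow::GraphOptions graph_opts;
//   graph_opts.set_infer_shapes(true);
//   graph_opts.set_place_pruned_graph(true);
//   graph_opts.mutable_optimizer_options()->set_do_constant_folding(true);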
// -------------------------------------------------------------------

class ThreadPoolOptionProto final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.ThreadPoolOptionProto) */ {
 public:
  inline ThreadPoolOptionProto() : ThreadPoolOptionProto(nullptr) {}
  ~ThreadPoolOptionProto() override;
  explicit PROTOBUF_CONSTEXPR ThreadPoolOptionProto(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  ThreadPoolOptionProto(const ThreadPoolOptionProto& from);
  ThreadPoolOptionProto(ThreadPoolOptionProto&& from) noexcept
    : ThreadPoolOptionProto() {
    *this = ::std::move(from);
  }

  inline ThreadPoolOptionProto& operator=(const ThreadPoolOptionProto& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline ThreadPoolOptionProto& operator=(ThreadPoolOptionProto&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
#endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const ThreadPoolOptionProto& default_instance() {
    return *internal_default_instance();
  }
  static inline const ThreadPoolOptionProto* internal_default_instance() {
    return reinterpret_cast<const ThreadPoolOptionProto*>(
               &_ThreadPoolOptionProto_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    5;

  friend void swap(ThreadPoolOptionProto& a, ThreadPoolOptionProto& b) {
    a.Swap(&b);
  }
  inline void Swap(ThreadPoolOptionProto* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(ThreadPoolOptionProto* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  ThreadPoolOptionProto* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<ThreadPoolOptionProto>(arena);
  }
  ThreadPoolOptionProto* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const ThreadPoolOptionProto& from);
  void MergeFrom(const ThreadPoolOptionProto& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(ThreadPoolOptionProto* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.ThreadPoolOptionProto";
  }
 protected:
  explicit ThreadPoolOptionProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                 bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kGlobalNameFieldNumber = 2,
    kNumThreadsFieldNumber = 1,
  };
  // string global_name = 2;
  void clear_global_name();
  const std::string& global_name() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_global_name(ArgT0&& arg0, ArgT... args);
  std::string* mutable_global_name();
  PROTOBUF_NODISCARD std::string* release_global_name();
  void set_allocated_global_name(std::string* global_name);
 private:
  const std::string& _internal_global_name() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_global_name(const std::string& value);
  std::string* _internal_mutable_global_name();
 public:

  // int32 num_threads = 1;
  void clear_num_threads();
  ::int32_t num_threads() const;
  void set_num_threads(::int32_t value);
 private:
  ::int32_t _internal_num_threads() const;
  void _internal_set_num_threads(::int32_t value);
 public:

  // @@protoc_insertion_point(class_scope:tensorflow.ThreadPoolOptionProto)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr global_name_;
    ::int32_t num_threads_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
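// Example (editor's sketch, not emitted by protoc): both setters used below
// are declared in the class above; the pool name is a placeholder.
//
//   tensorflow::ThreadPoolOptionProto pool;
//   pool.set_num_threads(8);
//   pool.set_global_name("shared_inter_op_pool");  // hypothetical name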
// -------------------------------------------------------------------

class RPCOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.RPCOptions) */ {
 public:
  inline RPCOptions() : RPCOptions(nullptr) {}
  ~RPCOptions() override;
  explicit PROTOBUF_CONSTEXPR RPCOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  RPCOptions(const RPCOptions& from);
  RPCOptions(RPCOptions&& from) noexcept
    : RPCOptions() {
    *this = ::std::move(from);
  }

  inline RPCOptions& operator=(const RPCOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline RPCOptions& operator=(RPCOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
#endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const RPCOptions& default_instance() {
    return *internal_default_instance();
  }
  static inline const RPCOptions* internal_default_instance() {
    return reinterpret_cast<const RPCOptions*>(
               &_RPCOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    6;

  friend void swap(RPCOptions& a, RPCOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(RPCOptions* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(RPCOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  RPCOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<RPCOptions>(arena);
  }
  RPCOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const RPCOptions& from);
  void MergeFrom(const RPCOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(RPCOptions* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.RPCOptions";
  }
 protected:
  explicit RPCOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                      bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kCompressionAlgorithmFieldNumber = 2,
    kCompressionLevelFieldNumber = 3,
    kUseRpcForInprocessMasterFieldNumber = 1,
    kCacheRpcResponseFieldNumber = 4,
    kDisableSessionConnectionSharingFieldNumber = 5,
    kNumChannelsPerTargetFieldNumber = 6,
  };
  // string compression_algorithm = 2;
  void clear_compression_algorithm();
  const std::string& compression_algorithm() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_compression_algorithm(ArgT0&& arg0, ArgT... args);
  std::string* mutable_compression_algorithm();
  PROTOBUF_NODISCARD std::string* release_compression_algorithm();
  void set_allocated_compression_algorithm(std::string* compression_algorithm);
 private:
  const std::string& _internal_compression_algorithm() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_compression_algorithm(const std::string& value);
  std::string* _internal_mutable_compression_algorithm();
 public:

  // int32 compression_level = 3;
  void clear_compression_level();
  ::int32_t compression_level() const;
  void set_compression_level(::int32_t value);
 private:
  ::int32_t _internal_compression_level() const;
  void _internal_set_compression_level(::int32_t value);
 public:

  // bool use_rpc_for_inprocess_master = 1;
  void clear_use_rpc_for_inprocess_master();
  bool use_rpc_for_inprocess_master() const;
  void set_use_rpc_for_inprocess_master(bool value);
 private:
  bool _internal_use_rpc_for_inprocess_master() const;
  void _internal_set_use_rpc_for_inprocess_master(bool value);
 public:

  // bool cache_rpc_response = 4;
  void clear_cache_rpc_response();
  bool cache_rpc_response() const;
  void set_cache_rpc_response(bool value);
 private:
  bool _internal_cache_rpc_response() const;
  void _internal_set_cache_rpc_response(bool value);
 public:

  // bool disable_session_connection_sharing = 5;
  void clear_disable_session_connection_sharing();
  bool disable_session_connection_sharing() const;
  void set_disable_session_connection_sharing(bool value);
 private:
  bool _internal_disable_session_connection_sharing() const;
  void _internal_set_disable_session_connection_sharing(bool value);
 public:

  // int32 num_channels_per_target = 6;
  void clear_num_channels_per_target();
  ::int32_t num_channels_per_target() const;
  void set_num_channels_per_target(::int32_t value);
 private:
  ::int32_t _internal_num_channels_per_target() const;
  void _internal_set_num_channels_per_target(::int32_t value);
 public:

  // @@protoc_insertion_point(class_scope:tensorflow.RPCOptions)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr compression_algorithm_;
    ::int32_t compression_level_;
    bool use_rpc_for_inprocess_master_;
    bool cache_rpc_response_;
    bool disable_session_connection_sharing_;
    ::int32_t num_channels_per_target_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
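// Example (editor's sketch, not emitted by protoc): the templated string
// setter accepts the usual protobuf argument forms (const std::string&,
// const char*, ...). The algorithm name below is illustrative only; which
// values are valid depends on the RPC transport in use.
//
//   tensorflow::RPCOptions rpc;
//   rpc.set_compression_algorithm("gzip");
//   rpc.set_compression_level(2);
//   rpc.set_num_channels_per_target(4);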
// -------------------------------------------------------------------

class SessionMetadata final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.SessionMetadata) */ {
 public:
  inline SessionMetadata() : SessionMetadata(nullptr) {}
  ~SessionMetadata() override;
  explicit PROTOBUF_CONSTEXPR SessionMetadata(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  SessionMetadata(const SessionMetadata& from);
  SessionMetadata(SessionMetadata&& from) noexcept
    : SessionMetadata() {
    *this = ::std::move(from);
  }

  inline SessionMetadata& operator=(const SessionMetadata& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline SessionMetadata& operator=(SessionMetadata&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
#endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const SessionMetadata& default_instance() {
    return *internal_default_instance();
  }
  static inline const SessionMetadata* internal_default_instance() {
    return reinterpret_cast<const SessionMetadata*>(
               &_SessionMetadata_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    7;

  friend void swap(SessionMetadata& a, SessionMetadata& b) {
    a.Swap(&b);
  }
  inline void Swap(SessionMetadata* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(SessionMetadata* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  SessionMetadata* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<SessionMetadata>(arena);
  }
  SessionMetadata* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const SessionMetadata& from);
  void MergeFrom(const SessionMetadata& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(SessionMetadata* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.SessionMetadata";
  }
 protected:
  explicit SessionMetadata(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                           bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kNameFieldNumber = 1,
    kVersionFieldNumber = 2,
  };
  // string name = 1;
  void clear_name();
  const std::string& name() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_name(ArgT0&& arg0, ArgT... args);
  std::string* mutable_name();
  PROTOBUF_NODISCARD std::string* release_name();
  void set_allocated_name(std::string* name);
 private:
  const std::string& _internal_name() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_name(const std::string& value);
  std::string* _internal_mutable_name();
 public:

  // int64 version = 2;
  void clear_version();
  ::int64_t version() const;
  void set_version(::int64_t value);
 private:
  ::int64_t _internal_version() const;
  void _internal_set_version(::int64_t value);
 public:

  // @@protoc_insertion_point(class_scope:tensorflow.SessionMetadata)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_;
    ::int64_t version_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
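// Example (editor's sketch, not emitted by protoc): both accessors are
// declared above; the name and version values are placeholders.
//
//   tensorflow::SessionMetadata meta;
//   meta.set_name("serving_session");
//   meta.set_version(1);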
// -------------------------------------------------------------------

class ConfigProto_DeviceCountEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntryLite<ConfigProto_DeviceCountEntry_DoNotUse,
    std::string, ::int32_t,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32> {
 public:
  typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntryLite<ConfigProto_DeviceCountEntry_DoNotUse,
      std::string, ::int32_t,
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32> SuperType;
  ConfigProto_DeviceCountEntry_DoNotUse();
  explicit PROTOBUF_CONSTEXPR ConfigProto_DeviceCountEntry_DoNotUse(
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
  explicit ConfigProto_DeviceCountEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  void MergeFrom(const ConfigProto_DeviceCountEntry_DoNotUse& other);
  static const ConfigProto_DeviceCountEntry_DoNotUse* internal_default_instance() { return reinterpret_cast<const ConfigProto_DeviceCountEntry_DoNotUse*>(&_ConfigProto_DeviceCountEntry_DoNotUse_default_instance_); }
  static bool ValidateKey(std::string* s) {
    return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast<int>(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.ConfigProto.DeviceCountEntry.key");
  }
  static bool ValidateValue(void*) { return true; }
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};

// -------------------------------------------------------------------

class ConfigProto_Experimental final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.ConfigProto.Experimental) */ {
 public:
  inline ConfigProto_Experimental() : ConfigProto_Experimental(nullptr) {}
  ~ConfigProto_Experimental() override;
  explicit PROTOBUF_CONSTEXPR ConfigProto_Experimental(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  ConfigProto_Experimental(const ConfigProto_Experimental& from);
  ConfigProto_Experimental(ConfigProto_Experimental&& from) noexcept
    : ConfigProto_Experimental() {
    *this = ::std::move(from);
  }

  inline ConfigProto_Experimental& operator=(const ConfigProto_Experimental& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline ConfigProto_Experimental& operator=(ConfigProto_Experimental&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
#endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const ConfigProto_Experimental& default_instance() {
    return *internal_default_instance();
  }
  static inline const ConfigProto_Experimental* internal_default_instance() {
    return reinterpret_cast<const ConfigProto_Experimental*>(
               &_ConfigProto_Experimental_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    9;

  friend void swap(ConfigProto_Experimental& a, ConfigProto_Experimental& b) {
    a.Swap(&b);
  }
  inline void Swap(ConfigProto_Experimental* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(ConfigProto_Experimental* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  ConfigProto_Experimental* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<ConfigProto_Experimental>(arena);
  }
  ConfigProto_Experimental* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const ConfigProto_Experimental& from);
  void MergeFrom(const ConfigProto_Experimental& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(ConfigProto_Experimental* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.ConfigProto.Experimental";
  }
 protected:
  explicit ConfigProto_Experimental(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                    bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  typedef ConfigProto_Experimental_MlirBridgeRollout MlirBridgeRollout;
  static constexpr MlirBridgeRollout MLIR_BRIDGE_ROLLOUT_UNSPECIFIED =
    ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_UNSPECIFIED;
  static constexpr MlirBridgeRollout MLIR_BRIDGE_ROLLOUT_ENABLED =
    ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_ENABLED;
  static constexpr MlirBridgeRollout MLIR_BRIDGE_ROLLOUT_DISABLED =
    ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_DISABLED;
  static constexpr MlirBridgeRollout MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED =
    ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED;
  static constexpr MlirBridgeRollout MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED =
    ConfigProto_Experimental_MlirBridgeRollout_MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED;
  static inline bool MlirBridgeRollout_IsValid(int value) {
    return ConfigProto_Experimental_MlirBridgeRollout_IsValid(value);
  }
  static constexpr MlirBridgeRollout MlirBridgeRollout_MIN =
    ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_MIN;
  static constexpr MlirBridgeRollout MlirBridgeRollout_MAX =
    ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_MAX;
  static constexpr int MlirBridgeRollout_ARRAYSIZE =
    ConfigProto_Experimental_MlirBridgeRollout_MlirBridgeRollout_ARRAYSIZE;
  template<typename T>
  static inline const std::string& MlirBridgeRollout_Name(T enum_t_value) {
    static_assert(::std::is_same<T, MlirBridgeRollout>::value ||
      ::std::is_integral<T>::value,
      "Incorrect type passed to function MlirBridgeRollout_Name.");
    return ConfigProto_Experimental_MlirBridgeRollout_Name(enum_t_value);
  }
  static inline bool MlirBridgeRollout_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
      MlirBridgeRollout* value) {
    return ConfigProto_Experimental_MlirBridgeRollout_Parse(name, value);
  }

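  // Example (editor's sketch, not emitted by protoc): round-tripping the
  // rollout enum through the helper functions declared just above.
  //
  //   const std::string& n =
  //       MlirBridgeRollout_Name(MLIR_BRIDGE_ROLLOUT_ENABLED);
  //   MlirBridgeRollout parsed;
  //   bool ok = MlirBridgeRollout_Parse(n, &parsed);  // expect ok == true
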
  // accessors -------------------------------------------------------

  enum : int {
    kCollectiveGroupLeaderFieldNumber = 1,
    kExecutorTypeFieldNumber = 3,
    kSessionMetadataFieldNumber = 11,
    kCoordinationConfigFieldNumber = 23,
    kRecvBufMaxChunkFieldNumber = 4,
    kUseNumaAffinityFieldNumber = 5,
    kCollectiveDeterministicSequentialExecutionFieldNumber = 6,
    kCollectiveNcclFieldNumber = 7,
    kShareSessionStateInClusterspecPropagationFieldNumber = 8,
    kDisableThreadSpinningFieldNumber = 9,
    kShareClusterDevicesInSessionFieldNumber = 10,
    kOptimizeForStaticGraphFieldNumber = 12,
    kEnableMlirBridgeFieldNumber = 13,
    kMlirBridgeRolloutFieldNumber = 17,
    kXlaFusionAutotunerThreshFieldNumber = 15,
    kEnableMlirGraphOptimizationFieldNumber = 16,
    kDisableOutputPartitionGraphsFieldNumber = 14,
    kUseTfrtFieldNumber = 18,
    kDisableFunctionalOpsLoweringFieldNumber = 21,
    kXlaPreferSingleGraphClusterFieldNumber = 22,
  };
  // string collective_group_leader = 1;
  void clear_collective_group_leader();
  const std::string& collective_group_leader() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_collective_group_leader(ArgT0&& arg0, ArgT... args);
  std::string* mutable_collective_group_leader();
  PROTOBUF_NODISCARD std::string* release_collective_group_leader();
  void set_allocated_collective_group_leader(std::string* collective_group_leader);
 private:
  const std::string& _internal_collective_group_leader() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_collective_group_leader(const std::string& value);
  std::string* _internal_mutable_collective_group_leader();
 public:

  // string executor_type = 3;
  void clear_executor_type();
  const std::string& executor_type() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_executor_type(ArgT0&& arg0, ArgT... args);
  std::string* mutable_executor_type();
  PROTOBUF_NODISCARD std::string* release_executor_type();
  void set_allocated_executor_type(std::string* executor_type);
 private:
  const std::string& _internal_executor_type() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_executor_type(const std::string& value);
  std::string* _internal_mutable_executor_type();
 public:

  // .tensorflow.SessionMetadata session_metadata = 11;
  bool has_session_metadata() const;
 private:
  bool _internal_has_session_metadata() const;
 public:
  void clear_session_metadata();
  const ::tensorflow::SessionMetadata& session_metadata() const;
  PROTOBUF_NODISCARD ::tensorflow::SessionMetadata* release_session_metadata();
  ::tensorflow::SessionMetadata* mutable_session_metadata();
  void set_allocated_session_metadata(::tensorflow::SessionMetadata* session_metadata);
 private:
  const ::tensorflow::SessionMetadata& _internal_session_metadata() const;
  ::tensorflow::SessionMetadata* _internal_mutable_session_metadata();
 public:
  void unsafe_arena_set_allocated_session_metadata(
      ::tensorflow::SessionMetadata* session_metadata);
  ::tensorflow::SessionMetadata* unsafe_arena_release_session_metadata();

  // .tensorflow.CoordinationServiceConfig coordination_config = 23;
  bool has_coordination_config() const;
 private:
  bool _internal_has_coordination_config() const;
 public:
  void clear_coordination_config();
  const ::tensorflow::CoordinationServiceConfig& coordination_config() const;
  PROTOBUF_NODISCARD ::tensorflow::CoordinationServiceConfig* release_coordination_config();
  ::tensorflow::CoordinationServiceConfig* mutable_coordination_config();
  void set_allocated_coordination_config(::tensorflow::CoordinationServiceConfig* coordination_config);
 private:
  const ::tensorflow::CoordinationServiceConfig& _internal_coordination_config() const;
  ::tensorflow::CoordinationServiceConfig* _internal_mutable_coordination_config();
 public:
  void unsafe_arena_set_allocated_coordination_config(
      ::tensorflow::CoordinationServiceConfig* coordination_config);
  ::tensorflow::CoordinationServiceConfig* unsafe_arena_release_coordination_config();

  // int32 recv_buf_max_chunk = 4;
  void clear_recv_buf_max_chunk();
  ::int32_t recv_buf_max_chunk() const;
  void set_recv_buf_max_chunk(::int32_t value);
 private:
  ::int32_t _internal_recv_buf_max_chunk() const;
  void _internal_set_recv_buf_max_chunk(::int32_t value);
 public:

  // bool use_numa_affinity = 5;
  void clear_use_numa_affinity();
  bool use_numa_affinity() const;
  void set_use_numa_affinity(bool value);
 private:
  bool _internal_use_numa_affinity() const;
  void _internal_set_use_numa_affinity(bool value);
 public:

  // bool collective_deterministic_sequential_execution = 6;
  void clear_collective_deterministic_sequential_execution();
  bool collective_deterministic_sequential_execution() const;
  void set_collective_deterministic_sequential_execution(bool value);
 private:
  bool _internal_collective_deterministic_sequential_execution() const;
  void _internal_set_collective_deterministic_sequential_execution(bool value);
 public:

  // bool collective_nccl = 7;
  void clear_collective_nccl();
  bool collective_nccl() const;
  void set_collective_nccl(bool value);
 private:
  bool _internal_collective_nccl() const;
  void _internal_set_collective_nccl(bool value);
 public:

  // bool share_session_state_in_clusterspec_propagation = 8;
  void clear_share_session_state_in_clusterspec_propagation();
  bool share_session_state_in_clusterspec_propagation() const;
  void set_share_session_state_in_clusterspec_propagation(bool value);
 private:
  bool _internal_share_session_state_in_clusterspec_propagation() const;
  void _internal_set_share_session_state_in_clusterspec_propagation(bool value);
 public:

  // bool disable_thread_spinning = 9;
  void clear_disable_thread_spinning();
  bool disable_thread_spinning() const;
  void set_disable_thread_spinning(bool value);
 private:
  bool _internal_disable_thread_spinning() const;
  void _internal_set_disable_thread_spinning(bool value);
 public:

  // bool share_cluster_devices_in_session = 10;
  void clear_share_cluster_devices_in_session();
  bool share_cluster_devices_in_session() const;
  void set_share_cluster_devices_in_session(bool value);
 private:
  bool _internal_share_cluster_devices_in_session() const;
  void _internal_set_share_cluster_devices_in_session(bool value);
 public:

  // bool optimize_for_static_graph = 12;
  void clear_optimize_for_static_graph();
  bool optimize_for_static_graph() const;
  void set_optimize_for_static_graph(bool value);
 private:
  bool _internal_optimize_for_static_graph() const;
  void _internal_set_optimize_for_static_graph(bool value);
 public:

  // bool enable_mlir_bridge = 13;
  void clear_enable_mlir_bridge();
  bool enable_mlir_bridge() const;
  void set_enable_mlir_bridge(bool value);
 private:
  bool _internal_enable_mlir_bridge() const;
  void _internal_set_enable_mlir_bridge(bool value);
 public:

  // .tensorflow.ConfigProto.Experimental.MlirBridgeRollout mlir_bridge_rollout = 17;
  void clear_mlir_bridge_rollout();
  ::tensorflow::ConfigProto_Experimental_MlirBridgeRollout mlir_bridge_rollout() const;
  void set_mlir_bridge_rollout(::tensorflow::ConfigProto_Experimental_MlirBridgeRollout value);
 private:
  ::tensorflow::ConfigProto_Experimental_MlirBridgeRollout _internal_mlir_bridge_rollout() const;
  void _internal_set_mlir_bridge_rollout(::tensorflow::ConfigProto_Experimental_MlirBridgeRollout value);
 public:

  // int64 xla_fusion_autotuner_thresh = 15;
  void clear_xla_fusion_autotuner_thresh();
  ::int64_t xla_fusion_autotuner_thresh() const;
  void set_xla_fusion_autotuner_thresh(::int64_t value);
 private:
  ::int64_t _internal_xla_fusion_autotuner_thresh() const;
  void _internal_set_xla_fusion_autotuner_thresh(::int64_t value);
 public:

  // bool enable_mlir_graph_optimization = 16;
  void clear_enable_mlir_graph_optimization();
  bool enable_mlir_graph_optimization() const;
  void set_enable_mlir_graph_optimization(bool value);
 private:
  bool _internal_enable_mlir_graph_optimization() const;
  void _internal_set_enable_mlir_graph_optimization(bool value);
 public:

  // bool disable_output_partition_graphs = 14;
  void clear_disable_output_partition_graphs();
  bool disable_output_partition_graphs() const;
  void set_disable_output_partition_graphs(bool value);
 private:
  bool _internal_disable_output_partition_graphs() const;
  void _internal_set_disable_output_partition_graphs(bool value);
 public:

  // bool use_tfrt = 18;
  void clear_use_tfrt();
  bool use_tfrt() const;
  void set_use_tfrt(bool value);
 private:
  bool _internal_use_tfrt() const;
  void _internal_set_use_tfrt(bool value);
 public:

  // bool disable_functional_ops_lowering = 21;
  void clear_disable_functional_ops_lowering();
  bool disable_functional_ops_lowering() const;
  void set_disable_functional_ops_lowering(bool value);
 private:
  bool _internal_disable_functional_ops_lowering() const;
  void _internal_set_disable_functional_ops_lowering(bool value);
 public:

  // bool xla_prefer_single_graph_cluster = 22;
  void clear_xla_prefer_single_graph_cluster();
  bool xla_prefer_single_graph_cluster() const;
  void set_xla_prefer_single_graph_cluster(bool value);
 private:
  bool _internal_xla_prefer_single_graph_cluster() const;
  void _internal_set_xla_prefer_single_graph_cluster(bool value);
 public:

  // @@protoc_insertion_point(class_scope:tensorflow.ConfigProto.Experimental)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr collective_group_leader_;
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr executor_type_;
    ::tensorflow::SessionMetadata* session_metadata_;
    ::tensorflow::CoordinationServiceConfig* coordination_config_;
    ::int32_t recv_buf_max_chunk_;
    bool use_numa_affinity_;
    bool collective_deterministic_sequential_execution_;
    bool collective_nccl_;
    bool share_session_state_in_clusterspec_propagation_;
    bool disable_thread_spinning_;
    bool share_cluster_devices_in_session_;
    bool optimize_for_static_graph_;
    bool enable_mlir_bridge_;
    int mlir_bridge_rollout_;
    ::int64_t xla_fusion_autotuner_thresh_;
    bool enable_mlir_graph_optimization_;
    bool disable_output_partition_graphs_;
    bool use_tfrt_;
    bool disable_functional_ops_lowering_;
    bool xla_prefer_single_graph_cluster_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
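// Example (editor's sketch, not emitted by protoc): this nested message is
// normally reached through ConfigProto::mutable_experimental(), declared
// further below; every setter used here is declared in the class above.
//
//   tensorflow::ConfigProto_Experimental exp;
//   exp.set_enable_mlir_bridge(true);
//   exp.set_mlir_bridge_rollout(
//       tensorflow::ConfigProto_Experimental::MLIR_BRIDGE_ROLLOUT_ENABLED);
//   exp.mutable_session_metadata()->set_name("example");  // placeholder name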
// -------------------------------------------------------------------

class ConfigProto final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.ConfigProto) */ {
 public:
  inline ConfigProto() : ConfigProto(nullptr) {}
  ~ConfigProto() override;
  explicit PROTOBUF_CONSTEXPR ConfigProto(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  ConfigProto(const ConfigProto& from);
  ConfigProto(ConfigProto&& from) noexcept
    : ConfigProto() {
    *this = ::std::move(from);
  }

  inline ConfigProto& operator=(const ConfigProto& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline ConfigProto& operator=(ConfigProto&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
#endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const ConfigProto& default_instance() {
    return *internal_default_instance();
  }
  static inline const ConfigProto* internal_default_instance() {
    return reinterpret_cast<const ConfigProto*>(
               &_ConfigProto_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    10;

  friend void swap(ConfigProto& a, ConfigProto& b) {
    a.Swap(&b);
  }
  inline void Swap(ConfigProto* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(ConfigProto* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  ConfigProto* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<ConfigProto>(arena);
  }
  ConfigProto* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const ConfigProto& from);
  void MergeFrom(const ConfigProto& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(ConfigProto* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.ConfigProto";
  }
 protected:
  explicit ConfigProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  typedef ConfigProto_Experimental Experimental;

  // accessors -------------------------------------------------------

  enum : int {
    kDeviceCountFieldNumber = 1,
    kDeviceFiltersFieldNumber = 4,
    kSessionInterOpThreadPoolFieldNumber = 12,
    kGpuOptionsFieldNumber = 6,
    kGraphOptionsFieldNumber = 10,
    kRpcOptionsFieldNumber = 13,
    kClusterDefFieldNumber = 14,
    kExperimentalFieldNumber = 16,
    kIntraOpParallelismThreadsFieldNumber = 2,
    kPlacementPeriodFieldNumber = 3,
    kInterOpParallelismThreadsFieldNumber = 5,
    kUsePerSessionThreadsFieldNumber = 9,
    kAllowSoftPlacementFieldNumber = 7,
    kLogDevicePlacementFieldNumber = 8,
    kIsolateSessionStateFieldNumber = 15,
    kOperationTimeoutInMsFieldNumber = 11,
    kShareClusterDevicesInSessionFieldNumber = 17,
  };
  // map<string, int32> device_count = 1;
  int device_count_size() const;
 private:
  int _internal_device_count_size() const;
 public:
  void clear_device_count();
 private:
  const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >&
      _internal_device_count() const;
  ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >*
      _internal_mutable_device_count();
 public:
  const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >&
      device_count() const;
  ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::int32_t >*
      mutable_device_count();

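  // Example (editor's sketch, not emitted by protoc): mutable_device_count()
  // returns a ::PROTOBUF_NAMESPACE_ID::Map, so entries can be written with
  // operator[]; the "CPU" key and the cap of 2 are illustrative values.
  //
  //   tensorflow::ConfigProto config;
  //   (*config.mutable_device_count())["CPU"] = 2;
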
  // repeated string device_filters = 4;
  int device_filters_size() const;
 private:
  int _internal_device_filters_size() const;
 public:
  void clear_device_filters();
  const std::string& device_filters(int index) const;
  std::string* mutable_device_filters(int index);
  void set_device_filters(int index, const std::string& value);
  void set_device_filters(int index, std::string&& value);
  void set_device_filters(int index, const char* value);
  void set_device_filters(int index, const char* value, size_t size);
  std::string* add_device_filters();
  void add_device_filters(const std::string& value);
  void add_device_filters(std::string&& value);
  void add_device_filters(const char* value);
  void add_device_filters(const char* value, size_t size);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& device_filters() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_device_filters();
 private:
  const std::string& _internal_device_filters(int index) const;
  std::string* _internal_add_device_filters();
 public:

  // repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
  int session_inter_op_thread_pool_size() const;
 private:
  int _internal_session_inter_op_thread_pool_size() const;
 public:
  void clear_session_inter_op_thread_pool();
  ::tensorflow::ThreadPoolOptionProto* mutable_session_inter_op_thread_pool(int index);
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::ThreadPoolOptionProto >*
      mutable_session_inter_op_thread_pool();
 private:
  const ::tensorflow::ThreadPoolOptionProto& _internal_session_inter_op_thread_pool(int index) const;
  ::tensorflow::ThreadPoolOptionProto* _internal_add_session_inter_op_thread_pool();
 public:
  const ::tensorflow::ThreadPoolOptionProto& session_inter_op_thread_pool(int index) const;
  ::tensorflow::ThreadPoolOptionProto* add_session_inter_op_thread_pool();
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::ThreadPoolOptionProto >&
      session_inter_op_thread_pool() const;

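  // Example (editor's sketch, not emitted by protoc): repeated message fields
  // grow through add_*(); both calls below are declared in this header.
  //
  //   tensorflow::ConfigProto config;
  //   tensorflow::ThreadPoolOptionProto* pool =
  //       config.add_session_inter_op_thread_pool();
  //   pool->set_num_threads(4);
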
  // .tensorflow.GPUOptions gpu_options = 6;
  bool has_gpu_options() const;
 private:
  bool _internal_has_gpu_options() const;
 public:
  void clear_gpu_options();
  const ::tensorflow::GPUOptions& gpu_options() const;
  PROTOBUF_NODISCARD ::tensorflow::GPUOptions* release_gpu_options();
  ::tensorflow::GPUOptions* mutable_gpu_options();
  void set_allocated_gpu_options(::tensorflow::GPUOptions* gpu_options);
 private:
  const ::tensorflow::GPUOptions& _internal_gpu_options() const;
  ::tensorflow::GPUOptions* _internal_mutable_gpu_options();
 public:
  void unsafe_arena_set_allocated_gpu_options(
      ::tensorflow::GPUOptions* gpu_options);
  ::tensorflow::GPUOptions* unsafe_arena_release_gpu_options();

  // .tensorflow.GraphOptions graph_options = 10;
  bool has_graph_options() const;
 private:
  bool _internal_has_graph_options() const;
 public:
  void clear_graph_options();
  const ::tensorflow::GraphOptions& graph_options() const;
  PROTOBUF_NODISCARD ::tensorflow::GraphOptions* release_graph_options();
  ::tensorflow::GraphOptions* mutable_graph_options();
  void set_allocated_graph_options(::tensorflow::GraphOptions* graph_options);
 private:
  const ::tensorflow::GraphOptions& _internal_graph_options() const;
  ::tensorflow::GraphOptions* _internal_mutable_graph_options();
 public:
  void unsafe_arena_set_allocated_graph_options(
      ::tensorflow::GraphOptions* graph_options);
  ::tensorflow::GraphOptions* unsafe_arena_release_graph_options();

  // .tensorflow.RPCOptions rpc_options = 13;
  bool has_rpc_options() const;
 private:
  bool _internal_has_rpc_options() const;
 public:
  void clear_rpc_options();
  const ::tensorflow::RPCOptions& rpc_options() const;
  PROTOBUF_NODISCARD ::tensorflow::RPCOptions* release_rpc_options();
  ::tensorflow::RPCOptions* mutable_rpc_options();
  void set_allocated_rpc_options(::tensorflow::RPCOptions* rpc_options);
 private:
  const ::tensorflow::RPCOptions& _internal_rpc_options() const;
  ::tensorflow::RPCOptions* _internal_mutable_rpc_options();
 public:
  void unsafe_arena_set_allocated_rpc_options(
      ::tensorflow::RPCOptions* rpc_options);
  ::tensorflow::RPCOptions* unsafe_arena_release_rpc_options();

  // .tensorflow.ClusterDef cluster_def = 14;
  bool has_cluster_def() const;
 private:
  bool _internal_has_cluster_def() const;
 public:
  void clear_cluster_def();
  const ::tensorflow::ClusterDef& cluster_def() const;
  PROTOBUF_NODISCARD ::tensorflow::ClusterDef* release_cluster_def();
  ::tensorflow::ClusterDef* mutable_cluster_def();
  void set_allocated_cluster_def(::tensorflow::ClusterDef* cluster_def);
 private:
  const ::tensorflow::ClusterDef& _internal_cluster_def() const;
  ::tensorflow::ClusterDef* _internal_mutable_cluster_def();
 public:
  void unsafe_arena_set_allocated_cluster_def(
      ::tensorflow::ClusterDef* cluster_def);
  ::tensorflow::ClusterDef* unsafe_arena_release_cluster_def();

  // .tensorflow.ConfigProto.Experimental experimental = 16;
  bool has_experimental() const;
 private:
  bool _internal_has_experimental() const;
 public:
  void clear_experimental();
  const ::tensorflow::ConfigProto_Experimental& experimental() const;
  PROTOBUF_NODISCARD ::tensorflow::ConfigProto_Experimental* release_experimental();
  ::tensorflow::ConfigProto_Experimental* mutable_experimental();
  void set_allocated_experimental(::tensorflow::ConfigProto_Experimental* experimental);
 private:
  const ::tensorflow::ConfigProto_Experimental& _internal_experimental() const;
  ::tensorflow::ConfigProto_Experimental* _internal_mutable_experimental();
 public:
  void unsafe_arena_set_allocated_experimental(
      ::tensorflow::ConfigProto_Experimental* experimental);
  ::tensorflow::ConfigProto_Experimental* unsafe_arena_release_experimental();

  // int32 intra_op_parallelism_threads = 2;
  void clear_intra_op_parallelism_threads();
  ::int32_t intra_op_parallelism_threads() const;
  void set_intra_op_parallelism_threads(::int32_t value);
 private:
  ::int32_t _internal_intra_op_parallelism_threads() const;
  void _internal_set_intra_op_parallelism_threads(::int32_t value);
 public:

  // int32 placement_period = 3;
  void clear_placement_period();
  ::int32_t placement_period() const;
  void set_placement_period(::int32_t value);
 private:
  ::int32_t _internal_placement_period() const;
  void _internal_set_placement_period(::int32_t value);
 public:

  // int32 inter_op_parallelism_threads = 5;
  void clear_inter_op_parallelism_threads();
  ::int32_t inter_op_parallelism_threads() const;
  void set_inter_op_parallelism_threads(::int32_t value);
 private:
  ::int32_t _internal_inter_op_parallelism_threads() const;
  void _internal_set_inter_op_parallelism_threads(::int32_t value);
 public:

  // bool use_per_session_threads = 9;
  void clear_use_per_session_threads();
  bool use_per_session_threads() const;
  void set_use_per_session_threads(bool value);
 private:
  bool _internal_use_per_session_threads() const;
  void _internal_set_use_per_session_threads(bool value);
 public:

  // bool allow_soft_placement = 7;
  void clear_allow_soft_placement();
  bool allow_soft_placement() const;
  void set_allow_soft_placement(bool value);
 private:
  bool _internal_allow_soft_placement() const;
  void _internal_set_allow_soft_placement(bool value);
 public:

  // bool log_device_placement = 8;
  void clear_log_device_placement();
  bool log_device_placement() const;
  void set_log_device_placement(bool value);
 private:
  bool _internal_log_device_placement() const;
  void _internal_set_log_device_placement(bool value);
 public:

2682 // bool isolate_session_state = 15;
2683 void clear_isolate_session_state();
2684 bool isolate_session_state() const;
2685 void set_isolate_session_state(bool value);
2686 private:
2687 bool _internal_isolate_session_state() const;
2688 void _internal_set_isolate_session_state(bool value);
2689 public:
2690
2691 // int64 operation_timeout_in_ms = 11;
2692 void clear_operation_timeout_in_ms();
2693 ::int64_t operation_timeout_in_ms() const;
2694 void set_operation_timeout_in_ms(::int64_t value);
2695 private:
2696 ::int64_t _internal_operation_timeout_in_ms() const;
2697 void _internal_set_operation_timeout_in_ms(::int64_t value);
2698 public:
2699
2700 // bool share_cluster_devices_in_session = 17;
2701 void clear_share_cluster_devices_in_session();
2702 bool share_cluster_devices_in_session() const;
2703 void set_share_cluster_devices_in_session(bool value);
2704 private:
2705 bool _internal_share_cluster_devices_in_session() const;
2706 void _internal_set_share_cluster_devices_in_session(bool value);
2707 public:
2708
2709 // @@protoc_insertion_point(class_scope:tensorflow.ConfigProto)
2710 private:
2711 class _Internal;
2712
2713 template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
2714 typedef void InternalArenaConstructable_;
2715 typedef void DestructorSkippable_;
2716 struct Impl_ {
2717 ::PROTOBUF_NAMESPACE_ID::internal::MapFieldLite<
2718 ConfigProto_DeviceCountEntry_DoNotUse,
2719 std::string, ::int32_t,
2720 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
2721 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32> device_count_;
2722 ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> device_filters_;
2723 ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::ThreadPoolOptionProto > session_inter_op_thread_pool_;
2724 ::tensorflow::GPUOptions* gpu_options_;
2725 ::tensorflow::GraphOptions* graph_options_;
2726 ::tensorflow::RPCOptions* rpc_options_;
2727 ::tensorflow::ClusterDef* cluster_def_;
2728 ::tensorflow::ConfigProto_Experimental* experimental_;
2729 ::int32_t intra_op_parallelism_threads_;
2730 ::int32_t placement_period_;
2731 ::int32_t inter_op_parallelism_threads_;
2732 bool use_per_session_threads_;
2733 bool allow_soft_placement_;
2734 bool log_device_placement_;
2735 bool isolate_session_state_;
2736 ::int64_t operation_timeout_in_ms_;
2737 bool share_cluster_devices_in_session_;
2738 mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
2739 };
2740 union { Impl_ _impl_; };
2741 friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
2742 };
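
// Example usage (illustrative sketch, not protoc output): building a
// ConfigProto with the accessors declared above. Only fields declared in
// this header are touched; handing the proto to a session is assumed to
// happen elsewhere.
//
//   tensorflow::ConfigProto config;
//   config.set_intra_op_parallelism_threads(4);  // int32 field 2
//   config.set_inter_op_parallelism_threads(2);  // int32 field 5
//   config.set_allow_soft_placement(true);       // bool field 7
//   config.set_operation_timeout_in_ms(60000);   // int64 field 11
//   // Submessages are created on first mutable_*() access:
//   tensorflow::GPUOptions* gpu = config.mutable_gpu_options();
//   (void)gpu;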
// -------------------------------------------------------------------

class RunOptions_Experimental_RunHandlerPoolOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions) */ {
 public:
  inline RunOptions_Experimental_RunHandlerPoolOptions() : RunOptions_Experimental_RunHandlerPoolOptions(nullptr) {}
  ~RunOptions_Experimental_RunHandlerPoolOptions() override;
  explicit PROTOBUF_CONSTEXPR RunOptions_Experimental_RunHandlerPoolOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  RunOptions_Experimental_RunHandlerPoolOptions(const RunOptions_Experimental_RunHandlerPoolOptions& from);
  RunOptions_Experimental_RunHandlerPoolOptions(RunOptions_Experimental_RunHandlerPoolOptions&& from) noexcept
    : RunOptions_Experimental_RunHandlerPoolOptions() {
    *this = ::std::move(from);
  }

  inline RunOptions_Experimental_RunHandlerPoolOptions& operator=(const RunOptions_Experimental_RunHandlerPoolOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline RunOptions_Experimental_RunHandlerPoolOptions& operator=(RunOptions_Experimental_RunHandlerPoolOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const RunOptions_Experimental_RunHandlerPoolOptions& default_instance() {
    return *internal_default_instance();
  }
  static inline const RunOptions_Experimental_RunHandlerPoolOptions* internal_default_instance() {
    return reinterpret_cast<const RunOptions_Experimental_RunHandlerPoolOptions*>(
               &_RunOptions_Experimental_RunHandlerPoolOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
      11;

  friend void swap(RunOptions_Experimental_RunHandlerPoolOptions& a, RunOptions_Experimental_RunHandlerPoolOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(RunOptions_Experimental_RunHandlerPoolOptions* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(RunOptions_Experimental_RunHandlerPoolOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  RunOptions_Experimental_RunHandlerPoolOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<RunOptions_Experimental_RunHandlerPoolOptions>(arena);
  }
  RunOptions_Experimental_RunHandlerPoolOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const RunOptions_Experimental_RunHandlerPoolOptions& from);
  void MergeFrom(const RunOptions_Experimental_RunHandlerPoolOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(RunOptions_Experimental_RunHandlerPoolOptions* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.RunOptions.Experimental.RunHandlerPoolOptions";
  }
  protected:
  explicit RunOptions_Experimental_RunHandlerPoolOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kPriorityFieldNumber = 1,
  };
  // int64 priority = 1;
  void clear_priority();
  ::int64_t priority() const;
  void set_priority(::int64_t value);
  private:
  ::int64_t _internal_priority() const;
  void _internal_set_priority(::int64_t value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::int64_t priority_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
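
// Example usage (illustrative sketch, not protoc output): the only field on
// this message is `priority` (int64 field 1).
//
//   tensorflow::RunOptions_Experimental_RunHandlerPoolOptions pool_opts;
//   pool_opts.set_priority(2);
//   ::int64_t p = pool_opts.priority();  // 2
//   pool_opts.clear_priority();          // back to the int64 default, 0
//   (void)p;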
// -------------------------------------------------------------------

class RunOptions_Experimental final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.RunOptions.Experimental) */ {
 public:
  inline RunOptions_Experimental() : RunOptions_Experimental(nullptr) {}
  ~RunOptions_Experimental() override;
  explicit PROTOBUF_CONSTEXPR RunOptions_Experimental(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  RunOptions_Experimental(const RunOptions_Experimental& from);
  RunOptions_Experimental(RunOptions_Experimental&& from) noexcept
    : RunOptions_Experimental() {
    *this = ::std::move(from);
  }

  inline RunOptions_Experimental& operator=(const RunOptions_Experimental& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline RunOptions_Experimental& operator=(RunOptions_Experimental&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const RunOptions_Experimental& default_instance() {
    return *internal_default_instance();
  }
  static inline const RunOptions_Experimental* internal_default_instance() {
    return reinterpret_cast<const RunOptions_Experimental*>(
               &_RunOptions_Experimental_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
      12;

  friend void swap(RunOptions_Experimental& a, RunOptions_Experimental& b) {
    a.Swap(&b);
  }
  inline void Swap(RunOptions_Experimental* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(RunOptions_Experimental* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  RunOptions_Experimental* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<RunOptions_Experimental>(arena);
  }
  RunOptions_Experimental* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const RunOptions_Experimental& from);
  void MergeFrom(const RunOptions_Experimental& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(RunOptions_Experimental* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.RunOptions.Experimental";
  }
  protected:
  explicit RunOptions_Experimental(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  typedef RunOptions_Experimental_RunHandlerPoolOptions RunHandlerPoolOptions;

  // accessors -------------------------------------------------------

  enum : int {
    kRunHandlerPoolOptionsFieldNumber = 3,
    kCollectiveGraphKeyFieldNumber = 1,
    kUseRunHandlerPoolFieldNumber = 2,
  };
  // .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;
  bool has_run_handler_pool_options() const;
  private:
  bool _internal_has_run_handler_pool_options() const;
  public:
  void clear_run_handler_pool_options();
  const ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions& run_handler_pool_options() const;
  PROTOBUF_NODISCARD ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* release_run_handler_pool_options();
  ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* mutable_run_handler_pool_options();
  void set_allocated_run_handler_pool_options(::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* run_handler_pool_options);
  private:
  const ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions& _internal_run_handler_pool_options() const;
  ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* _internal_mutable_run_handler_pool_options();
  public:
  void unsafe_arena_set_allocated_run_handler_pool_options(
      ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* run_handler_pool_options);
  ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* unsafe_arena_release_run_handler_pool_options();

  // int64 collective_graph_key = 1;
  void clear_collective_graph_key();
  ::int64_t collective_graph_key() const;
  void set_collective_graph_key(::int64_t value);
  private:
  ::int64_t _internal_collective_graph_key() const;
  void _internal_set_collective_graph_key(::int64_t value);
  public:

  // bool use_run_handler_pool = 2;
  void clear_use_run_handler_pool();
  bool use_run_handler_pool() const;
  void set_use_run_handler_pool(bool value);
  private:
  bool _internal_use_run_handler_pool() const;
  void _internal_set_use_run_handler_pool(bool value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.RunOptions.Experimental)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* run_handler_pool_options_;
    ::int64_t collective_graph_key_;
    bool use_run_handler_pool_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
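
// Example usage (illustrative sketch, not protoc output): note that
// mutable_run_handler_pool_options() creates the submessage on first use,
// after which has_run_handler_pool_options() returns true.
//
//   tensorflow::RunOptions_Experimental exp;
//   exp.set_collective_graph_key(42);
//   exp.set_use_run_handler_pool(true);
//   exp.mutable_run_handler_pool_options()->set_priority(1);
//   bool has = exp.has_run_handler_pool_options();  // true
//   (void)has;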
// -------------------------------------------------------------------

class RunOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.RunOptions) */ {
 public:
  inline RunOptions() : RunOptions(nullptr) {}
  ~RunOptions() override;
  explicit PROTOBUF_CONSTEXPR RunOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  RunOptions(const RunOptions& from);
  RunOptions(RunOptions&& from) noexcept
    : RunOptions() {
    *this = ::std::move(from);
  }

  inline RunOptions& operator=(const RunOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline RunOptions& operator=(RunOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const RunOptions& default_instance() {
    return *internal_default_instance();
  }
  static inline const RunOptions* internal_default_instance() {
    return reinterpret_cast<const RunOptions*>(
               &_RunOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
      13;

  friend void swap(RunOptions& a, RunOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(RunOptions* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(RunOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  RunOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<RunOptions>(arena);
  }
  RunOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const RunOptions& from);
  void MergeFrom(const RunOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(RunOptions* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.RunOptions";
  }
  protected:
  explicit RunOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  typedef RunOptions_Experimental Experimental;

  typedef RunOptions_TraceLevel TraceLevel;
  static constexpr TraceLevel NO_TRACE =
    RunOptions_TraceLevel_NO_TRACE;
  static constexpr TraceLevel SOFTWARE_TRACE =
    RunOptions_TraceLevel_SOFTWARE_TRACE;
  static constexpr TraceLevel HARDWARE_TRACE =
    RunOptions_TraceLevel_HARDWARE_TRACE;
  static constexpr TraceLevel FULL_TRACE =
    RunOptions_TraceLevel_FULL_TRACE;
  static inline bool TraceLevel_IsValid(int value) {
    return RunOptions_TraceLevel_IsValid(value);
  }
  static constexpr TraceLevel TraceLevel_MIN =
    RunOptions_TraceLevel_TraceLevel_MIN;
  static constexpr TraceLevel TraceLevel_MAX =
    RunOptions_TraceLevel_TraceLevel_MAX;
  static constexpr int TraceLevel_ARRAYSIZE =
    RunOptions_TraceLevel_TraceLevel_ARRAYSIZE;
  template<typename T>
  static inline const std::string& TraceLevel_Name(T enum_t_value) {
    static_assert(::std::is_same<T, TraceLevel>::value ||
      ::std::is_integral<T>::value,
      "Incorrect type passed to function TraceLevel_Name.");
    return RunOptions_TraceLevel_Name(enum_t_value);
  }
  static inline bool TraceLevel_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
      TraceLevel* value) {
    return RunOptions_TraceLevel_Parse(name, value);
  }

  // accessors -------------------------------------------------------

  enum : int {
    kDebugOptionsFieldNumber = 6,
    kExperimentalFieldNumber = 8,
    kTimeoutInMsFieldNumber = 2,
    kTraceLevelFieldNumber = 1,
    kInterOpThreadPoolFieldNumber = 3,
    kOutputPartitionGraphsFieldNumber = 5,
    kReportTensorAllocationsUponOomFieldNumber = 7,
  };
  // .tensorflow.DebugOptions debug_options = 6;
  bool has_debug_options() const;
  private:
  bool _internal_has_debug_options() const;
  public:
  void clear_debug_options();
  const ::tensorflow::DebugOptions& debug_options() const;
  PROTOBUF_NODISCARD ::tensorflow::DebugOptions* release_debug_options();
  ::tensorflow::DebugOptions* mutable_debug_options();
  void set_allocated_debug_options(::tensorflow::DebugOptions* debug_options);
  private:
  const ::tensorflow::DebugOptions& _internal_debug_options() const;
  ::tensorflow::DebugOptions* _internal_mutable_debug_options();
  public:
  void unsafe_arena_set_allocated_debug_options(
      ::tensorflow::DebugOptions* debug_options);
  ::tensorflow::DebugOptions* unsafe_arena_release_debug_options();

  // .tensorflow.RunOptions.Experimental experimental = 8;
  bool has_experimental() const;
  private:
  bool _internal_has_experimental() const;
  public:
  void clear_experimental();
  const ::tensorflow::RunOptions_Experimental& experimental() const;
  PROTOBUF_NODISCARD ::tensorflow::RunOptions_Experimental* release_experimental();
  ::tensorflow::RunOptions_Experimental* mutable_experimental();
  void set_allocated_experimental(::tensorflow::RunOptions_Experimental* experimental);
  private:
  const ::tensorflow::RunOptions_Experimental& _internal_experimental() const;
  ::tensorflow::RunOptions_Experimental* _internal_mutable_experimental();
  public:
  void unsafe_arena_set_allocated_experimental(
      ::tensorflow::RunOptions_Experimental* experimental);
  ::tensorflow::RunOptions_Experimental* unsafe_arena_release_experimental();

  // int64 timeout_in_ms = 2;
  void clear_timeout_in_ms();
  ::int64_t timeout_in_ms() const;
  void set_timeout_in_ms(::int64_t value);
  private:
  ::int64_t _internal_timeout_in_ms() const;
  void _internal_set_timeout_in_ms(::int64_t value);
  public:

  // .tensorflow.RunOptions.TraceLevel trace_level = 1;
  void clear_trace_level();
  ::tensorflow::RunOptions_TraceLevel trace_level() const;
  void set_trace_level(::tensorflow::RunOptions_TraceLevel value);
  private:
  ::tensorflow::RunOptions_TraceLevel _internal_trace_level() const;
  void _internal_set_trace_level(::tensorflow::RunOptions_TraceLevel value);
  public:

  // int32 inter_op_thread_pool = 3;
  void clear_inter_op_thread_pool();
  ::int32_t inter_op_thread_pool() const;
  void set_inter_op_thread_pool(::int32_t value);
  private:
  ::int32_t _internal_inter_op_thread_pool() const;
  void _internal_set_inter_op_thread_pool(::int32_t value);
  public:

  // bool output_partition_graphs = 5;
  void clear_output_partition_graphs();
  bool output_partition_graphs() const;
  void set_output_partition_graphs(bool value);
  private:
  bool _internal_output_partition_graphs() const;
  void _internal_set_output_partition_graphs(bool value);
  public:

  // bool report_tensor_allocations_upon_oom = 7;
  void clear_report_tensor_allocations_upon_oom();
  bool report_tensor_allocations_upon_oom() const;
  void set_report_tensor_allocations_upon_oom(bool value);
  private:
  bool _internal_report_tensor_allocations_upon_oom() const;
  void _internal_set_report_tensor_allocations_upon_oom(bool value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.RunOptions)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::tensorflow::DebugOptions* debug_options_;
    ::tensorflow::RunOptions_Experimental* experimental_;
    ::int64_t timeout_in_ms_;
    int trace_level_;
    ::int32_t inter_op_thread_pool_;
    bool output_partition_graphs_;
    bool report_tensor_allocations_upon_oom_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
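
// Example usage (illustrative sketch, not protoc output): the nested
// TraceLevel constants and the TraceLevel_Name() helper declared above.
//
//   tensorflow::RunOptions run_options;
//   run_options.set_trace_level(tensorflow::RunOptions::FULL_TRACE);
//   run_options.set_timeout_in_ms(10000);  // int64 field 2
//   run_options.set_output_partition_graphs(true);
//   const std::string& level =
//       tensorflow::RunOptions::TraceLevel_Name(run_options.trace_level());
//   (void)level;  // "FULL_TRACE"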
// -------------------------------------------------------------------

class RunMetadata_FunctionGraphs final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.RunMetadata.FunctionGraphs) */ {
 public:
  inline RunMetadata_FunctionGraphs() : RunMetadata_FunctionGraphs(nullptr) {}
  ~RunMetadata_FunctionGraphs() override;
  explicit PROTOBUF_CONSTEXPR RunMetadata_FunctionGraphs(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  RunMetadata_FunctionGraphs(const RunMetadata_FunctionGraphs& from);
  RunMetadata_FunctionGraphs(RunMetadata_FunctionGraphs&& from) noexcept
    : RunMetadata_FunctionGraphs() {
    *this = ::std::move(from);
  }

  inline RunMetadata_FunctionGraphs& operator=(const RunMetadata_FunctionGraphs& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline RunMetadata_FunctionGraphs& operator=(RunMetadata_FunctionGraphs&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const RunMetadata_FunctionGraphs& default_instance() {
    return *internal_default_instance();
  }
  static inline const RunMetadata_FunctionGraphs* internal_default_instance() {
    return reinterpret_cast<const RunMetadata_FunctionGraphs*>(
               &_RunMetadata_FunctionGraphs_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
      14;

  friend void swap(RunMetadata_FunctionGraphs& a, RunMetadata_FunctionGraphs& b) {
    a.Swap(&b);
  }
  inline void Swap(RunMetadata_FunctionGraphs* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(RunMetadata_FunctionGraphs* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  RunMetadata_FunctionGraphs* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<RunMetadata_FunctionGraphs>(arena);
  }
  RunMetadata_FunctionGraphs* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const RunMetadata_FunctionGraphs& from);
  void MergeFrom(const RunMetadata_FunctionGraphs& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(RunMetadata_FunctionGraphs* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.RunMetadata.FunctionGraphs";
  }
  protected:
  explicit RunMetadata_FunctionGraphs(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kPartitionGraphsFieldNumber = 1,
    kPreOptimizationGraphFieldNumber = 2,
    kPostOptimizationGraphFieldNumber = 3,
  };
  // repeated .tensorflow.GraphDef partition_graphs = 1;
  int partition_graphs_size() const;
  private:
  int _internal_partition_graphs_size() const;
  public:
  void clear_partition_graphs();
  ::tensorflow::GraphDef* mutable_partition_graphs(int index);
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef >*
      mutable_partition_graphs();
  private:
  const ::tensorflow::GraphDef& _internal_partition_graphs(int index) const;
  ::tensorflow::GraphDef* _internal_add_partition_graphs();
  public:
  const ::tensorflow::GraphDef& partition_graphs(int index) const;
  ::tensorflow::GraphDef* add_partition_graphs();
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef >&
      partition_graphs() const;

  // .tensorflow.GraphDef pre_optimization_graph = 2;
  bool has_pre_optimization_graph() const;
  private:
  bool _internal_has_pre_optimization_graph() const;
  public:
  void clear_pre_optimization_graph();
  const ::tensorflow::GraphDef& pre_optimization_graph() const;
  PROTOBUF_NODISCARD ::tensorflow::GraphDef* release_pre_optimization_graph();
  ::tensorflow::GraphDef* mutable_pre_optimization_graph();
  void set_allocated_pre_optimization_graph(::tensorflow::GraphDef* pre_optimization_graph);
  private:
  const ::tensorflow::GraphDef& _internal_pre_optimization_graph() const;
  ::tensorflow::GraphDef* _internal_mutable_pre_optimization_graph();
  public:
  void unsafe_arena_set_allocated_pre_optimization_graph(
      ::tensorflow::GraphDef* pre_optimization_graph);
  ::tensorflow::GraphDef* unsafe_arena_release_pre_optimization_graph();

  // .tensorflow.GraphDef post_optimization_graph = 3;
  bool has_post_optimization_graph() const;
  private:
  bool _internal_has_post_optimization_graph() const;
  public:
  void clear_post_optimization_graph();
  const ::tensorflow::GraphDef& post_optimization_graph() const;
  PROTOBUF_NODISCARD ::tensorflow::GraphDef* release_post_optimization_graph();
  ::tensorflow::GraphDef* mutable_post_optimization_graph();
  void set_allocated_post_optimization_graph(::tensorflow::GraphDef* post_optimization_graph);
  private:
  const ::tensorflow::GraphDef& _internal_post_optimization_graph() const;
  ::tensorflow::GraphDef* _internal_mutable_post_optimization_graph();
  public:
  void unsafe_arena_set_allocated_post_optimization_graph(
      ::tensorflow::GraphDef* post_optimization_graph);
  ::tensorflow::GraphDef* unsafe_arena_release_post_optimization_graph();

  // @@protoc_insertion_point(class_scope:tensorflow.RunMetadata.FunctionGraphs)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef > partition_graphs_;
    ::tensorflow::GraphDef* pre_optimization_graph_;
    ::tensorflow::GraphDef* post_optimization_graph_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
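
// Example usage (illustrative sketch, not protoc output): reading the
// repeated and singular GraphDef fields of a FunctionGraphs entry that the
// runtime has already populated.
//
//   void InspectFunctionGraphs(
//       const tensorflow::RunMetadata_FunctionGraphs& fg) {
//     for (int i = 0; i < fg.partition_graphs_size(); ++i) {
//       const tensorflow::GraphDef& g = fg.partition_graphs(i);
//       (void)g;  // e.g. serialize g for debugging
//     }
//     if (fg.has_pre_optimization_graph()) {
//       const tensorflow::GraphDef& pre = fg.pre_optimization_graph();
//       (void)pre;
//     }
//   }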
// -------------------------------------------------------------------

class RunMetadata final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.RunMetadata) */ {
 public:
  inline RunMetadata() : RunMetadata(nullptr) {}
  ~RunMetadata() override;
  explicit PROTOBUF_CONSTEXPR RunMetadata(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  RunMetadata(const RunMetadata& from);
  RunMetadata(RunMetadata&& from) noexcept
    : RunMetadata() {
    *this = ::std::move(from);
  }

  inline RunMetadata& operator=(const RunMetadata& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline RunMetadata& operator=(RunMetadata&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const RunMetadata& default_instance() {
    return *internal_default_instance();
  }
  static inline const RunMetadata* internal_default_instance() {
    return reinterpret_cast<const RunMetadata*>(
               &_RunMetadata_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
      15;

  friend void swap(RunMetadata& a, RunMetadata& b) {
    a.Swap(&b);
  }
  inline void Swap(RunMetadata* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(RunMetadata* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  RunMetadata* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<RunMetadata>(arena);
  }
  RunMetadata* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const RunMetadata& from);
  void MergeFrom(const RunMetadata& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(RunMetadata* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.RunMetadata";
  }
  protected:
  explicit RunMetadata(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  typedef RunMetadata_FunctionGraphs FunctionGraphs;

  // accessors -------------------------------------------------------

  enum : int {
    kPartitionGraphsFieldNumber = 3,
    kFunctionGraphsFieldNumber = 4,
    kStepStatsFieldNumber = 1,
    kCostGraphFieldNumber = 2,
    kSessionMetadataFieldNumber = 5,
  };
  // repeated .tensorflow.GraphDef partition_graphs = 3;
  int partition_graphs_size() const;
  private:
  int _internal_partition_graphs_size() const;
  public:
  void clear_partition_graphs();
  ::tensorflow::GraphDef* mutable_partition_graphs(int index);
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef >*
      mutable_partition_graphs();
  private:
  const ::tensorflow::GraphDef& _internal_partition_graphs(int index) const;
  ::tensorflow::GraphDef* _internal_add_partition_graphs();
  public:
  const ::tensorflow::GraphDef& partition_graphs(int index) const;
  ::tensorflow::GraphDef* add_partition_graphs();
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef >&
      partition_graphs() const;

  // repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;
  int function_graphs_size() const;
  private:
  int _internal_function_graphs_size() const;
  public:
  void clear_function_graphs();
  ::tensorflow::RunMetadata_FunctionGraphs* mutable_function_graphs(int index);
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::RunMetadata_FunctionGraphs >*
      mutable_function_graphs();
  private:
  const ::tensorflow::RunMetadata_FunctionGraphs& _internal_function_graphs(int index) const;
  ::tensorflow::RunMetadata_FunctionGraphs* _internal_add_function_graphs();
  public:
  const ::tensorflow::RunMetadata_FunctionGraphs& function_graphs(int index) const;
  ::tensorflow::RunMetadata_FunctionGraphs* add_function_graphs();
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::RunMetadata_FunctionGraphs >&
      function_graphs() const;

  // .tensorflow.StepStats step_stats = 1;
  bool has_step_stats() const;
  private:
  bool _internal_has_step_stats() const;
  public:
  void clear_step_stats();
  const ::tensorflow::StepStats& step_stats() const;
  PROTOBUF_NODISCARD ::tensorflow::StepStats* release_step_stats();
  ::tensorflow::StepStats* mutable_step_stats();
  void set_allocated_step_stats(::tensorflow::StepStats* step_stats);
  private:
  const ::tensorflow::StepStats& _internal_step_stats() const;
  ::tensorflow::StepStats* _internal_mutable_step_stats();
  public:
  void unsafe_arena_set_allocated_step_stats(
      ::tensorflow::StepStats* step_stats);
  ::tensorflow::StepStats* unsafe_arena_release_step_stats();

  // .tensorflow.CostGraphDef cost_graph = 2;
  bool has_cost_graph() const;
  private:
  bool _internal_has_cost_graph() const;
  public:
  void clear_cost_graph();
  const ::tensorflow::CostGraphDef& cost_graph() const;
  PROTOBUF_NODISCARD ::tensorflow::CostGraphDef* release_cost_graph();
  ::tensorflow::CostGraphDef* mutable_cost_graph();
  void set_allocated_cost_graph(::tensorflow::CostGraphDef* cost_graph);
  private:
  const ::tensorflow::CostGraphDef& _internal_cost_graph() const;
  ::tensorflow::CostGraphDef* _internal_mutable_cost_graph();
  public:
  void unsafe_arena_set_allocated_cost_graph(
      ::tensorflow::CostGraphDef* cost_graph);
  ::tensorflow::CostGraphDef* unsafe_arena_release_cost_graph();

  // .tensorflow.SessionMetadata session_metadata = 5;
  bool has_session_metadata() const;
  private:
  bool _internal_has_session_metadata() const;
  public:
  void clear_session_metadata();
  const ::tensorflow::SessionMetadata& session_metadata() const;
  PROTOBUF_NODISCARD ::tensorflow::SessionMetadata* release_session_metadata();
  ::tensorflow::SessionMetadata* mutable_session_metadata();
  void set_allocated_session_metadata(::tensorflow::SessionMetadata* session_metadata);
  private:
  const ::tensorflow::SessionMetadata& _internal_session_metadata() const;
  ::tensorflow::SessionMetadata* _internal_mutable_session_metadata();
  public:
  void unsafe_arena_set_allocated_session_metadata(
      ::tensorflow::SessionMetadata* session_metadata);
  ::tensorflow::SessionMetadata* unsafe_arena_release_session_metadata();

  // @@protoc_insertion_point(class_scope:tensorflow.RunMetadata)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GraphDef > partition_graphs_;
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::RunMetadata_FunctionGraphs > function_graphs_;
    ::tensorflow::StepStats* step_stats_;
    ::tensorflow::CostGraphDef* cost_graph_;
    ::tensorflow::SessionMetadata* session_metadata_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
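
// Example usage (illustrative sketch, not protoc output): inspecting a
// RunMetadata filled in by a traced run. function_graphs() returns a
// RepeatedPtrField, so range-for works directly.
//
//   void InspectRunMetadata(const tensorflow::RunMetadata& md) {
//     if (md.has_step_stats()) {
//       const tensorflow::StepStats& stats = md.step_stats();
//       (void)stats;
//     }
//     for (const tensorflow::RunMetadata_FunctionGraphs& fg :
//          md.function_graphs()) {
//       (void)fg;
//     }
//   }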
// -------------------------------------------------------------------

class TensorConnection final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.TensorConnection) */ {
 public:
  inline TensorConnection() : TensorConnection(nullptr) {}
  ~TensorConnection() override;
  explicit PROTOBUF_CONSTEXPR TensorConnection(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  TensorConnection(const TensorConnection& from);
  TensorConnection(TensorConnection&& from) noexcept
    : TensorConnection() {
    *this = ::std::move(from);
  }

  inline TensorConnection& operator=(const TensorConnection& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline TensorConnection& operator=(TensorConnection&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const TensorConnection& default_instance() {
    return *internal_default_instance();
  }
  static inline const TensorConnection* internal_default_instance() {
    return reinterpret_cast<const TensorConnection*>(
               &_TensorConnection_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
      16;

  friend void swap(TensorConnection& a, TensorConnection& b) {
    a.Swap(&b);
  }
  inline void Swap(TensorConnection* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(TensorConnection* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  TensorConnection* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<TensorConnection>(arena);
  }
  TensorConnection* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const TensorConnection& from);
  void MergeFrom(const TensorConnection& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(TensorConnection* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.TensorConnection";
  }
  protected:
  explicit TensorConnection(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kFromTensorFieldNumber = 1,
    kToTensorFieldNumber = 2,
  };
  // string from_tensor = 1;
  void clear_from_tensor();
  const std::string& from_tensor() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_from_tensor(ArgT0&& arg0, ArgT... args);
  std::string* mutable_from_tensor();
  PROTOBUF_NODISCARD std::string* release_from_tensor();
  void set_allocated_from_tensor(std::string* from_tensor);
  private:
  const std::string& _internal_from_tensor() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_from_tensor(const std::string& value);
  std::string* _internal_mutable_from_tensor();
  public:

  // string to_tensor = 2;
  void clear_to_tensor();
  const std::string& to_tensor() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_to_tensor(ArgT0&& arg0, ArgT... args);
  std::string* mutable_to_tensor();
  PROTOBUF_NODISCARD std::string* release_to_tensor();
  void set_allocated_to_tensor(std::string* to_tensor);
  private:
  const std::string& _internal_to_tensor() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_to_tensor(const std::string& value);
  std::string* _internal_mutable_to_tensor();
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.TensorConnection)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr from_tensor_;
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr to_tensor_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
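
// Example usage (illustrative sketch, not protoc output): both fields hold
// tensor endpoint names, conventionally of the form "<node>:<output index>".
//
//   tensorflow::TensorConnection conn;
//   conn.set_from_tensor("a:0");  // endpoint whose value is used
//   conn.set_to_tensor("b:0");    // endpoint that will be fed from it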
// -------------------------------------------------------------------

class CallableOptions_FeedDevicesEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntryLite<CallableOptions_FeedDevicesEntry_DoNotUse,
    std::string, std::string,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> {
public:
  typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntryLite<CallableOptions_FeedDevicesEntry_DoNotUse,
    std::string, std::string,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> SuperType;
  CallableOptions_FeedDevicesEntry_DoNotUse();
  explicit PROTOBUF_CONSTEXPR CallableOptions_FeedDevicesEntry_DoNotUse(
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
  explicit CallableOptions_FeedDevicesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  void MergeFrom(const CallableOptions_FeedDevicesEntry_DoNotUse& other);
  static const CallableOptions_FeedDevicesEntry_DoNotUse* internal_default_instance() { return reinterpret_cast<const CallableOptions_FeedDevicesEntry_DoNotUse*>(&_CallableOptions_FeedDevicesEntry_DoNotUse_default_instance_); }
  static bool ValidateKey(std::string* s) {
    return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast<int>(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.CallableOptions.FeedDevicesEntry.key");
  }
  static bool ValidateValue(std::string* s) {
    return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast<int>(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.CallableOptions.FeedDevicesEntry.value");
  }
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};

// -------------------------------------------------------------------

class CallableOptions_FetchDevicesEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntryLite<CallableOptions_FetchDevicesEntry_DoNotUse,
    std::string, std::string,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> {
public:
  typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntryLite<CallableOptions_FetchDevicesEntry_DoNotUse,
    std::string, std::string,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> SuperType;
  CallableOptions_FetchDevicesEntry_DoNotUse();
  explicit PROTOBUF_CONSTEXPR CallableOptions_FetchDevicesEntry_DoNotUse(
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
  explicit CallableOptions_FetchDevicesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena);
  void MergeFrom(const CallableOptions_FetchDevicesEntry_DoNotUse& other);
  static const CallableOptions_FetchDevicesEntry_DoNotUse* internal_default_instance() { return reinterpret_cast<const CallableOptions_FetchDevicesEntry_DoNotUse*>(&_CallableOptions_FetchDevicesEntry_DoNotUse_default_instance_); }
  static bool ValidateKey(std::string* s) {
    return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast<int>(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.CallableOptions.FetchDevicesEntry.key");
  }
  static bool ValidateValue(std::string* s) {
    return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast<int>(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.CallableOptions.FetchDevicesEntry.value");
  }
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};

// -------------------------------------------------------------------

class CallableOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.CallableOptions) */ {
 public:
  inline CallableOptions() : CallableOptions(nullptr) {}
  ~CallableOptions() override;
  explicit PROTOBUF_CONSTEXPR CallableOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  CallableOptions(const CallableOptions& from);
  CallableOptions(CallableOptions&& from) noexcept
    : CallableOptions() {
    *this = ::std::move(from);
  }

  inline CallableOptions& operator=(const CallableOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline CallableOptions& operator=(CallableOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
#endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const CallableOptions& default_instance() {
    return *internal_default_instance();
  }
  static inline const CallableOptions* internal_default_instance() {
    return reinterpret_cast<const CallableOptions*>(
               &_CallableOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    19;

  friend void swap(CallableOptions& a, CallableOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(CallableOptions* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(CallableOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  CallableOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<CallableOptions>(arena);
  }
  CallableOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const CallableOptions& from);
  void MergeFrom(const CallableOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(CallableOptions* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.CallableOptions";
  }
  protected:
  explicit CallableOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                           bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------


  // accessors -------------------------------------------------------

  enum : int {
    kFeedFieldNumber = 1,
    kFetchFieldNumber = 2,
    kTargetFieldNumber = 3,
    kTensorConnectionFieldNumber = 5,
    kFeedDevicesFieldNumber = 6,
    kFetchDevicesFieldNumber = 7,
    kRunOptionsFieldNumber = 4,
    kFetchSkipSyncFieldNumber = 8,
  };
  // repeated string feed = 1;
  int feed_size() const;
  private:
  int _internal_feed_size() const;
  public:
  void clear_feed();
  const std::string& feed(int index) const;
  std::string* mutable_feed(int index);
  void set_feed(int index, const std::string& value);
  void set_feed(int index, std::string&& value);
  void set_feed(int index, const char* value);
  void set_feed(int index, const char* value, size_t size);
  std::string* add_feed();
  void add_feed(const std::string& value);
  void add_feed(std::string&& value);
  void add_feed(const char* value);
  void add_feed(const char* value, size_t size);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& feed() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_feed();
  private:
  const std::string& _internal_feed(int index) const;
  std::string* _internal_add_feed();
  public:

  // repeated string fetch = 2;
  int fetch_size() const;
  private:
  int _internal_fetch_size() const;
  public:
  void clear_fetch();
  const std::string& fetch(int index) const;
  std::string* mutable_fetch(int index);
  void set_fetch(int index, const std::string& value);
  void set_fetch(int index, std::string&& value);
  void set_fetch(int index, const char* value);
  void set_fetch(int index, const char* value, size_t size);
  std::string* add_fetch();
  void add_fetch(const std::string& value);
  void add_fetch(std::string&& value);
  void add_fetch(const char* value);
  void add_fetch(const char* value, size_t size);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& fetch() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_fetch();
  private:
  const std::string& _internal_fetch(int index) const;
  std::string* _internal_add_fetch();
  public:

  // repeated string target = 3;
  int target_size() const;
  private:
  int _internal_target_size() const;
  public:
  void clear_target();
  const std::string& target(int index) const;
  std::string* mutable_target(int index);
  void set_target(int index, const std::string& value);
  void set_target(int index, std::string&& value);
  void set_target(int index, const char* value);
  void set_target(int index, const char* value, size_t size);
  std::string* add_target();
  void add_target(const std::string& value);
  void add_target(std::string&& value);
  void add_target(const char* value);
  void add_target(const char* value, size_t size);
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& target() const;
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_target();
  private:
  const std::string& _internal_target(int index) const;
  std::string* _internal_add_target();
  public:

  // repeated .tensorflow.TensorConnection tensor_connection = 5;
  int tensor_connection_size() const;
  private:
  int _internal_tensor_connection_size() const;
  public:
  void clear_tensor_connection();
  ::tensorflow::TensorConnection* mutable_tensor_connection(int index);
  ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::TensorConnection >*
      mutable_tensor_connection();
  private:
  const ::tensorflow::TensorConnection& _internal_tensor_connection(int index) const;
  ::tensorflow::TensorConnection* _internal_add_tensor_connection();
  public:
  const ::tensorflow::TensorConnection& tensor_connection(int index) const;
  ::tensorflow::TensorConnection* add_tensor_connection();
  const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::TensorConnection >&
      tensor_connection() const;

  // map<string, string> feed_devices = 6;
  int feed_devices_size() const;
  private:
  int _internal_feed_devices_size() const;
  public:
  void clear_feed_devices();
  private:
  const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >&
      _internal_feed_devices() const;
  ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >*
      _internal_mutable_feed_devices();
  public:
  const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >&
      feed_devices() const;
  ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >*
      mutable_feed_devices();

  // map<string, string> fetch_devices = 7;
  int fetch_devices_size() const;
  private:
  int _internal_fetch_devices_size() const;
  public:
  void clear_fetch_devices();
  private:
  const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >&
      _internal_fetch_devices() const;
  ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >*
      _internal_mutable_fetch_devices();
  public:
  const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >&
      fetch_devices() const;
  ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >*
      mutable_fetch_devices();

  // .tensorflow.RunOptions run_options = 4;
  bool has_run_options() const;
  private:
  bool _internal_has_run_options() const;
  public:
  void clear_run_options();
  const ::tensorflow::RunOptions& run_options() const;
  PROTOBUF_NODISCARD ::tensorflow::RunOptions* release_run_options();
  ::tensorflow::RunOptions* mutable_run_options();
  void set_allocated_run_options(::tensorflow::RunOptions* run_options);
  private:
  const ::tensorflow::RunOptions& _internal_run_options() const;
  ::tensorflow::RunOptions* _internal_mutable_run_options();
  public:
  void unsafe_arena_set_allocated_run_options(
      ::tensorflow::RunOptions* run_options);
  ::tensorflow::RunOptions* unsafe_arena_release_run_options();

  // bool fetch_skip_sync = 8;
  void clear_fetch_skip_sync();
  bool fetch_skip_sync() const;
  void set_fetch_skip_sync(bool value);
  private:
  bool _internal_fetch_skip_sync() const;
  void _internal_set_fetch_skip_sync(bool value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.CallableOptions)
  private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> feed_;
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> fetch_;
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> target_;
    ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::TensorConnection > tensor_connection_;
    ::PROTOBUF_NAMESPACE_ID::internal::MapFieldLite<
        CallableOptions_FeedDevicesEntry_DoNotUse,
        std::string, std::string,
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> feed_devices_;
    ::PROTOBUF_NAMESPACE_ID::internal::MapFieldLite<
        CallableOptions_FetchDevicesEntry_DoNotUse,
        std::string, std::string,
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> fetch_devices_;
    ::tensorflow::RunOptions* run_options_;
    bool fetch_skip_sync_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
};
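
// Illustrative usage sketch (comment added for readability; protoc does not
// emit it). A CallableOptions message is typically filled in and handed to
// tensorflow::Session::MakeCallable() in the TensorFlow C++ session API:
//
//   tensorflow::CallableOptions opts;
//   opts.add_feed("x:0");              // tensors the caller will supply
//   opts.add_fetch("y:0");             // tensors returned to the caller
//   opts.add_target("init_op");        // ops run only for side effects
//   (*opts.mutable_feed_devices())["x:0"] =
//       "/job:localhost/replica:0/task:0/device:GPU:0";
//   opts.mutable_run_options()->set_trace_level(
//       tensorflow::RunOptions::FULL_TRACE);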
// ===================================================================


// ===================================================================

#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif  // __GNUC__
// GPUOptions_Experimental_VirtualDevices

// repeated float memory_limit_mb = 1;
inline int GPUOptions_Experimental_VirtualDevices::_internal_memory_limit_mb_size() const {
  return _impl_.memory_limit_mb_.size();
}
inline int GPUOptions_Experimental_VirtualDevices::memory_limit_mb_size() const {
  return _internal_memory_limit_mb_size();
}
inline void GPUOptions_Experimental_VirtualDevices::clear_memory_limit_mb() {
  _impl_.memory_limit_mb_.Clear();
}
inline float GPUOptions_Experimental_VirtualDevices::_internal_memory_limit_mb(int index) const {
  return _impl_.memory_limit_mb_.Get(index);
}
inline float GPUOptions_Experimental_VirtualDevices::memory_limit_mb(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.VirtualDevices.memory_limit_mb)
  return _internal_memory_limit_mb(index);
}
inline void GPUOptions_Experimental_VirtualDevices::set_memory_limit_mb(int index, float value) {
  _impl_.memory_limit_mb_.Set(index, value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.VirtualDevices.memory_limit_mb)
}
inline void GPUOptions_Experimental_VirtualDevices::_internal_add_memory_limit_mb(float value) {
  _impl_.memory_limit_mb_.Add(value);
}
inline void GPUOptions_Experimental_VirtualDevices::add_memory_limit_mb(float value) {
  _internal_add_memory_limit_mb(value);
  // @@protoc_insertion_point(field_add:tensorflow.GPUOptions.Experimental.VirtualDevices.memory_limit_mb)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >&
GPUOptions_Experimental_VirtualDevices::_internal_memory_limit_mb() const {
  return _impl_.memory_limit_mb_;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >&
GPUOptions_Experimental_VirtualDevices::memory_limit_mb() const {
  // @@protoc_insertion_point(field_list:tensorflow.GPUOptions.Experimental.VirtualDevices.memory_limit_mb)
  return _internal_memory_limit_mb();
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >*
GPUOptions_Experimental_VirtualDevices::_internal_mutable_memory_limit_mb() {
  return &_impl_.memory_limit_mb_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< float >*
GPUOptions_Experimental_VirtualDevices::mutable_memory_limit_mb() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.GPUOptions.Experimental.VirtualDevices.memory_limit_mb)
  return _internal_mutable_memory_limit_mb();
}

// repeated int32 priority = 2;
inline int GPUOptions_Experimental_VirtualDevices::_internal_priority_size() const {
  return _impl_.priority_.size();
}
inline int GPUOptions_Experimental_VirtualDevices::priority_size() const {
  return _internal_priority_size();
}
inline void GPUOptions_Experimental_VirtualDevices::clear_priority() {
  _impl_.priority_.Clear();
}
inline ::int32_t GPUOptions_Experimental_VirtualDevices::_internal_priority(int index) const {
  return _impl_.priority_.Get(index);
}
inline ::int32_t GPUOptions_Experimental_VirtualDevices::priority(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.VirtualDevices.priority)
  return _internal_priority(index);
}
inline void GPUOptions_Experimental_VirtualDevices::set_priority(int index, ::int32_t value) {
  _impl_.priority_.Set(index, value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.VirtualDevices.priority)
}
inline void GPUOptions_Experimental_VirtualDevices::_internal_add_priority(::int32_t value) {
  _impl_.priority_.Add(value);
}
inline void GPUOptions_Experimental_VirtualDevices::add_priority(::int32_t value) {
  _internal_add_priority(value);
  // @@protoc_insertion_point(field_add:tensorflow.GPUOptions.Experimental.VirtualDevices.priority)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
GPUOptions_Experimental_VirtualDevices::_internal_priority() const {
  return _impl_.priority_;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
GPUOptions_Experimental_VirtualDevices::priority() const {
  // @@protoc_insertion_point(field_list:tensorflow.GPUOptions.Experimental.VirtualDevices.priority)
  return _internal_priority();
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
GPUOptions_Experimental_VirtualDevices::_internal_mutable_priority() {
  return &_impl_.priority_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
GPUOptions_Experimental_VirtualDevices::mutable_priority() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.GPUOptions.Experimental.VirtualDevices.priority)
  return _internal_mutable_priority();
}

// repeated int32 device_ordinal = 3;
inline int GPUOptions_Experimental_VirtualDevices::_internal_device_ordinal_size() const {
  return _impl_.device_ordinal_.size();
}
inline int GPUOptions_Experimental_VirtualDevices::device_ordinal_size() const {
  return _internal_device_ordinal_size();
}
inline void GPUOptions_Experimental_VirtualDevices::clear_device_ordinal() {
  _impl_.device_ordinal_.Clear();
}
inline ::int32_t GPUOptions_Experimental_VirtualDevices::_internal_device_ordinal(int index) const {
  return _impl_.device_ordinal_.Get(index);
}
inline ::int32_t GPUOptions_Experimental_VirtualDevices::device_ordinal(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.VirtualDevices.device_ordinal)
  return _internal_device_ordinal(index);
}
inline void GPUOptions_Experimental_VirtualDevices::set_device_ordinal(int index, ::int32_t value) {
  _impl_.device_ordinal_.Set(index, value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.VirtualDevices.device_ordinal)
}
inline void GPUOptions_Experimental_VirtualDevices::_internal_add_device_ordinal(::int32_t value) {
  _impl_.device_ordinal_.Add(value);
}
inline void GPUOptions_Experimental_VirtualDevices::add_device_ordinal(::int32_t value) {
  _internal_add_device_ordinal(value);
  // @@protoc_insertion_point(field_add:tensorflow.GPUOptions.Experimental.VirtualDevices.device_ordinal)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
GPUOptions_Experimental_VirtualDevices::_internal_device_ordinal() const {
  return _impl_.device_ordinal_;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >&
GPUOptions_Experimental_VirtualDevices::device_ordinal() const {
  // @@protoc_insertion_point(field_list:tensorflow.GPUOptions.Experimental.VirtualDevices.device_ordinal)
  return _internal_device_ordinal();
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
GPUOptions_Experimental_VirtualDevices::_internal_mutable_device_ordinal() {
  return &_impl_.device_ordinal_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::int32_t >*
GPUOptions_Experimental_VirtualDevices::mutable_device_ordinal() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.GPUOptions.Experimental.VirtualDevices.device_ordinal)
  return _internal_mutable_device_ordinal();
}
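
// Illustrative usage sketch (comment added here; protoc does not emit it).
// The repeated fields above are parallel lists, one entry per virtual
// device carved out of a single physical GPU:
//
//   tensorflow::GPUOptions_Experimental_VirtualDevices vdevs;
//   vdevs.add_memory_limit_mb(1024.0f);  // virtual device 0: 1 GB
//   vdevs.add_memory_limit_mb(2048.0f);  // virtual device 1: 2 GB
//   vdevs.add_priority(0);               // 0 is the default stream priority
//   vdevs.add_priority(-1);              // CUDA treats lower values as higher priority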

// -------------------------------------------------------------------

// GPUOptions_Experimental

// repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
inline int GPUOptions_Experimental::_internal_virtual_devices_size() const {
  return _impl_.virtual_devices_.size();
}
inline int GPUOptions_Experimental::virtual_devices_size() const {
  return _internal_virtual_devices_size();
}
inline void GPUOptions_Experimental::clear_virtual_devices() {
  _impl_.virtual_devices_.Clear();
}
inline ::tensorflow::GPUOptions_Experimental_VirtualDevices* GPUOptions_Experimental::mutable_virtual_devices(int index) {
  // @@protoc_insertion_point(field_mutable:tensorflow.GPUOptions.Experimental.virtual_devices)
  return _impl_.virtual_devices_.Mutable(index);
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GPUOptions_Experimental_VirtualDevices >*
GPUOptions_Experimental::mutable_virtual_devices() {
  // @@protoc_insertion_point(field_mutable_list:tensorflow.GPUOptions.Experimental.virtual_devices)
  return &_impl_.virtual_devices_;
}
inline const ::tensorflow::GPUOptions_Experimental_VirtualDevices& GPUOptions_Experimental::_internal_virtual_devices(int index) const {
  return _impl_.virtual_devices_.Get(index);
}
inline const ::tensorflow::GPUOptions_Experimental_VirtualDevices& GPUOptions_Experimental::virtual_devices(int index) const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.virtual_devices)
  return _internal_virtual_devices(index);
}
inline ::tensorflow::GPUOptions_Experimental_VirtualDevices* GPUOptions_Experimental::_internal_add_virtual_devices() {
  return _impl_.virtual_devices_.Add();
}
inline ::tensorflow::GPUOptions_Experimental_VirtualDevices* GPUOptions_Experimental::add_virtual_devices() {
  ::tensorflow::GPUOptions_Experimental_VirtualDevices* _add = _internal_add_virtual_devices();
  // @@protoc_insertion_point(field_add:tensorflow.GPUOptions.Experimental.virtual_devices)
  return _add;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::GPUOptions_Experimental_VirtualDevices >&
GPUOptions_Experimental::virtual_devices() const {
  // @@protoc_insertion_point(field_list:tensorflow.GPUOptions.Experimental.virtual_devices)
  return _impl_.virtual_devices_;
}

// bool use_unified_memory = 2;
inline void GPUOptions_Experimental::clear_use_unified_memory() {
  _impl_.use_unified_memory_ = false;
}
inline bool GPUOptions_Experimental::_internal_use_unified_memory() const {
  return _impl_.use_unified_memory_;
}
inline bool GPUOptions_Experimental::use_unified_memory() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.use_unified_memory)
  return _internal_use_unified_memory();
}
inline void GPUOptions_Experimental::_internal_set_use_unified_memory(bool value) {

  _impl_.use_unified_memory_ = value;
}
inline void GPUOptions_Experimental::set_use_unified_memory(bool value) {
  _internal_set_use_unified_memory(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.use_unified_memory)
}

// int32 num_dev_to_dev_copy_streams = 3;
inline void GPUOptions_Experimental::clear_num_dev_to_dev_copy_streams() {
  _impl_.num_dev_to_dev_copy_streams_ = 0;
}
inline ::int32_t GPUOptions_Experimental::_internal_num_dev_to_dev_copy_streams() const {
  return _impl_.num_dev_to_dev_copy_streams_;
}
inline ::int32_t GPUOptions_Experimental::num_dev_to_dev_copy_streams() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.num_dev_to_dev_copy_streams)
  return _internal_num_dev_to_dev_copy_streams();
}
inline void GPUOptions_Experimental::_internal_set_num_dev_to_dev_copy_streams(::int32_t value) {

  _impl_.num_dev_to_dev_copy_streams_ = value;
}
inline void GPUOptions_Experimental::set_num_dev_to_dev_copy_streams(::int32_t value) {
  _internal_set_num_dev_to_dev_copy_streams(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.num_dev_to_dev_copy_streams)
}

// string collective_ring_order = 4;
inline void GPUOptions_Experimental::clear_collective_ring_order() {
  _impl_.collective_ring_order_.ClearToEmpty();
}
inline const std::string& GPUOptions_Experimental::collective_ring_order() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.collective_ring_order)
  return _internal_collective_ring_order();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void GPUOptions_Experimental::set_collective_ring_order(ArgT0&& arg0, ArgT... args) {

  _impl_.collective_ring_order_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.collective_ring_order)
}
inline std::string* GPUOptions_Experimental::mutable_collective_ring_order() {
  std::string* _s = _internal_mutable_collective_ring_order();
  // @@protoc_insertion_point(field_mutable:tensorflow.GPUOptions.Experimental.collective_ring_order)
  return _s;
}
inline const std::string& GPUOptions_Experimental::_internal_collective_ring_order() const {
  return _impl_.collective_ring_order_.Get();
}
inline void GPUOptions_Experimental::_internal_set_collective_ring_order(const std::string& value) {

  _impl_.collective_ring_order_.Set(value, GetArenaForAllocation());
}
inline std::string* GPUOptions_Experimental::_internal_mutable_collective_ring_order() {

  return _impl_.collective_ring_order_.Mutable(GetArenaForAllocation());
}
inline std::string* GPUOptions_Experimental::release_collective_ring_order() {
  // @@protoc_insertion_point(field_release:tensorflow.GPUOptions.Experimental.collective_ring_order)
  return _impl_.collective_ring_order_.Release();
}
inline void GPUOptions_Experimental::set_allocated_collective_ring_order(std::string* collective_ring_order) {
  _impl_.collective_ring_order_.SetAllocated(collective_ring_order, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.collective_ring_order_.IsDefault()) {
    _impl_.collective_ring_order_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.GPUOptions.Experimental.collective_ring_order)
}
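
// Note on the string accessors above (comment added here; protoc does not
// emit it): set_collective_ring_order() is a template that forwards to
// ArenaStringPtr::Set(), so it accepts const char*, std::string, and
// (ptr, size) argument forms. release_*() hands ownership of the string to
// the caller (in this protobuf version it returns a heap copy when the
// message lives on an arena), and set_allocated_*() takes ownership of a
// heap-allocated std::string. A minimal sketch:
//
//   tensorflow::GPUOptions_Experimental exp;
//   exp.set_collective_ring_order("0,1,3,2");
//   std::unique_ptr<std::string> ring(exp.release_collective_ring_order());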

// bool timestamped_allocator = 5;
inline void GPUOptions_Experimental::clear_timestamped_allocator() {
  _impl_.timestamped_allocator_ = false;
}
inline bool GPUOptions_Experimental::_internal_timestamped_allocator() const {
  return _impl_.timestamped_allocator_;
}
inline bool GPUOptions_Experimental::timestamped_allocator() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.timestamped_allocator)
  return _internal_timestamped_allocator();
}
inline void GPUOptions_Experimental::_internal_set_timestamped_allocator(bool value) {

  _impl_.timestamped_allocator_ = value;
}
inline void GPUOptions_Experimental::set_timestamped_allocator(bool value) {
  _internal_set_timestamped_allocator(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.timestamped_allocator)
}

// int32 kernel_tracker_max_interval = 7;
inline void GPUOptions_Experimental::clear_kernel_tracker_max_interval() {
  _impl_.kernel_tracker_max_interval_ = 0;
}
inline ::int32_t GPUOptions_Experimental::_internal_kernel_tracker_max_interval() const {
  return _impl_.kernel_tracker_max_interval_;
}
inline ::int32_t GPUOptions_Experimental::kernel_tracker_max_interval() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.kernel_tracker_max_interval)
  return _internal_kernel_tracker_max_interval();
}
inline void GPUOptions_Experimental::_internal_set_kernel_tracker_max_interval(::int32_t value) {

  _impl_.kernel_tracker_max_interval_ = value;
}
inline void GPUOptions_Experimental::set_kernel_tracker_max_interval(::int32_t value) {
  _internal_set_kernel_tracker_max_interval(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.kernel_tracker_max_interval)
}

// int32 kernel_tracker_max_bytes = 8;
inline void GPUOptions_Experimental::clear_kernel_tracker_max_bytes() {
  _impl_.kernel_tracker_max_bytes_ = 0;
}
inline ::int32_t GPUOptions_Experimental::_internal_kernel_tracker_max_bytes() const {
  return _impl_.kernel_tracker_max_bytes_;
}
inline ::int32_t GPUOptions_Experimental::kernel_tracker_max_bytes() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.kernel_tracker_max_bytes)
  return _internal_kernel_tracker_max_bytes();
}
inline void GPUOptions_Experimental::_internal_set_kernel_tracker_max_bytes(::int32_t value) {

  _impl_.kernel_tracker_max_bytes_ = value;
}
inline void GPUOptions_Experimental::set_kernel_tracker_max_bytes(::int32_t value) {
  _internal_set_kernel_tracker_max_bytes(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.kernel_tracker_max_bytes)
}

// int32 kernel_tracker_max_pending = 9;
inline void GPUOptions_Experimental::clear_kernel_tracker_max_pending() {
  _impl_.kernel_tracker_max_pending_ = 0;
}
inline ::int32_t GPUOptions_Experimental::_internal_kernel_tracker_max_pending() const {
  return _impl_.kernel_tracker_max_pending_;
}
inline ::int32_t GPUOptions_Experimental::kernel_tracker_max_pending() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.kernel_tracker_max_pending)
  return _internal_kernel_tracker_max_pending();
}
inline void GPUOptions_Experimental::_internal_set_kernel_tracker_max_pending(::int32_t value) {

  _impl_.kernel_tracker_max_pending_ = value;
}
inline void GPUOptions_Experimental::set_kernel_tracker_max_pending(::int32_t value) {
  _internal_set_kernel_tracker_max_pending(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.kernel_tracker_max_pending)
}

// double internal_fragmentation_fraction = 10;
inline void GPUOptions_Experimental::clear_internal_fragmentation_fraction() {
  _impl_.internal_fragmentation_fraction_ = 0;
}
inline double GPUOptions_Experimental::_internal_internal_fragmentation_fraction() const {
  return _impl_.internal_fragmentation_fraction_;
}
inline double GPUOptions_Experimental::internal_fragmentation_fraction() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.internal_fragmentation_fraction)
  return _internal_internal_fragmentation_fraction();
}
inline void GPUOptions_Experimental::_internal_set_internal_fragmentation_fraction(double value) {

  _impl_.internal_fragmentation_fraction_ = value;
}
inline void GPUOptions_Experimental::set_internal_fragmentation_fraction(double value) {
  _internal_set_internal_fragmentation_fraction(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.internal_fragmentation_fraction)
}

// bool use_cuda_malloc_async = 11;
inline void GPUOptions_Experimental::clear_use_cuda_malloc_async() {
  _impl_.use_cuda_malloc_async_ = false;
}
inline bool GPUOptions_Experimental::_internal_use_cuda_malloc_async() const {
  return _impl_.use_cuda_malloc_async_;
}
inline bool GPUOptions_Experimental::use_cuda_malloc_async() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.use_cuda_malloc_async)
  return _internal_use_cuda_malloc_async();
}
inline void GPUOptions_Experimental::_internal_set_use_cuda_malloc_async(bool value) {

  _impl_.use_cuda_malloc_async_ = value;
}
inline void GPUOptions_Experimental::set_use_cuda_malloc_async(bool value) {
  _internal_set_use_cuda_malloc_async(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.use_cuda_malloc_async)
}

// bool disallow_retry_on_allocation_failure = 12;
inline void GPUOptions_Experimental::clear_disallow_retry_on_allocation_failure() {
  _impl_.disallow_retry_on_allocation_failure_ = false;
}
inline bool GPUOptions_Experimental::_internal_disallow_retry_on_allocation_failure() const {
  return _impl_.disallow_retry_on_allocation_failure_;
}
inline bool GPUOptions_Experimental::disallow_retry_on_allocation_failure() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.Experimental.disallow_retry_on_allocation_failure)
  return _internal_disallow_retry_on_allocation_failure();
}
inline void GPUOptions_Experimental::_internal_set_disallow_retry_on_allocation_failure(bool value) {

  _impl_.disallow_retry_on_allocation_failure_ = value;
}
inline void GPUOptions_Experimental::set_disallow_retry_on_allocation_failure(bool value) {
  _internal_set_disallow_retry_on_allocation_failure(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.Experimental.disallow_retry_on_allocation_failure)
}
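
// Illustrative usage sketch (comment added here; protoc does not emit it):
//
//   tensorflow::GPUOptions_Experimental exp;
//   exp.set_use_unified_memory(true);     // allow oversubscription via UVM
//   exp.set_num_dev_to_dev_copy_streams(2);
//   exp.set_use_cuda_malloc_async(true);  // needs a CUDA 11.2+ runtime
//   exp.set_disallow_retry_on_allocation_failure(true);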

// -------------------------------------------------------------------

// GPUOptions

// double per_process_gpu_memory_fraction = 1;
inline void GPUOptions::clear_per_process_gpu_memory_fraction() {
  _impl_.per_process_gpu_memory_fraction_ = 0;
}
inline double GPUOptions::_internal_per_process_gpu_memory_fraction() const {
  return _impl_.per_process_gpu_memory_fraction_;
}
inline double GPUOptions::per_process_gpu_memory_fraction() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.per_process_gpu_memory_fraction)
  return _internal_per_process_gpu_memory_fraction();
}
inline void GPUOptions::_internal_set_per_process_gpu_memory_fraction(double value) {

  _impl_.per_process_gpu_memory_fraction_ = value;
}
inline void GPUOptions::set_per_process_gpu_memory_fraction(double value) {
  _internal_set_per_process_gpu_memory_fraction(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.per_process_gpu_memory_fraction)
}

// bool allow_growth = 4;
inline void GPUOptions::clear_allow_growth() {
  _impl_.allow_growth_ = false;
}
inline bool GPUOptions::_internal_allow_growth() const {
  return _impl_.allow_growth_;
}
inline bool GPUOptions::allow_growth() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.allow_growth)
  return _internal_allow_growth();
}
inline void GPUOptions::_internal_set_allow_growth(bool value) {

  _impl_.allow_growth_ = value;
}
inline void GPUOptions::set_allow_growth(bool value) {
  _internal_set_allow_growth(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.allow_growth)
}

// string allocator_type = 2;
inline void GPUOptions::clear_allocator_type() {
  _impl_.allocator_type_.ClearToEmpty();
}
inline const std::string& GPUOptions::allocator_type() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.allocator_type)
  return _internal_allocator_type();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void GPUOptions::set_allocator_type(ArgT0&& arg0, ArgT... args) {

  _impl_.allocator_type_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.allocator_type)
}
inline std::string* GPUOptions::mutable_allocator_type() {
  std::string* _s = _internal_mutable_allocator_type();
  // @@protoc_insertion_point(field_mutable:tensorflow.GPUOptions.allocator_type)
  return _s;
}
inline const std::string& GPUOptions::_internal_allocator_type() const {
  return _impl_.allocator_type_.Get();
}
inline void GPUOptions::_internal_set_allocator_type(const std::string& value) {

  _impl_.allocator_type_.Set(value, GetArenaForAllocation());
}
inline std::string* GPUOptions::_internal_mutable_allocator_type() {

  return _impl_.allocator_type_.Mutable(GetArenaForAllocation());
}
inline std::string* GPUOptions::release_allocator_type() {
  // @@protoc_insertion_point(field_release:tensorflow.GPUOptions.allocator_type)
  return _impl_.allocator_type_.Release();
}
inline void GPUOptions::set_allocated_allocator_type(std::string* allocator_type) {
  _impl_.allocator_type_.SetAllocated(allocator_type, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.allocator_type_.IsDefault()) {
    _impl_.allocator_type_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.GPUOptions.allocator_type)
}

// int64 deferred_deletion_bytes = 3;
inline void GPUOptions::clear_deferred_deletion_bytes() {
  _impl_.deferred_deletion_bytes_ = ::int64_t{0};
}
inline ::int64_t GPUOptions::_internal_deferred_deletion_bytes() const {
  return _impl_.deferred_deletion_bytes_;
}
inline ::int64_t GPUOptions::deferred_deletion_bytes() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.deferred_deletion_bytes)
  return _internal_deferred_deletion_bytes();
}
inline void GPUOptions::_internal_set_deferred_deletion_bytes(::int64_t value) {

  _impl_.deferred_deletion_bytes_ = value;
}
inline void GPUOptions::set_deferred_deletion_bytes(::int64_t value) {
  _internal_set_deferred_deletion_bytes(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.deferred_deletion_bytes)
}

// string visible_device_list = 5;
inline void GPUOptions::clear_visible_device_list() {
  _impl_.visible_device_list_.ClearToEmpty();
}
inline const std::string& GPUOptions::visible_device_list() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.visible_device_list)
  return _internal_visible_device_list();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void GPUOptions::set_visible_device_list(ArgT0&& arg0, ArgT... args) {

  _impl_.visible_device_list_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.visible_device_list)
}
inline std::string* GPUOptions::mutable_visible_device_list() {
  std::string* _s = _internal_mutable_visible_device_list();
  // @@protoc_insertion_point(field_mutable:tensorflow.GPUOptions.visible_device_list)
  return _s;
}
inline const std::string& GPUOptions::_internal_visible_device_list() const {
  return _impl_.visible_device_list_.Get();
}
inline void GPUOptions::_internal_set_visible_device_list(const std::string& value) {

  _impl_.visible_device_list_.Set(value, GetArenaForAllocation());
}
inline std::string* GPUOptions::_internal_mutable_visible_device_list() {

  return _impl_.visible_device_list_.Mutable(GetArenaForAllocation());
}
inline std::string* GPUOptions::release_visible_device_list() {
  // @@protoc_insertion_point(field_release:tensorflow.GPUOptions.visible_device_list)
  return _impl_.visible_device_list_.Release();
}
inline void GPUOptions::set_allocated_visible_device_list(std::string* visible_device_list) {
  _impl_.visible_device_list_.SetAllocated(visible_device_list, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.visible_device_list_.IsDefault()) {
    _impl_.visible_device_list_.Set("", GetArenaForAllocation());
  }
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.GPUOptions.visible_device_list)
}

// int32 polling_active_delay_usecs = 6;
inline void GPUOptions::clear_polling_active_delay_usecs() {
  _impl_.polling_active_delay_usecs_ = 0;
}
inline ::int32_t GPUOptions::_internal_polling_active_delay_usecs() const {
  return _impl_.polling_active_delay_usecs_;
}
inline ::int32_t GPUOptions::polling_active_delay_usecs() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.polling_active_delay_usecs)
  return _internal_polling_active_delay_usecs();
}
inline void GPUOptions::_internal_set_polling_active_delay_usecs(::int32_t value) {

  _impl_.polling_active_delay_usecs_ = value;
}
inline void GPUOptions::set_polling_active_delay_usecs(::int32_t value) {
  _internal_set_polling_active_delay_usecs(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.polling_active_delay_usecs)
}

// int32 polling_inactive_delay_msecs = 7;
inline void GPUOptions::clear_polling_inactive_delay_msecs() {
  _impl_.polling_inactive_delay_msecs_ = 0;
}
inline ::int32_t GPUOptions::_internal_polling_inactive_delay_msecs() const {
  return _impl_.polling_inactive_delay_msecs_;
}
inline ::int32_t GPUOptions::polling_inactive_delay_msecs() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.polling_inactive_delay_msecs)
  return _internal_polling_inactive_delay_msecs();
}
inline void GPUOptions::_internal_set_polling_inactive_delay_msecs(::int32_t value) {

  _impl_.polling_inactive_delay_msecs_ = value;
}
inline void GPUOptions::set_polling_inactive_delay_msecs(::int32_t value) {
  _internal_set_polling_inactive_delay_msecs(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.polling_inactive_delay_msecs)
}

// bool force_gpu_compatible = 8;
inline void GPUOptions::clear_force_gpu_compatible() {
  _impl_.force_gpu_compatible_ = false;
}
inline bool GPUOptions::_internal_force_gpu_compatible() const {
  return _impl_.force_gpu_compatible_;
}
inline bool GPUOptions::force_gpu_compatible() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.force_gpu_compatible)
  return _internal_force_gpu_compatible();
}
inline void GPUOptions::_internal_set_force_gpu_compatible(bool value) {

  _impl_.force_gpu_compatible_ = value;
}
inline void GPUOptions::set_force_gpu_compatible(bool value) {
  _internal_set_force_gpu_compatible(value);
  // @@protoc_insertion_point(field_set:tensorflow.GPUOptions.force_gpu_compatible)
}

// .tensorflow.GPUOptions.Experimental experimental = 9;
inline bool GPUOptions::_internal_has_experimental() const {
  return this != internal_default_instance() && _impl_.experimental_ != nullptr;
}
inline bool GPUOptions::has_experimental() const {
  return _internal_has_experimental();
}
inline void GPUOptions::clear_experimental() {
  if (GetArenaForAllocation() == nullptr && _impl_.experimental_ != nullptr) {
    delete _impl_.experimental_;
  }
  _impl_.experimental_ = nullptr;
}
inline const ::tensorflow::GPUOptions_Experimental& GPUOptions::_internal_experimental() const {
  const ::tensorflow::GPUOptions_Experimental* p = _impl_.experimental_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::GPUOptions_Experimental&>(
      ::tensorflow::_GPUOptions_Experimental_default_instance_);
}
inline const ::tensorflow::GPUOptions_Experimental& GPUOptions::experimental() const {
  // @@protoc_insertion_point(field_get:tensorflow.GPUOptions.experimental)
  return _internal_experimental();
}
inline void GPUOptions::unsafe_arena_set_allocated_experimental(
    ::tensorflow::GPUOptions_Experimental* experimental) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.experimental_);
  }
  _impl_.experimental_ = experimental;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.GPUOptions.experimental)
}
inline ::tensorflow::GPUOptions_Experimental* GPUOptions::release_experimental() {

  ::tensorflow::GPUOptions_Experimental* temp = _impl_.experimental_;
  _impl_.experimental_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::GPUOptions_Experimental* GPUOptions::unsafe_arena_release_experimental() {
  // @@protoc_insertion_point(field_release:tensorflow.GPUOptions.experimental)

  ::tensorflow::GPUOptions_Experimental* temp = _impl_.experimental_;
  _impl_.experimental_ = nullptr;
  return temp;
}
inline ::tensorflow::GPUOptions_Experimental* GPUOptions::_internal_mutable_experimental() {

  if (_impl_.experimental_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::GPUOptions_Experimental>(GetArenaForAllocation());
    _impl_.experimental_ = p;
  }
  return _impl_.experimental_;
}
inline ::tensorflow::GPUOptions_Experimental* GPUOptions::mutable_experimental() {
  ::tensorflow::GPUOptions_Experimental* _msg = _internal_mutable_experimental();
  // @@protoc_insertion_point(field_mutable:tensorflow.GPUOptions.experimental)
  return _msg;
}
inline void GPUOptions::set_allocated_experimental(::tensorflow::GPUOptions_Experimental* experimental) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.experimental_;
  }
  if (experimental) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(experimental);
    if (message_arena != submessage_arena) {
      experimental = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, experimental, submessage_arena);
    }

  } else {

  }
  _impl_.experimental_ = experimental;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.GPUOptions.experimental)
}
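
// Illustrative usage sketch (comment added here; protoc does not emit it).
// mutable_experimental() lazily creates the submessage, while
// set_allocated_experimental() takes ownership of a caller-provided one,
// copying it between arenas when the two messages disagree (see above):
//
//   tensorflow::GPUOptions gpu;
//   gpu.set_allow_growth(true);
//   gpu.set_per_process_gpu_memory_fraction(0.5);
//   gpu.set_visible_device_list("0,1");
//   gpu.mutable_experimental()->set_use_unified_memory(true);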

// -------------------------------------------------------------------

// OptimizerOptions

// bool do_common_subexpression_elimination = 1;
inline void OptimizerOptions::clear_do_common_subexpression_elimination() {
  _impl_.do_common_subexpression_elimination_ = false;
}
inline bool OptimizerOptions::_internal_do_common_subexpression_elimination() const {
  return _impl_.do_common_subexpression_elimination_;
}
inline bool OptimizerOptions::do_common_subexpression_elimination() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.do_common_subexpression_elimination)
  return _internal_do_common_subexpression_elimination();
}
inline void OptimizerOptions::_internal_set_do_common_subexpression_elimination(bool value) {

  _impl_.do_common_subexpression_elimination_ = value;
}
inline void OptimizerOptions::set_do_common_subexpression_elimination(bool value) {
  _internal_set_do_common_subexpression_elimination(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.do_common_subexpression_elimination)
}

// bool do_constant_folding = 2;
inline void OptimizerOptions::clear_do_constant_folding() {
  _impl_.do_constant_folding_ = false;
}
inline bool OptimizerOptions::_internal_do_constant_folding() const {
  return _impl_.do_constant_folding_;
}
inline bool OptimizerOptions::do_constant_folding() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.do_constant_folding)
  return _internal_do_constant_folding();
}
inline void OptimizerOptions::_internal_set_do_constant_folding(bool value) {

  _impl_.do_constant_folding_ = value;
}
inline void OptimizerOptions::set_do_constant_folding(bool value) {
  _internal_set_do_constant_folding(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.do_constant_folding)
}

// int64 max_folded_constant_in_bytes = 6;
inline void OptimizerOptions::clear_max_folded_constant_in_bytes() {
  _impl_.max_folded_constant_in_bytes_ = ::int64_t{0};
}
inline ::int64_t OptimizerOptions::_internal_max_folded_constant_in_bytes() const {
  return _impl_.max_folded_constant_in_bytes_;
}
inline ::int64_t OptimizerOptions::max_folded_constant_in_bytes() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.max_folded_constant_in_bytes)
  return _internal_max_folded_constant_in_bytes();
}
inline void OptimizerOptions::_internal_set_max_folded_constant_in_bytes(::int64_t value) {

  _impl_.max_folded_constant_in_bytes_ = value;
}
inline void OptimizerOptions::set_max_folded_constant_in_bytes(::int64_t value) {
  _internal_set_max_folded_constant_in_bytes(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.max_folded_constant_in_bytes)
}

// bool do_function_inlining = 4;
inline void OptimizerOptions::clear_do_function_inlining() {
  _impl_.do_function_inlining_ = false;
}
inline bool OptimizerOptions::_internal_do_function_inlining() const {
  return _impl_.do_function_inlining_;
}
inline bool OptimizerOptions::do_function_inlining() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.do_function_inlining)
  return _internal_do_function_inlining();
}
inline void OptimizerOptions::_internal_set_do_function_inlining(bool value) {

  _impl_.do_function_inlining_ = value;
}
inline void OptimizerOptions::set_do_function_inlining(bool value) {
  _internal_set_do_function_inlining(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.do_function_inlining)
}

// .tensorflow.OptimizerOptions.Level opt_level = 3;
inline void OptimizerOptions::clear_opt_level() {
  _impl_.opt_level_ = 0;
}
inline ::tensorflow::OptimizerOptions_Level OptimizerOptions::_internal_opt_level() const {
  return static_cast< ::tensorflow::OptimizerOptions_Level >(_impl_.opt_level_);
}
inline ::tensorflow::OptimizerOptions_Level OptimizerOptions::opt_level() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.opt_level)
  return _internal_opt_level();
}
inline void OptimizerOptions::_internal_set_opt_level(::tensorflow::OptimizerOptions_Level value) {

  _impl_.opt_level_ = value;
}
inline void OptimizerOptions::set_opt_level(::tensorflow::OptimizerOptions_Level value) {
  _internal_set_opt_level(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.opt_level)
}

// .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
inline void OptimizerOptions::clear_global_jit_level() {
  _impl_.global_jit_level_ = 0;
}
inline ::tensorflow::OptimizerOptions_GlobalJitLevel OptimizerOptions::_internal_global_jit_level() const {
  return static_cast< ::tensorflow::OptimizerOptions_GlobalJitLevel >(_impl_.global_jit_level_);
}
inline ::tensorflow::OptimizerOptions_GlobalJitLevel OptimizerOptions::global_jit_level() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.global_jit_level)
  return _internal_global_jit_level();
}
inline void OptimizerOptions::_internal_set_global_jit_level(::tensorflow::OptimizerOptions_GlobalJitLevel value) {

  _impl_.global_jit_level_ = value;
}
inline void OptimizerOptions::set_global_jit_level(::tensorflow::OptimizerOptions_GlobalJitLevel value) {
  _internal_set_global_jit_level(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.global_jit_level)
}

// bool cpu_global_jit = 7;
inline void OptimizerOptions::clear_cpu_global_jit() {
  _impl_.cpu_global_jit_ = false;
}
inline bool OptimizerOptions::_internal_cpu_global_jit() const {
  return _impl_.cpu_global_jit_;
}
inline bool OptimizerOptions::cpu_global_jit() const {
  // @@protoc_insertion_point(field_get:tensorflow.OptimizerOptions.cpu_global_jit)
  return _internal_cpu_global_jit();
}
inline void OptimizerOptions::_internal_set_cpu_global_jit(bool value) {

  _impl_.cpu_global_jit_ = value;
}
inline void OptimizerOptions::set_cpu_global_jit(bool value) {
  _internal_set_cpu_global_jit(value);
  // @@protoc_insertion_point(field_set:tensorflow.OptimizerOptions.cpu_global_jit)
}
5071
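// Illustrative usage (hand-written sketch, not part of the protoc output):
// how the OptimizerOptions accessors above are typically combined. The helper
// name and the chosen values are assumptions made for documentation only.
inline void Example_ConfigureOptimizerOptions(::tensorflow::OptimizerOptions* opts) {
  opts->set_do_constant_folding(true);
  opts->set_max_folded_constant_in_bytes(::int64_t{10} * 1024 * 1024);  // cap folded constants at 10 MiB
  opts->set_do_function_inlining(true);
  opts->set_opt_level(::tensorflow::OptimizerOptions_Level_L1);
  opts->set_global_jit_level(::tensorflow::OptimizerOptions_GlobalJitLevel_OFF);
}
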
// -------------------------------------------------------------------

// GraphOptions

// bool enable_recv_scheduling = 2;
inline void GraphOptions::clear_enable_recv_scheduling() {
  _impl_.enable_recv_scheduling_ = false;
}
inline bool GraphOptions::_internal_enable_recv_scheduling() const {
  return _impl_.enable_recv_scheduling_;
}
inline bool GraphOptions::enable_recv_scheduling() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.enable_recv_scheduling)
  return _internal_enable_recv_scheduling();
}
inline void GraphOptions::_internal_set_enable_recv_scheduling(bool value) {
  _impl_.enable_recv_scheduling_ = value;
}
inline void GraphOptions::set_enable_recv_scheduling(bool value) {
  _internal_set_enable_recv_scheduling(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.enable_recv_scheduling)
}

// .tensorflow.OptimizerOptions optimizer_options = 3;
inline bool GraphOptions::_internal_has_optimizer_options() const {
  return this != internal_default_instance() && _impl_.optimizer_options_ != nullptr;
}
inline bool GraphOptions::has_optimizer_options() const {
  return _internal_has_optimizer_options();
}
inline void GraphOptions::clear_optimizer_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.optimizer_options_ != nullptr) {
    delete _impl_.optimizer_options_;
  }
  _impl_.optimizer_options_ = nullptr;
}
inline const ::tensorflow::OptimizerOptions& GraphOptions::_internal_optimizer_options() const {
  const ::tensorflow::OptimizerOptions* p = _impl_.optimizer_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::OptimizerOptions&>(
      ::tensorflow::_OptimizerOptions_default_instance_);
}
inline const ::tensorflow::OptimizerOptions& GraphOptions::optimizer_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.optimizer_options)
  return _internal_optimizer_options();
}
inline void GraphOptions::unsafe_arena_set_allocated_optimizer_options(
    ::tensorflow::OptimizerOptions* optimizer_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.optimizer_options_);
  }
  _impl_.optimizer_options_ = optimizer_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.GraphOptions.optimizer_options)
}
inline ::tensorflow::OptimizerOptions* GraphOptions::release_optimizer_options() {
  ::tensorflow::OptimizerOptions* temp = _impl_.optimizer_options_;
  _impl_.optimizer_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else   // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::OptimizerOptions* GraphOptions::unsafe_arena_release_optimizer_options() {
  // @@protoc_insertion_point(field_release:tensorflow.GraphOptions.optimizer_options)
  ::tensorflow::OptimizerOptions* temp = _impl_.optimizer_options_;
  _impl_.optimizer_options_ = nullptr;
  return temp;
}
inline ::tensorflow::OptimizerOptions* GraphOptions::_internal_mutable_optimizer_options() {
  if (_impl_.optimizer_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::OptimizerOptions>(GetArenaForAllocation());
    _impl_.optimizer_options_ = p;
  }
  return _impl_.optimizer_options_;
}
inline ::tensorflow::OptimizerOptions* GraphOptions::mutable_optimizer_options() {
  ::tensorflow::OptimizerOptions* _msg = _internal_mutable_optimizer_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.GraphOptions.optimizer_options)
  return _msg;
}
inline void GraphOptions::set_allocated_optimizer_options(::tensorflow::OptimizerOptions* optimizer_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.optimizer_options_;
  }
  if (optimizer_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(optimizer_options);
    if (message_arena != submessage_arena) {
      optimizer_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, optimizer_options, submessage_arena);
    }
  }
  _impl_.optimizer_options_ = optimizer_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.GraphOptions.optimizer_options)
}

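// Illustrative usage (hand-written sketch, not part of the protoc output):
// the two common ways to populate the optimizer_options submessage via the
// accessors above. The helper name is an assumption; prefer mutable_*(),
// which is arena-safe.
inline void Example_SetOptimizerOptions(::tensorflow::GraphOptions* graph_opts) {
  // mutable_*() lazily allocates the submessage on the parent's arena (or the
  // heap when the parent is heap-allocated) and returns a writable pointer.
  graph_opts->mutable_optimizer_options()->set_opt_level(
      ::tensorflow::OptimizerOptions_Level_L1);
  // set_allocated_*() would instead take ownership of a caller-provided heap
  // message, copying it if the arenas differ; release_*() hands ownership back.
}
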
// int64 build_cost_model = 4;
inline void GraphOptions::clear_build_cost_model() {
  _impl_.build_cost_model_ = ::int64_t{0};
}
inline ::int64_t GraphOptions::_internal_build_cost_model() const {
  return _impl_.build_cost_model_;
}
inline ::int64_t GraphOptions::build_cost_model() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.build_cost_model)
  return _internal_build_cost_model();
}
inline void GraphOptions::_internal_set_build_cost_model(::int64_t value) {
  _impl_.build_cost_model_ = value;
}
inline void GraphOptions::set_build_cost_model(::int64_t value) {
  _internal_set_build_cost_model(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.build_cost_model)
}

// int64 build_cost_model_after = 9;
inline void GraphOptions::clear_build_cost_model_after() {
  _impl_.build_cost_model_after_ = ::int64_t{0};
}
inline ::int64_t GraphOptions::_internal_build_cost_model_after() const {
  return _impl_.build_cost_model_after_;
}
inline ::int64_t GraphOptions::build_cost_model_after() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.build_cost_model_after)
  return _internal_build_cost_model_after();
}
inline void GraphOptions::_internal_set_build_cost_model_after(::int64_t value) {
  _impl_.build_cost_model_after_ = value;
}
inline void GraphOptions::set_build_cost_model_after(::int64_t value) {
  _internal_set_build_cost_model_after(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.build_cost_model_after)
}

// bool infer_shapes = 5;
inline void GraphOptions::clear_infer_shapes() {
  _impl_.infer_shapes_ = false;
}
inline bool GraphOptions::_internal_infer_shapes() const {
  return _impl_.infer_shapes_;
}
inline bool GraphOptions::infer_shapes() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.infer_shapes)
  return _internal_infer_shapes();
}
inline void GraphOptions::_internal_set_infer_shapes(bool value) {
  _impl_.infer_shapes_ = value;
}
inline void GraphOptions::set_infer_shapes(bool value) {
  _internal_set_infer_shapes(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.infer_shapes)
}

// bool place_pruned_graph = 6;
inline void GraphOptions::clear_place_pruned_graph() {
  _impl_.place_pruned_graph_ = false;
}
inline bool GraphOptions::_internal_place_pruned_graph() const {
  return _impl_.place_pruned_graph_;
}
inline bool GraphOptions::place_pruned_graph() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.place_pruned_graph)
  return _internal_place_pruned_graph();
}
inline void GraphOptions::_internal_set_place_pruned_graph(bool value) {
  _impl_.place_pruned_graph_ = value;
}
inline void GraphOptions::set_place_pruned_graph(bool value) {
  _internal_set_place_pruned_graph(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.place_pruned_graph)
}

// bool enable_bfloat16_sendrecv = 7;
inline void GraphOptions::clear_enable_bfloat16_sendrecv() {
  _impl_.enable_bfloat16_sendrecv_ = false;
}
inline bool GraphOptions::_internal_enable_bfloat16_sendrecv() const {
  return _impl_.enable_bfloat16_sendrecv_;
}
inline bool GraphOptions::enable_bfloat16_sendrecv() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.enable_bfloat16_sendrecv)
  return _internal_enable_bfloat16_sendrecv();
}
inline void GraphOptions::_internal_set_enable_bfloat16_sendrecv(bool value) {
  _impl_.enable_bfloat16_sendrecv_ = value;
}
inline void GraphOptions::set_enable_bfloat16_sendrecv(bool value) {
  _internal_set_enable_bfloat16_sendrecv(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.enable_bfloat16_sendrecv)
}

// int32 timeline_step = 8;
inline void GraphOptions::clear_timeline_step() {
  _impl_.timeline_step_ = 0;
}
inline ::int32_t GraphOptions::_internal_timeline_step() const {
  return _impl_.timeline_step_;
}
inline ::int32_t GraphOptions::timeline_step() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.timeline_step)
  return _internal_timeline_step();
}
inline void GraphOptions::_internal_set_timeline_step(::int32_t value) {
  _impl_.timeline_step_ = value;
}
inline void GraphOptions::set_timeline_step(::int32_t value) {
  _internal_set_timeline_step(value);
  // @@protoc_insertion_point(field_set:tensorflow.GraphOptions.timeline_step)
}

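// Illustrative usage (hand-written sketch, not part of the protoc output):
// setting the scalar GraphOptions fields above. The helper name and the
// values are assumptions; field semantics follow the config.proto comments.
inline void Example_ConfigureGraphOptions(::tensorflow::GraphOptions* graph_opts) {
  graph_opts->set_infer_shapes(true);              // annotate nodes with inferred output shapes
  graph_opts->set_build_cost_model(::int64_t{1});  // steps to run while collecting a cost model
  graph_opts->set_build_cost_model_after(::int64_t{10});  // warm-up steps to skip first
  graph_opts->set_timeline_step(0);                // 0 disables periodic timeline collection
}
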
// .tensorflow.RewriterConfig rewrite_options = 10;
inline bool GraphOptions::_internal_has_rewrite_options() const {
  return this != internal_default_instance() && _impl_.rewrite_options_ != nullptr;
}
inline bool GraphOptions::has_rewrite_options() const {
  return _internal_has_rewrite_options();
}
inline const ::tensorflow::RewriterConfig& GraphOptions::_internal_rewrite_options() const {
  const ::tensorflow::RewriterConfig* p = _impl_.rewrite_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::RewriterConfig&>(
      ::tensorflow::_RewriterConfig_default_instance_);
}
inline const ::tensorflow::RewriterConfig& GraphOptions::rewrite_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.GraphOptions.rewrite_options)
  return _internal_rewrite_options();
}
inline void GraphOptions::unsafe_arena_set_allocated_rewrite_options(
    ::tensorflow::RewriterConfig* rewrite_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.rewrite_options_);
  }
  _impl_.rewrite_options_ = rewrite_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.GraphOptions.rewrite_options)
}
inline ::tensorflow::RewriterConfig* GraphOptions::release_rewrite_options() {
  ::tensorflow::RewriterConfig* temp = _impl_.rewrite_options_;
  _impl_.rewrite_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else   // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::RewriterConfig* GraphOptions::unsafe_arena_release_rewrite_options() {
  // @@protoc_insertion_point(field_release:tensorflow.GraphOptions.rewrite_options)
  ::tensorflow::RewriterConfig* temp = _impl_.rewrite_options_;
  _impl_.rewrite_options_ = nullptr;
  return temp;
}
inline ::tensorflow::RewriterConfig* GraphOptions::_internal_mutable_rewrite_options() {
  if (_impl_.rewrite_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::RewriterConfig>(GetArenaForAllocation());
    _impl_.rewrite_options_ = p;
  }
  return _impl_.rewrite_options_;
}
inline ::tensorflow::RewriterConfig* GraphOptions::mutable_rewrite_options() {
  ::tensorflow::RewriterConfig* _msg = _internal_mutable_rewrite_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.GraphOptions.rewrite_options)
  return _msg;
}
inline void GraphOptions::set_allocated_rewrite_options(::tensorflow::RewriterConfig* rewrite_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.rewrite_options_);
  }
  if (rewrite_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
            reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(rewrite_options));
    if (message_arena != submessage_arena) {
      rewrite_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, rewrite_options, submessage_arena);
    }
  }
  _impl_.rewrite_options_ = rewrite_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.GraphOptions.rewrite_options)
}

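// Illustrative usage (hand-written sketch, not part of the protoc output):
// checking and transferring ownership of the rewrite_options submessage with
// the accessors above. The helper name is an assumption for documentation.
inline ::tensorflow::RewriterConfig* Example_TakeRewriteOptions(
    ::tensorflow::GraphOptions* graph_opts) {
  // has_*() reports whether the submessage was ever set; release_*() detaches
  // it and transfers ownership to the caller (copying when arena-backed).
  if (!graph_opts->has_rewrite_options()) return nullptr;
  // The caller now owns the returned message and must delete it.
  return graph_opts->release_rewrite_options();
}
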
// -------------------------------------------------------------------

// ThreadPoolOptionProto

// int32 num_threads = 1;
inline void ThreadPoolOptionProto::clear_num_threads() {
  _impl_.num_threads_ = 0;
}
inline ::int32_t ThreadPoolOptionProto::_internal_num_threads() const {
  return _impl_.num_threads_;
}
inline ::int32_t ThreadPoolOptionProto::num_threads() const {
  // @@protoc_insertion_point(field_get:tensorflow.ThreadPoolOptionProto.num_threads)
  return _internal_num_threads();
}
inline void ThreadPoolOptionProto::_internal_set_num_threads(::int32_t value) {
  _impl_.num_threads_ = value;
}
inline void ThreadPoolOptionProto::set_num_threads(::int32_t value) {
  _internal_set_num_threads(value);
  // @@protoc_insertion_point(field_set:tensorflow.ThreadPoolOptionProto.num_threads)
}

// string global_name = 2;
inline void ThreadPoolOptionProto::clear_global_name() {
  _impl_.global_name_.ClearToEmpty();
}
inline const std::string& ThreadPoolOptionProto::global_name() const {
  // @@protoc_insertion_point(field_get:tensorflow.ThreadPoolOptionProto.global_name)
  return _internal_global_name();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void ThreadPoolOptionProto::set_global_name(ArgT0&& arg0, ArgT... args) {
  _impl_.global_name_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.ThreadPoolOptionProto.global_name)
}
inline std::string* ThreadPoolOptionProto::mutable_global_name() {
  std::string* _s = _internal_mutable_global_name();
  // @@protoc_insertion_point(field_mutable:tensorflow.ThreadPoolOptionProto.global_name)
  return _s;
}
inline const std::string& ThreadPoolOptionProto::_internal_global_name() const {
  return _impl_.global_name_.Get();
}
inline void ThreadPoolOptionProto::_internal_set_global_name(const std::string& value) {
  _impl_.global_name_.Set(value, GetArenaForAllocation());
}
inline std::string* ThreadPoolOptionProto::_internal_mutable_global_name() {
  return _impl_.global_name_.Mutable(GetArenaForAllocation());
}
inline std::string* ThreadPoolOptionProto::release_global_name() {
  // @@protoc_insertion_point(field_release:tensorflow.ThreadPoolOptionProto.global_name)
  return _impl_.global_name_.Release();
}
inline void ThreadPoolOptionProto::set_allocated_global_name(std::string* global_name) {
  _impl_.global_name_.SetAllocated(global_name, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.global_name_.IsDefault()) {
    _impl_.global_name_.Set("", GetArenaForAllocation());
  }
#endif  // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.ThreadPoolOptionProto.global_name)
}

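// Illustrative usage (hand-written sketch, not part of the protoc output):
// populating a ThreadPoolOptionProto. The helper name and values are
// assumptions; the templated set_global_name() accepts std::string,
// const char*, and similar argument forms.
inline void Example_ConfigureThreadPool(::tensorflow::ThreadPoolOptionProto* pool) {
  pool->set_num_threads(8);               // 0 lets the system pick a value
  pool->set_global_name("example_pool");  // non-empty names denote pools shared across sessions
}
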
// -------------------------------------------------------------------

// RPCOptions

// bool use_rpc_for_inprocess_master = 1;
inline void RPCOptions::clear_use_rpc_for_inprocess_master() {
  _impl_.use_rpc_for_inprocess_master_ = false;
}
inline bool RPCOptions::_internal_use_rpc_for_inprocess_master() const {
  return _impl_.use_rpc_for_inprocess_master_;
}
inline bool RPCOptions::use_rpc_for_inprocess_master() const {
  // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.use_rpc_for_inprocess_master)
  return _internal_use_rpc_for_inprocess_master();
}
inline void RPCOptions::_internal_set_use_rpc_for_inprocess_master(bool value) {
  _impl_.use_rpc_for_inprocess_master_ = value;
}
inline void RPCOptions::set_use_rpc_for_inprocess_master(bool value) {