// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/dataset_options.proto

#ifndef GOOGLE_PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fdataset_5foptions_2eproto
#define GOOGLE_PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fdataset_5foptions_2eproto

#include <cstdint>
#include <limits>
#include <string>

#include <google/protobuf/port_def.inc>
#if PROTOBUF_VERSION < 3021000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3021012 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif

#include <google/protobuf/port_undef.inc>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata_lite.h>
#include <google/protobuf/message_lite.h>
#include <google/protobuf/repeated_field.h>  // IWYU pragma: export
#include <google/protobuf/extension_set.h>  // IWYU pragma: export
#include <google/protobuf/generated_enum_util.h>
#include "tensorflow/core/framework/model.pb.h"
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
#define PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2fdataset_5foptions_2eproto
PROTOBUF_NAMESPACE_OPEN
namespace internal {
class AnyMetadata;
}  // namespace internal
PROTOBUF_NAMESPACE_CLOSE

// Internal implementation detail -- do not use these members.
struct TableStruct_tensorflow_2fcore_2fframework_2fdataset_5foptions_2eproto {
  static const ::uint32_t offsets[];
};
namespace tensorflow {
namespace data {
class AutotuneOptions;
struct AutotuneOptionsDefaultTypeInternal;
extern AutotuneOptionsDefaultTypeInternal _AutotuneOptions_default_instance_;
class CardinalityOptions;
struct CardinalityOptionsDefaultTypeInternal;
extern CardinalityOptionsDefaultTypeInternal _CardinalityOptions_default_instance_;
class DistributeOptions;
struct DistributeOptionsDefaultTypeInternal;
extern DistributeOptionsDefaultTypeInternal _DistributeOptions_default_instance_;
class OptimizationOptions;
struct OptimizationOptionsDefaultTypeInternal;
extern OptimizationOptionsDefaultTypeInternal _OptimizationOptions_default_instance_;
class Options;
struct OptionsDefaultTypeInternal;
extern OptionsDefaultTypeInternal _Options_default_instance_;
class ThreadingOptions;
struct ThreadingOptionsDefaultTypeInternal;
extern ThreadingOptionsDefaultTypeInternal _ThreadingOptions_default_instance_;
}  // namespace data
}  // namespace tensorflow
PROTOBUF_NAMESPACE_OPEN
template<> ::tensorflow::data::AutotuneOptions* Arena::CreateMaybeMessage<::tensorflow::data::AutotuneOptions>(Arena*);
template<> ::tensorflow::data::CardinalityOptions* Arena::CreateMaybeMessage<::tensorflow::data::CardinalityOptions>(Arena*);
template<> ::tensorflow::data::DistributeOptions* Arena::CreateMaybeMessage<::tensorflow::data::DistributeOptions>(Arena*);
template<> ::tensorflow::data::OptimizationOptions* Arena::CreateMaybeMessage<::tensorflow::data::OptimizationOptions>(Arena*);
template<> ::tensorflow::data::Options* Arena::CreateMaybeMessage<::tensorflow::data::Options>(Arena*);
template<> ::tensorflow::data::ThreadingOptions* Arena::CreateMaybeMessage<::tensorflow::data::ThreadingOptions>(Arena*);
PROTOBUF_NAMESPACE_CLOSE
namespace tensorflow {
namespace data {

enum CardinalityOptions_ComputeLevel : int {
  CardinalityOptions_ComputeLevel_CARDINALITY_COMPUTE_UNSPECIFIED = 0,
  CardinalityOptions_ComputeLevel_CARDINALITY_COMPUTE_LOW = 1,
  CardinalityOptions_ComputeLevel_CARDINALITY_COMPUTE_MODERATE = 2,
  CardinalityOptions_ComputeLevel_CardinalityOptions_ComputeLevel_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::min(),
  CardinalityOptions_ComputeLevel_CardinalityOptions_ComputeLevel_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::max()
};
bool CardinalityOptions_ComputeLevel_IsValid(int value);
constexpr CardinalityOptions_ComputeLevel CardinalityOptions_ComputeLevel_ComputeLevel_MIN = CardinalityOptions_ComputeLevel_CARDINALITY_COMPUTE_UNSPECIFIED;
constexpr CardinalityOptions_ComputeLevel CardinalityOptions_ComputeLevel_ComputeLevel_MAX = CardinalityOptions_ComputeLevel_CARDINALITY_COMPUTE_MODERATE;
constexpr int CardinalityOptions_ComputeLevel_ComputeLevel_ARRAYSIZE = CardinalityOptions_ComputeLevel_ComputeLevel_MAX + 1;

const std::string& CardinalityOptions_ComputeLevel_Name(CardinalityOptions_ComputeLevel value);
template<typename T>
inline const std::string& CardinalityOptions_ComputeLevel_Name(T enum_t_value) {
  static_assert(::std::is_same<T, CardinalityOptions_ComputeLevel>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function CardinalityOptions_ComputeLevel_Name.");
  return CardinalityOptions_ComputeLevel_Name(static_cast<CardinalityOptions_ComputeLevel>(enum_t_value));
}
bool CardinalityOptions_ComputeLevel_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, CardinalityOptions_ComputeLevel* value);
enum AutoShardPolicy : int {
  AUTO = 0,
  FILE = 1,
  DATA = 2,
  HINT = 3,
  OFF = -1,
  AutoShardPolicy_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::min(),
  AutoShardPolicy_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::max()
};
bool AutoShardPolicy_IsValid(int value);
constexpr AutoShardPolicy AutoShardPolicy_MIN = OFF;
constexpr AutoShardPolicy AutoShardPolicy_MAX = HINT;
constexpr int AutoShardPolicy_ARRAYSIZE = AutoShardPolicy_MAX + 1;

const std::string& AutoShardPolicy_Name(AutoShardPolicy value);
template<typename T>
inline const std::string& AutoShardPolicy_Name(T enum_t_value) {
  static_assert(::std::is_same<T, AutoShardPolicy>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function AutoShardPolicy_Name.");
  return AutoShardPolicy_Name(static_cast<AutoShardPolicy>(enum_t_value));
}
bool AutoShardPolicy_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, AutoShardPolicy* value);
enum ExternalStatePolicy : int {
  POLICY_WARN = 0,
  POLICY_IGNORE = 1,
  POLICY_FAIL = 2,
  ExternalStatePolicy_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::min(),
  ExternalStatePolicy_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::int32_t>::max()
};
bool ExternalStatePolicy_IsValid(int value);
constexpr ExternalStatePolicy ExternalStatePolicy_MIN = POLICY_WARN;
constexpr ExternalStatePolicy ExternalStatePolicy_MAX = POLICY_FAIL;
constexpr int ExternalStatePolicy_ARRAYSIZE = ExternalStatePolicy_MAX + 1;

const std::string& ExternalStatePolicy_Name(ExternalStatePolicy value);
template<typename T>
inline const std::string& ExternalStatePolicy_Name(T enum_t_value) {
  static_assert(::std::is_same<T, ExternalStatePolicy>::value ||
    ::std::is_integral<T>::value,
    "Incorrect type passed to function ExternalStatePolicy_Name.");
  return ExternalStatePolicy_Name(static_cast<ExternalStatePolicy>(enum_t_value));
}
bool ExternalStatePolicy_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ExternalStatePolicy* value);
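// Illustrative usage sketch (editor's note, not part of the generated header):
// the free functions declared above convert between the file-level enums and
// their proto names. A hedged example, assuming this header and the generated
// dataset_options.pb.cc are linked into the target:
//
//   std::string name = tensorflow::data::AutoShardPolicy_Name(tensorflow::data::HINT);  // "HINT"
//   tensorflow::data::AutoShardPolicy parsed;
//   if (tensorflow::data::AutoShardPolicy_Parse("DATA", &parsed)) {
//     // parsed == tensorflow::data::DATA
//   }
//   bool valid = tensorflow::data::ExternalStatePolicy_IsValid(2);  // POLICY_FAIL -> true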
// ===================================================================

class AutotuneOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.data.AutotuneOptions) */ {
 public:
  inline AutotuneOptions() : AutotuneOptions(nullptr) {}
  ~AutotuneOptions() override;
  explicit PROTOBUF_CONSTEXPR AutotuneOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  AutotuneOptions(const AutotuneOptions& from);
  AutotuneOptions(AutotuneOptions&& from) noexcept
    : AutotuneOptions() {
    *this = ::std::move(from);
  }

  inline AutotuneOptions& operator=(const AutotuneOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline AutotuneOptions& operator=(AutotuneOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
#endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const AutotuneOptions& default_instance() {
    return *internal_default_instance();
  }
  enum OptionalEnabledCase {
    kEnabled = 1,
    OPTIONAL_ENABLED_NOT_SET = 0,
  };

  enum OptionalCpuBudgetCase {
    kCpuBudget = 2,
    OPTIONAL_CPU_BUDGET_NOT_SET = 0,
  };

  enum OptionalRamBudgetCase {
    kRamBudget = 3,
    OPTIONAL_RAM_BUDGET_NOT_SET = 0,
  };

  enum OptionalAutotuneAlgorithmCase {
    kAutotuneAlgorithm = 4,
    OPTIONAL_AUTOTUNE_ALGORITHM_NOT_SET = 0,
  };

  static inline const AutotuneOptions* internal_default_instance() {
    return reinterpret_cast<const AutotuneOptions*>(
               &_AutotuneOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    0;

  friend void swap(AutotuneOptions& a, AutotuneOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(AutotuneOptions* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(AutotuneOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  AutotuneOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<AutotuneOptions>(arena);
  }
  AutotuneOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const AutotuneOptions& from);
  void MergeFrom(const AutotuneOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(AutotuneOptions* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.data.AutotuneOptions";
  }
 protected:
  explicit AutotuneOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                           bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kEnabledFieldNumber = 1,
    kCpuBudgetFieldNumber = 2,
    kRamBudgetFieldNumber = 3,
    kAutotuneAlgorithmFieldNumber = 4,
  };
  // bool enabled = 1;
  bool has_enabled() const;
 private:
  bool _internal_has_enabled() const;
 public:
  void clear_enabled();
  bool enabled() const;
  void set_enabled(bool value);
 private:
  bool _internal_enabled() const;
  void _internal_set_enabled(bool value);
 public:

  // int32 cpu_budget = 2;
  bool has_cpu_budget() const;
 private:
  bool _internal_has_cpu_budget() const;
 public:
  void clear_cpu_budget();
  ::int32_t cpu_budget() const;
  void set_cpu_budget(::int32_t value);
 private:
  ::int32_t _internal_cpu_budget() const;
  void _internal_set_cpu_budget(::int32_t value);
 public:

  // int64 ram_budget = 3;
  bool has_ram_budget() const;
 private:
  bool _internal_has_ram_budget() const;
 public:
  void clear_ram_budget();
  ::int64_t ram_budget() const;
  void set_ram_budget(::int64_t value);
 private:
  ::int64_t _internal_ram_budget() const;
  void _internal_set_ram_budget(::int64_t value);
 public:

  // .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4;
  bool has_autotune_algorithm() const;
 private:
  bool _internal_has_autotune_algorithm() const;
 public:
  void clear_autotune_algorithm();
  ::tensorflow::data::model::AutotuneAlgorithm autotune_algorithm() const;
  void set_autotune_algorithm(::tensorflow::data::model::AutotuneAlgorithm value);
 private:
  ::tensorflow::data::model::AutotuneAlgorithm _internal_autotune_algorithm() const;
  void _internal_set_autotune_algorithm(::tensorflow::data::model::AutotuneAlgorithm value);
 public:

  void clear_optional_enabled();
  OptionalEnabledCase optional_enabled_case() const;
  void clear_optional_cpu_budget();
  OptionalCpuBudgetCase optional_cpu_budget_case() const;
  void clear_optional_ram_budget();
  OptionalRamBudgetCase optional_ram_budget_case() const;
  void clear_optional_autotune_algorithm();
  OptionalAutotuneAlgorithmCase optional_autotune_algorithm_case() const;
  // @@protoc_insertion_point(class_scope:tensorflow.data.AutotuneOptions)
 private:
  class _Internal;
  void set_has_enabled();
  void set_has_cpu_budget();
  void set_has_ram_budget();
  void set_has_autotune_algorithm();

  inline bool has_optional_enabled() const;
  inline void clear_has_optional_enabled();

  inline bool has_optional_cpu_budget() const;
  inline void clear_has_optional_cpu_budget();

  inline bool has_optional_ram_budget() const;
  inline void clear_has_optional_ram_budget();

  inline bool has_optional_autotune_algorithm() const;
  inline void clear_has_optional_autotune_algorithm();

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    union OptionalEnabledUnion {
      constexpr OptionalEnabledUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool enabled_;
    } optional_enabled_;
    union OptionalCpuBudgetUnion {
      constexpr OptionalCpuBudgetUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      ::int32_t cpu_budget_;
    } optional_cpu_budget_;
    union OptionalRamBudgetUnion {
      constexpr OptionalRamBudgetUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      ::int64_t ram_budget_;
    } optional_ram_budget_;
    union OptionalAutotuneAlgorithmUnion {
      constexpr OptionalAutotuneAlgorithmUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      int autotune_algorithm_;
    } optional_autotune_algorithm_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
    ::uint32_t _oneof_case_[4];

  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fframework_2fdataset_5foptions_2eproto;
};
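// Illustrative usage sketch (editor's note, not part of the generated code):
// each AutotuneOptions field sits in a synthetic one-field oneof, so presence
// is reported through has_*() and the *_case() accessors rather than through
// default values. A hedged example, assuming the generated
// dataset_options.pb.cc is linked in:
//
//   tensorflow::data::AutotuneOptions autotune;
//   autotune.set_enabled(true);
//   autotune.set_ram_budget(1LL << 30);
//   if (autotune.has_cpu_budget()) {
//     int32_t budget = autotune.cpu_budget();   // only meaningful when set
//   }
//   autotune.clear_optional_ram_budget();       // back to OPTIONAL_RAM_BUDGET_NOT_SET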
// -------------------------------------------------------------------

class CardinalityOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.data.CardinalityOptions) */ {
 public:
  inline CardinalityOptions() : CardinalityOptions(nullptr) {}
  ~CardinalityOptions() override;
  explicit PROTOBUF_CONSTEXPR CardinalityOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  CardinalityOptions(const CardinalityOptions& from);
  CardinalityOptions(CardinalityOptions&& from) noexcept
    : CardinalityOptions() {
    *this = ::std::move(from);
  }

  inline CardinalityOptions& operator=(const CardinalityOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline CardinalityOptions& operator=(CardinalityOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
#endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const CardinalityOptions& default_instance() {
    return *internal_default_instance();
  }
  static inline const CardinalityOptions* internal_default_instance() {
    return reinterpret_cast<const CardinalityOptions*>(
               &_CardinalityOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    1;

  friend void swap(CardinalityOptions& a, CardinalityOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(CardinalityOptions* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(CardinalityOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  CardinalityOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<CardinalityOptions>(arena);
  }
  CardinalityOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const CardinalityOptions& from);
  void MergeFrom(const CardinalityOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(CardinalityOptions* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.data.CardinalityOptions";
  }
 protected:
  explicit CardinalityOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                              bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  typedef CardinalityOptions_ComputeLevel ComputeLevel;
  static constexpr ComputeLevel CARDINALITY_COMPUTE_UNSPECIFIED =
    CardinalityOptions_ComputeLevel_CARDINALITY_COMPUTE_UNSPECIFIED;
  static constexpr ComputeLevel CARDINALITY_COMPUTE_LOW =
    CardinalityOptions_ComputeLevel_CARDINALITY_COMPUTE_LOW;
  static constexpr ComputeLevel CARDINALITY_COMPUTE_MODERATE =
    CardinalityOptions_ComputeLevel_CARDINALITY_COMPUTE_MODERATE;
  static inline bool ComputeLevel_IsValid(int value) {
    return CardinalityOptions_ComputeLevel_IsValid(value);
  }
  static constexpr ComputeLevel ComputeLevel_MIN =
    CardinalityOptions_ComputeLevel_ComputeLevel_MIN;
  static constexpr ComputeLevel ComputeLevel_MAX =
    CardinalityOptions_ComputeLevel_ComputeLevel_MAX;
  static constexpr int ComputeLevel_ARRAYSIZE =
    CardinalityOptions_ComputeLevel_ComputeLevel_ARRAYSIZE;
  template<typename T>
  static inline const std::string& ComputeLevel_Name(T enum_t_value) {
    static_assert(::std::is_same<T, ComputeLevel>::value ||
      ::std::is_integral<T>::value,
      "Incorrect type passed to function ComputeLevel_Name.");
    return CardinalityOptions_ComputeLevel_Name(enum_t_value);
  }
  static inline bool ComputeLevel_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name,
      ComputeLevel* value) {
    return CardinalityOptions_ComputeLevel_Parse(name, value);
  }

  // accessors -------------------------------------------------------

  enum : int {
    kComputeLevelFieldNumber = 1,
  };
  // .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1;
  void clear_compute_level();
  ::tensorflow::data::CardinalityOptions_ComputeLevel compute_level() const;
  void set_compute_level(::tensorflow::data::CardinalityOptions_ComputeLevel value);
 private:
  ::tensorflow::data::CardinalityOptions_ComputeLevel _internal_compute_level() const;
  void _internal_set_compute_level(::tensorflow::data::CardinalityOptions_ComputeLevel value);
 public:

  // @@protoc_insertion_point(class_scope:tensorflow.data.CardinalityOptions)
 private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    int compute_level_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fframework_2fdataset_5foptions_2eproto;
};
// -------------------------------------------------------------------

class DistributeOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.data.DistributeOptions) */ {
 public:
  inline DistributeOptions() : DistributeOptions(nullptr) {}
  ~DistributeOptions() override;
  explicit PROTOBUF_CONSTEXPR DistributeOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  DistributeOptions(const DistributeOptions& from);
  DistributeOptions(DistributeOptions&& from) noexcept
    : DistributeOptions() {
    *this = ::std::move(from);
  }

  inline DistributeOptions& operator=(const DistributeOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline DistributeOptions& operator=(DistributeOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
#endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const DistributeOptions& default_instance() {
    return *internal_default_instance();
  }
  enum OptionalNumDevicesCase {
    kNumDevices = 2,
    OPTIONAL_NUM_DEVICES_NOT_SET = 0,
  };

  static inline const DistributeOptions* internal_default_instance() {
    return reinterpret_cast<const DistributeOptions*>(
               &_DistributeOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    2;

  friend void swap(DistributeOptions& a, DistributeOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(DistributeOptions* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(DistributeOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  DistributeOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<DistributeOptions>(arena);
  }
  DistributeOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const DistributeOptions& from);
  void MergeFrom(const DistributeOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(DistributeOptions* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.data.DistributeOptions";
  }
 protected:
  explicit DistributeOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                             bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kAutoShardPolicyFieldNumber = 1,
    kNumDevicesFieldNumber = 2,
  };
  // .tensorflow.data.AutoShardPolicy auto_shard_policy = 1;
  void clear_auto_shard_policy();
  ::tensorflow::data::AutoShardPolicy auto_shard_policy() const;
  void set_auto_shard_policy(::tensorflow::data::AutoShardPolicy value);
 private:
  ::tensorflow::data::AutoShardPolicy _internal_auto_shard_policy() const;
  void _internal_set_auto_shard_policy(::tensorflow::data::AutoShardPolicy value);
 public:

  // int32 num_devices = 2;
  bool has_num_devices() const;
 private:
  bool _internal_has_num_devices() const;
 public:
  void clear_num_devices();
  ::int32_t num_devices() const;
  void set_num_devices(::int32_t value);
 private:
  ::int32_t _internal_num_devices() const;
  void _internal_set_num_devices(::int32_t value);
 public:

  void clear_optional_num_devices();
  OptionalNumDevicesCase optional_num_devices_case() const;
  // @@protoc_insertion_point(class_scope:tensorflow.data.DistributeOptions)
 private:
  class _Internal;
  void set_has_num_devices();

  inline bool has_optional_num_devices() const;
  inline void clear_has_optional_num_devices();

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    int auto_shard_policy_;
    union OptionalNumDevicesUnion {
      constexpr OptionalNumDevicesUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      ::int32_t num_devices_;
    } optional_num_devices_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
    ::uint32_t _oneof_case_[1];

  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fframework_2fdataset_5foptions_2eproto;
};
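// Illustrative usage sketch (editor's note, not part of the generated code):
// num_devices is wrapped in a synthetic oneof, while auto_shard_policy is a
// plain enum field. A hedged example, assuming the generated
// dataset_options.pb.cc is linked in:
//
//   tensorflow::data::DistributeOptions distribute;
//   distribute.set_auto_shard_policy(tensorflow::data::FILE);
//   distribute.set_num_devices(4);
//   bool explicitly_set = distribute.has_num_devices();  // true only after set_num_devices()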
// -------------------------------------------------------------------

class OptimizationOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.data.OptimizationOptions) */ {
 public:
  inline OptimizationOptions() : OptimizationOptions(nullptr) {}
  ~OptimizationOptions() override;
  explicit PROTOBUF_CONSTEXPR OptimizationOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  OptimizationOptions(const OptimizationOptions& from);
  OptimizationOptions(OptimizationOptions&& from) noexcept
    : OptimizationOptions() {
    *this = ::std::move(from);
  }

  inline OptimizationOptions& operator=(const OptimizationOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline OptimizationOptions& operator=(OptimizationOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
#endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const OptimizationOptions& default_instance() {
    return *internal_default_instance();
  }
  enum OptionalApplyDefaultOptimizationsCase {
    kApplyDefaultOptimizations = 1,
    OPTIONAL_APPLY_DEFAULT_OPTIMIZATIONS_NOT_SET = 0,
  };

  enum OptionalFilterFusionCase {
    kFilterFusion = 6,
    OPTIONAL_FILTER_FUSION_NOT_SET = 0,
  };

  enum OptionalMapAndBatchFusionCase {
    kMapAndBatchFusion = 9,
    OPTIONAL_MAP_AND_BATCH_FUSION_NOT_SET = 0,
  };

  enum OptionalMapAndFilterFusionCase {
    kMapAndFilterFusion = 10,
    OPTIONAL_MAP_AND_FILTER_FUSION_NOT_SET = 0,
  };

  enum OptionalMapFusionCase {
    kMapFusion = 11,
    OPTIONAL_MAP_FUSION_NOT_SET = 0,
  };

  enum OptionalMapParallelizationCase {
    kMapParallelization = 12,
    OPTIONAL_MAP_PARALLELIZATION_NOT_SET = 0,
  };

  enum OptionalNoopEliminationCase {
    kNoopElimination = 14,
    OPTIONAL_NOOP_ELIMINATION_NOT_SET = 0,
  };

  enum OptionalParallelBatchCase {
    kParallelBatch = 15,
    OPTIONAL_PARALLEL_BATCH_NOT_SET = 0,
  };

  enum OptionalShuffleAndRepeatFusionCase {
    kShuffleAndRepeatFusion = 17,
    OPTIONAL_SHUFFLE_AND_REPEAT_FUSION_NOT_SET = 0,
  };

  enum OptionalFilterParallelizationCase {
    kFilterParallelization = 18,
    OPTIONAL_FILTER_PARALLELIZATION_NOT_SET = 0,
  };

  enum OptionalInjectPrefetchCase {
    kInjectPrefetch = 19,
    OPTIONAL_INJECT_PREFETCH_NOT_SET = 0,
  };

  static inline const OptimizationOptions* internal_default_instance() {
    return reinterpret_cast<const OptimizationOptions*>(
               &_OptimizationOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    3;

  friend void swap(OptimizationOptions& a, OptimizationOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(OptimizationOptions* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(OptimizationOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  OptimizationOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<OptimizationOptions>(arena);
  }
  OptimizationOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const OptimizationOptions& from);
  void MergeFrom(const OptimizationOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(OptimizationOptions* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.data.OptimizationOptions";
  }
 protected:
  explicit OptimizationOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                               bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kApplyDefaultOptimizationsFieldNumber = 1,
    kFilterFusionFieldNumber = 6,
    kMapAndBatchFusionFieldNumber = 9,
    kMapAndFilterFusionFieldNumber = 10,
    kMapFusionFieldNumber = 11,
    kMapParallelizationFieldNumber = 12,
    kNoopEliminationFieldNumber = 14,
    kParallelBatchFieldNumber = 15,
    kShuffleAndRepeatFusionFieldNumber = 17,
    kFilterParallelizationFieldNumber = 18,
    kInjectPrefetchFieldNumber = 19,
  };
  // bool apply_default_optimizations = 1;
  bool has_apply_default_optimizations() const;
 private:
  bool _internal_has_apply_default_optimizations() const;
 public:
  void clear_apply_default_optimizations();
  bool apply_default_optimizations() const;
  void set_apply_default_optimizations(bool value);
 private:
  bool _internal_apply_default_optimizations() const;
  void _internal_set_apply_default_optimizations(bool value);
 public:

  // bool filter_fusion = 6;
  bool has_filter_fusion() const;
 private:
  bool _internal_has_filter_fusion() const;
 public:
  void clear_filter_fusion();
  bool filter_fusion() const;
  void set_filter_fusion(bool value);
 private:
  bool _internal_filter_fusion() const;
  void _internal_set_filter_fusion(bool value);
 public:

  // bool map_and_batch_fusion = 9;
  bool has_map_and_batch_fusion() const;
 private:
  bool _internal_has_map_and_batch_fusion() const;
 public:
  void clear_map_and_batch_fusion();
  bool map_and_batch_fusion() const;
  void set_map_and_batch_fusion(bool value);
 private:
  bool _internal_map_and_batch_fusion() const;
  void _internal_set_map_and_batch_fusion(bool value);
 public:

  // bool map_and_filter_fusion = 10;
  bool has_map_and_filter_fusion() const;
 private:
  bool _internal_has_map_and_filter_fusion() const;
 public:
  void clear_map_and_filter_fusion();
  bool map_and_filter_fusion() const;
  void set_map_and_filter_fusion(bool value);
 private:
  bool _internal_map_and_filter_fusion() const;
  void _internal_set_map_and_filter_fusion(bool value);
 public:

  // bool map_fusion = 11;
  bool has_map_fusion() const;
 private:
  bool _internal_has_map_fusion() const;
 public:
  void clear_map_fusion();
  bool map_fusion() const;
  void set_map_fusion(bool value);
 private:
  bool _internal_map_fusion() const;
  void _internal_set_map_fusion(bool value);
 public:

  // bool map_parallelization = 12;
  bool has_map_parallelization() const;
 private:
  bool _internal_has_map_parallelization() const;
 public:
  void clear_map_parallelization();
  bool map_parallelization() const;
  void set_map_parallelization(bool value);
 private:
  bool _internal_map_parallelization() const;
  void _internal_set_map_parallelization(bool value);
 public:

  // bool noop_elimination = 14;
  bool has_noop_elimination() const;
 private:
  bool _internal_has_noop_elimination() const;
 public:
  void clear_noop_elimination();
  bool noop_elimination() const;
  void set_noop_elimination(bool value);
 private:
  bool _internal_noop_elimination() const;
  void _internal_set_noop_elimination(bool value);
 public:

  // bool parallel_batch = 15;
  bool has_parallel_batch() const;
 private:
  bool _internal_has_parallel_batch() const;
 public:
  void clear_parallel_batch();
  bool parallel_batch() const;
  void set_parallel_batch(bool value);
 private:
  bool _internal_parallel_batch() const;
  void _internal_set_parallel_batch(bool value);
 public:

  // bool shuffle_and_repeat_fusion = 17;
  bool has_shuffle_and_repeat_fusion() const;
 private:
  bool _internal_has_shuffle_and_repeat_fusion() const;
 public:
  void clear_shuffle_and_repeat_fusion();
  bool shuffle_and_repeat_fusion() const;
  void set_shuffle_and_repeat_fusion(bool value);
 private:
  bool _internal_shuffle_and_repeat_fusion() const;
  void _internal_set_shuffle_and_repeat_fusion(bool value);
 public:

  // bool filter_parallelization = 18;
  bool has_filter_parallelization() const;
 private:
  bool _internal_has_filter_parallelization() const;
 public:
  void clear_filter_parallelization();
  bool filter_parallelization() const;
  void set_filter_parallelization(bool value);
 private:
  bool _internal_filter_parallelization() const;
  void _internal_set_filter_parallelization(bool value);
 public:

  // bool inject_prefetch = 19;
  bool has_inject_prefetch() const;
 private:
  bool _internal_has_inject_prefetch() const;
 public:
  void clear_inject_prefetch();
  bool inject_prefetch() const;
  void set_inject_prefetch(bool value);
 private:
  bool _internal_inject_prefetch() const;
  void _internal_set_inject_prefetch(bool value);
 public:

  void clear_optional_apply_default_optimizations();
  OptionalApplyDefaultOptimizationsCase optional_apply_default_optimizations_case() const;
  void clear_optional_filter_fusion();
  OptionalFilterFusionCase optional_filter_fusion_case() const;
  void clear_optional_map_and_batch_fusion();
  OptionalMapAndBatchFusionCase optional_map_and_batch_fusion_case() const;
  void clear_optional_map_and_filter_fusion();
  OptionalMapAndFilterFusionCase optional_map_and_filter_fusion_case() const;
  void clear_optional_map_fusion();
  OptionalMapFusionCase optional_map_fusion_case() const;
  void clear_optional_map_parallelization();
  OptionalMapParallelizationCase optional_map_parallelization_case() const;
  void clear_optional_noop_elimination();
  OptionalNoopEliminationCase optional_noop_elimination_case() const;
  void clear_optional_parallel_batch();
  OptionalParallelBatchCase optional_parallel_batch_case() const;
  void clear_optional_shuffle_and_repeat_fusion();
  OptionalShuffleAndRepeatFusionCase optional_shuffle_and_repeat_fusion_case() const;
  void clear_optional_filter_parallelization();
  OptionalFilterParallelizationCase optional_filter_parallelization_case() const;
  void clear_optional_inject_prefetch();
  OptionalInjectPrefetchCase optional_inject_prefetch_case() const;
  // @@protoc_insertion_point(class_scope:tensorflow.data.OptimizationOptions)
 private:
  class _Internal;
  void set_has_apply_default_optimizations();
  void set_has_filter_fusion();
  void set_has_map_and_batch_fusion();
  void set_has_map_and_filter_fusion();
  void set_has_map_fusion();
  void set_has_map_parallelization();
  void set_has_noop_elimination();
  void set_has_parallel_batch();
  void set_has_shuffle_and_repeat_fusion();
  void set_has_filter_parallelization();
  void set_has_inject_prefetch();

  inline bool has_optional_apply_default_optimizations() const;
  inline void clear_has_optional_apply_default_optimizations();

  inline bool has_optional_filter_fusion() const;
  inline void clear_has_optional_filter_fusion();

  inline bool has_optional_map_and_batch_fusion() const;
  inline void clear_has_optional_map_and_batch_fusion();

  inline bool has_optional_map_and_filter_fusion() const;
  inline void clear_has_optional_map_and_filter_fusion();

  inline bool has_optional_map_fusion() const;
  inline void clear_has_optional_map_fusion();

  inline bool has_optional_map_parallelization() const;
  inline void clear_has_optional_map_parallelization();

  inline bool has_optional_noop_elimination() const;
  inline void clear_has_optional_noop_elimination();

  inline bool has_optional_parallel_batch() const;
  inline void clear_has_optional_parallel_batch();

  inline bool has_optional_shuffle_and_repeat_fusion() const;
  inline void clear_has_optional_shuffle_and_repeat_fusion();

  inline bool has_optional_filter_parallelization() const;
  inline void clear_has_optional_filter_parallelization();

  inline bool has_optional_inject_prefetch() const;
  inline void clear_has_optional_inject_prefetch();

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    union OptionalApplyDefaultOptimizationsUnion {
      constexpr OptionalApplyDefaultOptimizationsUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool apply_default_optimizations_;
    } optional_apply_default_optimizations_;
    union OptionalFilterFusionUnion {
      constexpr OptionalFilterFusionUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool filter_fusion_;
    } optional_filter_fusion_;
    union OptionalMapAndBatchFusionUnion {
      constexpr OptionalMapAndBatchFusionUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool map_and_batch_fusion_;
    } optional_map_and_batch_fusion_;
    union OptionalMapAndFilterFusionUnion {
      constexpr OptionalMapAndFilterFusionUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool map_and_filter_fusion_;
    } optional_map_and_filter_fusion_;
    union OptionalMapFusionUnion {
      constexpr OptionalMapFusionUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool map_fusion_;
    } optional_map_fusion_;
    union OptionalMapParallelizationUnion {
      constexpr OptionalMapParallelizationUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool map_parallelization_;
    } optional_map_parallelization_;
    union OptionalNoopEliminationUnion {
      constexpr OptionalNoopEliminationUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool noop_elimination_;
    } optional_noop_elimination_;
    union OptionalParallelBatchUnion {
      constexpr OptionalParallelBatchUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool parallel_batch_;
    } optional_parallel_batch_;
    union OptionalShuffleAndRepeatFusionUnion {
      constexpr OptionalShuffleAndRepeatFusionUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool shuffle_and_repeat_fusion_;
    } optional_shuffle_and_repeat_fusion_;
    union OptionalFilterParallelizationUnion {
      constexpr OptionalFilterParallelizationUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool filter_parallelization_;
    } optional_filter_parallelization_;
    union OptionalInjectPrefetchUnion {
      constexpr OptionalInjectPrefetchUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool inject_prefetch_;
    } optional_inject_prefetch_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
    ::uint32_t _oneof_case_[11];

  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fframework_2fdataset_5foptions_2eproto;
};
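// Illustrative usage sketch (editor's note, not part of the generated code):
// every optimization toggle above is an optional bool behind its own one-field
// oneof, so an unset toggle means "no preference recorded" rather than
// "disabled". A hedged example with hypothetical values:
//
//   tensorflow::data::OptimizationOptions optimization;
//   optimization.set_apply_default_optimizations(true);
//   optimization.set_map_and_batch_fusion(false);      // explicitly opt out
//   if (!optimization.has_noop_elimination()) {
//     // nothing recorded for this toggle; the consumer picks its own default
//   }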
// -------------------------------------------------------------------

class ThreadingOptions final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.data.ThreadingOptions) */ {
 public:
  inline ThreadingOptions() : ThreadingOptions(nullptr) {}
  ~ThreadingOptions() override;
  explicit PROTOBUF_CONSTEXPR ThreadingOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  ThreadingOptions(const ThreadingOptions& from);
  ThreadingOptions(ThreadingOptions&& from) noexcept
    : ThreadingOptions() {
    *this = ::std::move(from);
  }

  inline ThreadingOptions& operator=(const ThreadingOptions& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline ThreadingOptions& operator=(ThreadingOptions&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
#endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const ThreadingOptions& default_instance() {
    return *internal_default_instance();
  }
  enum OptionalMaxIntraOpParallelismCase {
    kMaxIntraOpParallelism = 1,
    OPTIONAL_MAX_INTRA_OP_PARALLELISM_NOT_SET = 0,
  };

  enum OptionalPrivateThreadpoolSizeCase {
    kPrivateThreadpoolSize = 2,
    OPTIONAL_PRIVATE_THREADPOOL_SIZE_NOT_SET = 0,
  };

  static inline const ThreadingOptions* internal_default_instance() {
    return reinterpret_cast<const ThreadingOptions*>(
               &_ThreadingOptions_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    4;

  friend void swap(ThreadingOptions& a, ThreadingOptions& b) {
    a.Swap(&b);
  }
  inline void Swap(ThreadingOptions* other) {
    if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
#else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
#endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(ThreadingOptions* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  ThreadingOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<ThreadingOptions>(arena);
  }
  ThreadingOptions* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const ThreadingOptions& from);
  void MergeFrom(const ThreadingOptions& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

 private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(ThreadingOptions* other);

 private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.data.ThreadingOptions";
  }
 protected:
  explicit ThreadingOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                            bool is_message_owned = false);
 public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kMaxIntraOpParallelismFieldNumber = 1,
    kPrivateThreadpoolSizeFieldNumber = 2,
  };
  // int32 max_intra_op_parallelism = 1;
  bool has_max_intra_op_parallelism() const;
 private:
  bool _internal_has_max_intra_op_parallelism() const;
 public:
  void clear_max_intra_op_parallelism();
  ::int32_t max_intra_op_parallelism() const;
  void set_max_intra_op_parallelism(::int32_t value);
 private:
  ::int32_t _internal_max_intra_op_parallelism() const;
  void _internal_set_max_intra_op_parallelism(::int32_t value);
 public:

  // int32 private_threadpool_size = 2;
  bool has_private_threadpool_size() const;
 private:
  bool _internal_has_private_threadpool_size() const;
 public:
  void clear_private_threadpool_size();
  ::int32_t private_threadpool_size() const;
  void set_private_threadpool_size(::int32_t value);
 private:
  ::int32_t _internal_private_threadpool_size() const;
  void _internal_set_private_threadpool_size(::int32_t value);
 public:

  void clear_optional_max_intra_op_parallelism();
  OptionalMaxIntraOpParallelismCase optional_max_intra_op_parallelism_case() const;
  void clear_optional_private_threadpool_size();
  OptionalPrivateThreadpoolSizeCase optional_private_threadpool_size_case() const;
  // @@protoc_insertion_point(class_scope:tensorflow.data.ThreadingOptions)
 private:
  class _Internal;
  void set_has_max_intra_op_parallelism();
  void set_has_private_threadpool_size();

  inline bool has_optional_max_intra_op_parallelism() const;
  inline void clear_has_optional_max_intra_op_parallelism();

  inline bool has_optional_private_threadpool_size() const;
  inline void clear_has_optional_private_threadpool_size();

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    union OptionalMaxIntraOpParallelismUnion {
      constexpr OptionalMaxIntraOpParallelismUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      ::int32_t max_intra_op_parallelism_;
    } optional_max_intra_op_parallelism_;
    union OptionalPrivateThreadpoolSizeUnion {
      constexpr OptionalPrivateThreadpoolSizeUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      ::int32_t private_threadpool_size_;
    } optional_private_threadpool_size_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
    ::uint32_t _oneof_case_[2];

  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fframework_2fdataset_5foptions_2eproto;
};
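// Illustrative usage sketch (editor's note, not part of the generated code):
// both threading knobs are optional int32 fields behind one-field oneofs.
// A hedged example:
//
//   tensorflow::data::ThreadingOptions threading;
//   threading.set_private_threadpool_size(16);
//   if (threading.optional_max_intra_op_parallelism_case() ==
//       tensorflow::data::ThreadingOptions::kMaxIntraOpParallelism) {
//     int32_t limit = threading.max_intra_op_parallelism();
//   }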
1361 // -------------------------------------------------------------------
1362
1363 class Options final :
1364 public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.data.Options) */ {
1365 public:
Options()1366 inline Options() : Options(nullptr) {}
1367 ~Options() override;
1368 explicit PROTOBUF_CONSTEXPR Options(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
1369
1370 Options(const Options& from);
Options(Options && from)1371 Options(Options&& from) noexcept
1372 : Options() {
1373 *this = ::std::move(from);
1374 }
1375
1376 inline Options& operator=(const Options& from) {
1377 if (this == &from) return *this;
1378 CopyFrom(from);
1379 return *this;
1380 }
1381 inline Options& operator=(Options&& from) noexcept {
1382 if (this == &from) return *this;
1383 if (GetOwningArena() == from.GetOwningArena()
1384 #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
1385 && GetOwningArena() != nullptr
1386 #endif // !PROTOBUF_FORCE_COPY_IN_MOVE
1387 ) {
1388 InternalSwap(&from);
1389 } else {
1390 CopyFrom(from);
1391 }
1392 return *this;
1393 }
1394
default_instance()1395 static const Options& default_instance() {
1396 return *internal_default_instance();
1397 }
1398 enum OptionalDeterministicCase {
1399 kDeterministic = 1,
1400 OPTIONAL_DETERMINISTIC_NOT_SET = 0,
1401 };
1402
1403 enum OptionalSlackCase {
1404 kSlack = 4,
1405 OPTIONAL_SLACK_NOT_SET = 0,
1406 };
1407
1408 enum OptionalExternalStatePolicyCase {
1409 kExternalStatePolicy = 6,
1410 OPTIONAL_EXTERNAL_STATE_POLICY_NOT_SET = 0,
1411 };
1412
internal_default_instance()1413 static inline const Options* internal_default_instance() {
1414 return reinterpret_cast<const Options*>(
1415 &_Options_default_instance_);
1416 }
1417 static constexpr int kIndexInFileMessages =
1418 5;
1419
swap(Options & a,Options & b)1420 friend void swap(Options& a, Options& b) {
1421 a.Swap(&b);
1422 }
Swap(Options * other)1423 inline void Swap(Options* other) {
1424 if (other == this) return;
1425 #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
1426 if (GetOwningArena() != nullptr &&
1427 GetOwningArena() == other->GetOwningArena()) {
1428 #else // PROTOBUF_FORCE_COPY_IN_SWAP
1429 if (GetOwningArena() == other->GetOwningArena()) {
1430 #endif // !PROTOBUF_FORCE_COPY_IN_SWAP
1431 InternalSwap(other);
1432 } else {
1433 ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
1434 }
1435 }
1436 void UnsafeArenaSwap(Options* other) {
1437 if (other == this) return;
1438 GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
1439 InternalSwap(other);
1440 }
1441
1442 // implements Message ----------------------------------------------
1443
1444 Options* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
1445 return CreateMaybeMessage<Options>(arena);
1446 }
1447 Options* New() const {
1448 return New(nullptr);
1449 }
1450 void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
1451 void CopyFrom(const Options& from);
1452 void MergeFrom(const Options& from);
1453 PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
1454 bool IsInitialized() const final;
1455
1456 size_t ByteSizeLong() const final;
1457 const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
1458 ::uint8_t* _InternalSerialize(
1459 ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
1460 int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
1461
1462 private:
1463 void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
1464 void SharedDtor();
1465 void SetCachedSize(int size) const;
1466 void InternalSwap(Options* other);
1467
1468 private:
1469 friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
1470 static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
1471 return "tensorflow.data.Options";
1472 }
1473 protected:
1474 explicit Options(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1475 bool is_message_owned = false);
1476 public:
1477
1478 std::string GetTypeName() const final;
1479
1480 // nested types ----------------------------------------------------
1481
1482 // accessors -------------------------------------------------------
1483
1484 enum : int {
1485 kDistributeOptionsFieldNumber = 2,
1486 kOptimizationOptionsFieldNumber = 3,
1487 kThreadingOptionsFieldNumber = 5,
1488 kAutotuneOptionsFieldNumber = 7,
1489 kDeterministicFieldNumber = 1,
1490 kSlackFieldNumber = 4,
1491 kExternalStatePolicyFieldNumber = 6,
1492 };
  // .tensorflow.data.DistributeOptions distribute_options = 2;
  bool has_distribute_options() const;
  private:
  bool _internal_has_distribute_options() const;
  public:
  void clear_distribute_options();
  const ::tensorflow::data::DistributeOptions& distribute_options() const;
  PROTOBUF_NODISCARD ::tensorflow::data::DistributeOptions* release_distribute_options();
  ::tensorflow::data::DistributeOptions* mutable_distribute_options();
  void set_allocated_distribute_options(::tensorflow::data::DistributeOptions* distribute_options);
  private:
  const ::tensorflow::data::DistributeOptions& _internal_distribute_options() const;
  ::tensorflow::data::DistributeOptions* _internal_mutable_distribute_options();
  public:
  void unsafe_arena_set_allocated_distribute_options(
      ::tensorflow::data::DistributeOptions* distribute_options);
  ::tensorflow::data::DistributeOptions* unsafe_arena_release_distribute_options();

  // .tensorflow.data.OptimizationOptions optimization_options = 3;
  bool has_optimization_options() const;
  private:
  bool _internal_has_optimization_options() const;
  public:
  void clear_optimization_options();
  const ::tensorflow::data::OptimizationOptions& optimization_options() const;
  PROTOBUF_NODISCARD ::tensorflow::data::OptimizationOptions* release_optimization_options();
  ::tensorflow::data::OptimizationOptions* mutable_optimization_options();
  void set_allocated_optimization_options(::tensorflow::data::OptimizationOptions* optimization_options);
  private:
  const ::tensorflow::data::OptimizationOptions& _internal_optimization_options() const;
  ::tensorflow::data::OptimizationOptions* _internal_mutable_optimization_options();
  public:
  void unsafe_arena_set_allocated_optimization_options(
      ::tensorflow::data::OptimizationOptions* optimization_options);
  ::tensorflow::data::OptimizationOptions* unsafe_arena_release_optimization_options();

  // .tensorflow.data.ThreadingOptions threading_options = 5;
  bool has_threading_options() const;
  private:
  bool _internal_has_threading_options() const;
  public:
  void clear_threading_options();
  const ::tensorflow::data::ThreadingOptions& threading_options() const;
  PROTOBUF_NODISCARD ::tensorflow::data::ThreadingOptions* release_threading_options();
  ::tensorflow::data::ThreadingOptions* mutable_threading_options();
  void set_allocated_threading_options(::tensorflow::data::ThreadingOptions* threading_options);
  private:
  const ::tensorflow::data::ThreadingOptions& _internal_threading_options() const;
  ::tensorflow::data::ThreadingOptions* _internal_mutable_threading_options();
  public:
  void unsafe_arena_set_allocated_threading_options(
      ::tensorflow::data::ThreadingOptions* threading_options);
  ::tensorflow::data::ThreadingOptions* unsafe_arena_release_threading_options();

  // .tensorflow.data.AutotuneOptions autotune_options = 7;
  bool has_autotune_options() const;
  private:
  bool _internal_has_autotune_options() const;
  public:
  void clear_autotune_options();
  const ::tensorflow::data::AutotuneOptions& autotune_options() const;
  PROTOBUF_NODISCARD ::tensorflow::data::AutotuneOptions* release_autotune_options();
  ::tensorflow::data::AutotuneOptions* mutable_autotune_options();
  void set_allocated_autotune_options(::tensorflow::data::AutotuneOptions* autotune_options);
  private:
  const ::tensorflow::data::AutotuneOptions& _internal_autotune_options() const;
  ::tensorflow::data::AutotuneOptions* _internal_mutable_autotune_options();
  public:
  void unsafe_arena_set_allocated_autotune_options(
      ::tensorflow::data::AutotuneOptions* autotune_options);
  ::tensorflow::data::AutotuneOptions* unsafe_arena_release_autotune_options();

  // bool deterministic = 1;
  bool has_deterministic() const;
  private:
  bool _internal_has_deterministic() const;
  public:
  void clear_deterministic();
  bool deterministic() const;
  void set_deterministic(bool value);
  private:
  bool _internal_deterministic() const;
  void _internal_set_deterministic(bool value);
  public:

  // bool slack = 4;
  bool has_slack() const;
  private:
  bool _internal_has_slack() const;
  public:
  void clear_slack();
  bool slack() const;
  void set_slack(bool value);
  private:
  bool _internal_slack() const;
  void _internal_set_slack(bool value);
  public:

  // .tensorflow.data.ExternalStatePolicy external_state_policy = 6;
  bool has_external_state_policy() const;
  private:
  bool _internal_has_external_state_policy() const;
  public:
  void clear_external_state_policy();
  ::tensorflow::data::ExternalStatePolicy external_state_policy() const;
  void set_external_state_policy(::tensorflow::data::ExternalStatePolicy value);
  private:
  ::tensorflow::data::ExternalStatePolicy _internal_external_state_policy() const;
  void _internal_set_external_state_policy(::tensorflow::data::ExternalStatePolicy value);
  public:

  void clear_optional_deterministic();
  OptionalDeterministicCase optional_deterministic_case() const;
  void clear_optional_slack();
  OptionalSlackCase optional_slack_case() const;
  void clear_optional_external_state_policy();
  OptionalExternalStatePolicyCase optional_external_state_policy_case() const;
  // @@protoc_insertion_point(class_scope:tensorflow.data.Options)
  private:
  class _Internal;
  void set_has_deterministic();
  void set_has_slack();
  void set_has_external_state_policy();

  inline bool has_optional_deterministic() const;
  inline void clear_has_optional_deterministic();

  inline bool has_optional_slack() const;
  inline void clear_has_optional_slack();

  inline bool has_optional_external_state_policy() const;
  inline void clear_has_optional_external_state_policy();

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::tensorflow::data::DistributeOptions* distribute_options_;
    ::tensorflow::data::OptimizationOptions* optimization_options_;
    ::tensorflow::data::ThreadingOptions* threading_options_;
    ::tensorflow::data::AutotuneOptions* autotune_options_;
    union OptionalDeterministicUnion {
      constexpr OptionalDeterministicUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool deterministic_;
    } optional_deterministic_;
    union OptionalSlackUnion {
      constexpr OptionalSlackUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      bool slack_;
    } optional_slack_;
    union OptionalExternalStatePolicyUnion {
      constexpr OptionalExternalStatePolicyUnion() : _constinit_{} {}
      ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_;
      int external_state_policy_;
    } optional_external_state_policy_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
    ::uint32_t _oneof_case_[3];

  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fframework_2fdataset_5foptions_2eproto;
};
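// Illustrative usage sketch for the Options message above; the helper name is
// hypothetical, and only accessors declared in this header are exercised. A
// minimal sketch, assuming the usual protobuf-lite semantics: mutable_*()
// lazily creates a nested sub-message, has_*() reports its presence, and
// clear_*() is safe even when the sub-message was never created.
inline bool ExampleComposeOptions(Options* opts) {
  opts->mutable_autotune_options()->set_enabled(true);
  opts->mutable_threading_options()->set_private_threadpool_size(8);
  opts->clear_optimization_options();  // no-op when the sub-message was never created
  return opts->has_autotune_options() && !opts->has_distribute_options();
}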
// ===================================================================


// ===================================================================

#ifdef __GNUC__
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif // __GNUC__
// AutotuneOptions

// bool enabled = 1;
inline bool AutotuneOptions::_internal_has_enabled() const {
  return optional_enabled_case() == kEnabled;
}
inline bool AutotuneOptions::has_enabled() const {
  return _internal_has_enabled();
}
inline void AutotuneOptions::set_has_enabled() {
  _impl_._oneof_case_[0] = kEnabled;
}
inline void AutotuneOptions::clear_enabled() {
  if (_internal_has_enabled()) {
    _impl_.optional_enabled_.enabled_ = false;
    clear_has_optional_enabled();
  }
}
inline bool AutotuneOptions::_internal_enabled() const {
  if (_internal_has_enabled()) {
    return _impl_.optional_enabled_.enabled_;
  }
  return false;
}
inline void AutotuneOptions::_internal_set_enabled(bool value) {
  if (!_internal_has_enabled()) {
    clear_optional_enabled();
    set_has_enabled();
  }
  _impl_.optional_enabled_.enabled_ = value;
}
inline bool AutotuneOptions::enabled() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.AutotuneOptions.enabled)
  return _internal_enabled();
}
inline void AutotuneOptions::set_enabled(bool value) {
  _internal_set_enabled(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.AutotuneOptions.enabled)
}

// int32 cpu_budget = 2;
inline bool AutotuneOptions::_internal_has_cpu_budget() const {
  return optional_cpu_budget_case() == kCpuBudget;
}
inline bool AutotuneOptions::has_cpu_budget() const {
  return _internal_has_cpu_budget();
}
inline void AutotuneOptions::set_has_cpu_budget() {
  _impl_._oneof_case_[1] = kCpuBudget;
}
inline void AutotuneOptions::clear_cpu_budget() {
  if (_internal_has_cpu_budget()) {
    _impl_.optional_cpu_budget_.cpu_budget_ = 0;
    clear_has_optional_cpu_budget();
  }
}
inline ::int32_t AutotuneOptions::_internal_cpu_budget() const {
  if (_internal_has_cpu_budget()) {
    return _impl_.optional_cpu_budget_.cpu_budget_;
  }
  return 0;
}
inline void AutotuneOptions::_internal_set_cpu_budget(::int32_t value) {
  if (!_internal_has_cpu_budget()) {
    clear_optional_cpu_budget();
    set_has_cpu_budget();
  }
  _impl_.optional_cpu_budget_.cpu_budget_ = value;
}
inline ::int32_t AutotuneOptions::cpu_budget() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.AutotuneOptions.cpu_budget)
  return _internal_cpu_budget();
}
inline void AutotuneOptions::set_cpu_budget(::int32_t value) {
  _internal_set_cpu_budget(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.AutotuneOptions.cpu_budget)
}

// int64 ram_budget = 3;
inline bool AutotuneOptions::_internal_has_ram_budget() const {
  return optional_ram_budget_case() == kRamBudget;
}
inline bool AutotuneOptions::has_ram_budget() const {
  return _internal_has_ram_budget();
}
inline void AutotuneOptions::set_has_ram_budget() {
  _impl_._oneof_case_[2] = kRamBudget;
}
inline void AutotuneOptions::clear_ram_budget() {
  if (_internal_has_ram_budget()) {
    _impl_.optional_ram_budget_.ram_budget_ = ::int64_t{0};
    clear_has_optional_ram_budget();
  }
}
inline ::int64_t AutotuneOptions::_internal_ram_budget() const {
  if (_internal_has_ram_budget()) {
    return _impl_.optional_ram_budget_.ram_budget_;
  }
  return ::int64_t{0};
}
inline void AutotuneOptions::_internal_set_ram_budget(::int64_t value) {
  if (!_internal_has_ram_budget()) {
    clear_optional_ram_budget();
    set_has_ram_budget();
  }
  _impl_.optional_ram_budget_.ram_budget_ = value;
}
inline ::int64_t AutotuneOptions::ram_budget() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.AutotuneOptions.ram_budget)
  return _internal_ram_budget();
}
inline void AutotuneOptions::set_ram_budget(::int64_t value) {
  _internal_set_ram_budget(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.AutotuneOptions.ram_budget)
}

// .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4;
inline bool AutotuneOptions::_internal_has_autotune_algorithm() const {
  return optional_autotune_algorithm_case() == kAutotuneAlgorithm;
}
inline bool AutotuneOptions::has_autotune_algorithm() const {
  return _internal_has_autotune_algorithm();
}
inline void AutotuneOptions::set_has_autotune_algorithm() {
  _impl_._oneof_case_[3] = kAutotuneAlgorithm;
}
inline void AutotuneOptions::clear_autotune_algorithm() {
  if (_internal_has_autotune_algorithm()) {
    _impl_.optional_autotune_algorithm_.autotune_algorithm_ = 0;
    clear_has_optional_autotune_algorithm();
  }
}
inline ::tensorflow::data::model::AutotuneAlgorithm AutotuneOptions::_internal_autotune_algorithm() const {
  if (_internal_has_autotune_algorithm()) {
    return static_cast< ::tensorflow::data::model::AutotuneAlgorithm >(_impl_.optional_autotune_algorithm_.autotune_algorithm_);
  }
  return static_cast< ::tensorflow::data::model::AutotuneAlgorithm >(0);
}
inline ::tensorflow::data::model::AutotuneAlgorithm AutotuneOptions::autotune_algorithm() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.AutotuneOptions.autotune_algorithm)
  return _internal_autotune_algorithm();
}
inline void AutotuneOptions::_internal_set_autotune_algorithm(::tensorflow::data::model::AutotuneAlgorithm value) {
  if (!_internal_has_autotune_algorithm()) {
    clear_optional_autotune_algorithm();
    set_has_autotune_algorithm();
  }
  _impl_.optional_autotune_algorithm_.autotune_algorithm_ = value;
}
inline void AutotuneOptions::set_autotune_algorithm(::tensorflow::data::model::AutotuneAlgorithm value) {
  _internal_set_autotune_algorithm(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.AutotuneOptions.autotune_algorithm)
}

inline bool AutotuneOptions::has_optional_enabled() const {
  return optional_enabled_case() != OPTIONAL_ENABLED_NOT_SET;
}
inline void AutotuneOptions::clear_has_optional_enabled() {
  _impl_._oneof_case_[0] = OPTIONAL_ENABLED_NOT_SET;
}
inline bool AutotuneOptions::has_optional_cpu_budget() const {
  return optional_cpu_budget_case() != OPTIONAL_CPU_BUDGET_NOT_SET;
}
inline void AutotuneOptions::clear_has_optional_cpu_budget() {
  _impl_._oneof_case_[1] = OPTIONAL_CPU_BUDGET_NOT_SET;
}
inline bool AutotuneOptions::has_optional_ram_budget() const {
  return optional_ram_budget_case() != OPTIONAL_RAM_BUDGET_NOT_SET;
}
inline void AutotuneOptions::clear_has_optional_ram_budget() {
  _impl_._oneof_case_[2] = OPTIONAL_RAM_BUDGET_NOT_SET;
}
inline bool AutotuneOptions::has_optional_autotune_algorithm() const {
  return optional_autotune_algorithm_case() != OPTIONAL_AUTOTUNE_ALGORITHM_NOT_SET;
}
inline void AutotuneOptions::clear_has_optional_autotune_algorithm() {
  _impl_._oneof_case_[3] = OPTIONAL_AUTOTUNE_ALGORITHM_NOT_SET;
}
inline AutotuneOptions::OptionalEnabledCase AutotuneOptions::optional_enabled_case() const {
  return AutotuneOptions::OptionalEnabledCase(_impl_._oneof_case_[0]);
}
inline AutotuneOptions::OptionalCpuBudgetCase AutotuneOptions::optional_cpu_budget_case() const {
  return AutotuneOptions::OptionalCpuBudgetCase(_impl_._oneof_case_[1]);
}
inline AutotuneOptions::OptionalRamBudgetCase AutotuneOptions::optional_ram_budget_case() const {
  return AutotuneOptions::OptionalRamBudgetCase(_impl_._oneof_case_[2]);
}
inline AutotuneOptions::OptionalAutotuneAlgorithmCase AutotuneOptions::optional_autotune_algorithm_case() const {
  return AutotuneOptions::OptionalAutotuneAlgorithmCase(_impl_._oneof_case_[3]);
}
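// Illustrative sketch (hypothetical helper): the scalar fields above are wrapped
// in single-member oneofs, so has_*() distinguishes "explicitly set" from
// "defaulted", and *_case() reports which member, if any, is populated.
inline bool ExampleConfigureAutotune(AutotuneOptions* opts) {
  opts->set_enabled(true);                   // selects the kEnabled case
  opts->set_ram_budget(::int64_t{1} << 30);  // selects the kRamBudget case
  bool cpu_budget_was_set = opts->has_cpu_budget();  // false until set_cpu_budget()
  opts->clear_enabled();                     // back to OPTIONAL_ENABLED_NOT_SET
  return !cpu_budget_was_set &&
         opts->optional_ram_budget_case() == AutotuneOptions::kRamBudget;
}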
// -------------------------------------------------------------------

// CardinalityOptions

// .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1;
inline void CardinalityOptions::clear_compute_level() {
  _impl_.compute_level_ = 0;
}
inline ::tensorflow::data::CardinalityOptions_ComputeLevel CardinalityOptions::_internal_compute_level() const {
  return static_cast< ::tensorflow::data::CardinalityOptions_ComputeLevel >(_impl_.compute_level_);
}
inline ::tensorflow::data::CardinalityOptions_ComputeLevel CardinalityOptions::compute_level() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.CardinalityOptions.compute_level)
  return _internal_compute_level();
}
inline void CardinalityOptions::_internal_set_compute_level(::tensorflow::data::CardinalityOptions_ComputeLevel value) {

  _impl_.compute_level_ = value;
}
inline void CardinalityOptions::set_compute_level(::tensorflow::data::CardinalityOptions_ComputeLevel value) {
  _internal_set_compute_level(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.CardinalityOptions.compute_level)
}
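// Illustrative sketch (hypothetical helper): compute_level is a plain enum
// field rather than a oneof, so there is no has_compute_level(); an unset field
// simply reads back as the zero-valued enumerator.
inline ::tensorflow::data::CardinalityOptions_ComputeLevel
ExampleResetComputeLevel(CardinalityOptions* opts) {
  opts->clear_compute_level();   // stores 0
  return opts->compute_level();  // zero-valued enumerator until set_compute_level() is called
}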

// -------------------------------------------------------------------

// DistributeOptions

// .tensorflow.data.AutoShardPolicy auto_shard_policy = 1;
inline void DistributeOptions::clear_auto_shard_policy() {
  _impl_.auto_shard_policy_ = 0;
}
inline ::tensorflow::data::AutoShardPolicy DistributeOptions::_internal_auto_shard_policy() const {
  return static_cast< ::tensorflow::data::AutoShardPolicy >(_impl_.auto_shard_policy_);
}
inline ::tensorflow::data::AutoShardPolicy DistributeOptions::auto_shard_policy() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.DistributeOptions.auto_shard_policy)
  return _internal_auto_shard_policy();
}
inline void DistributeOptions::_internal_set_auto_shard_policy(::tensorflow::data::AutoShardPolicy value) {

  _impl_.auto_shard_policy_ = value;
}
inline void DistributeOptions::set_auto_shard_policy(::tensorflow::data::AutoShardPolicy value) {
  _internal_set_auto_shard_policy(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.DistributeOptions.auto_shard_policy)
}

// int32 num_devices = 2;
inline bool DistributeOptions::_internal_has_num_devices() const {
  return optional_num_devices_case() == kNumDevices;
}
inline bool DistributeOptions::has_num_devices() const {
  return _internal_has_num_devices();
}
inline void DistributeOptions::set_has_num_devices() {
  _impl_._oneof_case_[0] = kNumDevices;
}
inline void DistributeOptions::clear_num_devices() {
  if (_internal_has_num_devices()) {
    _impl_.optional_num_devices_.num_devices_ = 0;
    clear_has_optional_num_devices();
  }
}
inline ::int32_t DistributeOptions::_internal_num_devices() const {
  if (_internal_has_num_devices()) {
    return _impl_.optional_num_devices_.num_devices_;
  }
  return 0;
}
inline void DistributeOptions::_internal_set_num_devices(::int32_t value) {
  if (!_internal_has_num_devices()) {
    clear_optional_num_devices();
    set_has_num_devices();
  }
  _impl_.optional_num_devices_.num_devices_ = value;
}
inline ::int32_t DistributeOptions::num_devices() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.DistributeOptions.num_devices)
  return _internal_num_devices();
}
inline void DistributeOptions::set_num_devices(::int32_t value) {
  _internal_set_num_devices(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.DistributeOptions.num_devices)
}

inline bool DistributeOptions::has_optional_num_devices() const {
  return optional_num_devices_case() != OPTIONAL_NUM_DEVICES_NOT_SET;
}
inline void DistributeOptions::clear_has_optional_num_devices() {
  _impl_._oneof_case_[0] = OPTIONAL_NUM_DEVICES_NOT_SET;
}
inline DistributeOptions::OptionalNumDevicesCase DistributeOptions::optional_num_devices_case() const {
  return DistributeOptions::OptionalNumDevicesCase(_impl_._oneof_case_[0]);
}
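// Illustrative sketch (hypothetical helper): num_devices is oneof-wrapped and so
// can report whether it was explicitly set, while auto_shard_policy is a plain
// enum field with no presence bit.
inline bool ExampleConfigureDistribute(DistributeOptions* opts, ::int32_t devices) {
  opts->set_num_devices(devices);   // selects the kNumDevices case
  opts->clear_auto_shard_policy();  // plain enum field: resets to the zero enumerator
  return opts->has_num_devices();   // true; there is no has_auto_shard_policy()
}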
// -------------------------------------------------------------------

// OptimizationOptions

// bool apply_default_optimizations = 1;
inline bool OptimizationOptions::_internal_has_apply_default_optimizations() const {
  return optional_apply_default_optimizations_case() == kApplyDefaultOptimizations;
}
inline bool OptimizationOptions::has_apply_default_optimizations() const {
  return _internal_has_apply_default_optimizations();
}
inline void OptimizationOptions::set_has_apply_default_optimizations() {
  _impl_._oneof_case_[0] = kApplyDefaultOptimizations;
}
inline void OptimizationOptions::clear_apply_default_optimizations() {
  if (_internal_has_apply_default_optimizations()) {
    _impl_.optional_apply_default_optimizations_.apply_default_optimizations_ = false;
    clear_has_optional_apply_default_optimizations();
  }
}
inline bool OptimizationOptions::_internal_apply_default_optimizations() const {
  if (_internal_has_apply_default_optimizations()) {
    return _impl_.optional_apply_default_optimizations_.apply_default_optimizations_;
  }
  return false;
}
inline void OptimizationOptions::_internal_set_apply_default_optimizations(bool value) {
  if (!_internal_has_apply_default_optimizations()) {
    clear_optional_apply_default_optimizations();
    set_has_apply_default_optimizations();
  }
  _impl_.optional_apply_default_optimizations_.apply_default_optimizations_ = value;
}
inline bool OptimizationOptions::apply_default_optimizations() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.OptimizationOptions.apply_default_optimizations)
  return _internal_apply_default_optimizations();
}
inline void OptimizationOptions::set_apply_default_optimizations(bool value) {
  _internal_set_apply_default_optimizations(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.OptimizationOptions.apply_default_optimizations)
}

// bool filter_fusion = 6;
inline bool OptimizationOptions::_internal_has_filter_fusion() const {
  return optional_filter_fusion_case() == kFilterFusion;
}
inline bool OptimizationOptions::has_filter_fusion() const {
  return _internal_has_filter_fusion();
}
inline void OptimizationOptions::set_has_filter_fusion() {
  _impl_._oneof_case_[1] = kFilterFusion;
}
inline void OptimizationOptions::clear_filter_fusion() {
  if (_internal_has_filter_fusion()) {
    _impl_.optional_filter_fusion_.filter_fusion_ = false;
    clear_has_optional_filter_fusion();
  }
}
inline bool OptimizationOptions::_internal_filter_fusion() const {
  if (_internal_has_filter_fusion()) {
    return _impl_.optional_filter_fusion_.filter_fusion_;
  }
  return false;
}
inline void OptimizationOptions::_internal_set_filter_fusion(bool value) {
  if (!_internal_has_filter_fusion()) {
    clear_optional_filter_fusion();
    set_has_filter_fusion();
  }
  _impl_.optional_filter_fusion_.filter_fusion_ = value;
}
inline bool OptimizationOptions::filter_fusion() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.OptimizationOptions.filter_fusion)
  return _internal_filter_fusion();
}
inline void OptimizationOptions::set_filter_fusion(bool value) {
  _internal_set_filter_fusion(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.OptimizationOptions.filter_fusion)
}

// bool map_and_batch_fusion = 9;
inline bool OptimizationOptions::_internal_has_map_and_batch_fusion() const {
  return optional_map_and_batch_fusion_case() == kMapAndBatchFusion;
}
inline bool OptimizationOptions::has_map_and_batch_fusion() const {
  return _internal_has_map_and_batch_fusion();
}
inline void OptimizationOptions::set_has_map_and_batch_fusion() {
  _impl_._oneof_case_[2] = kMapAndBatchFusion;
}
inline void OptimizationOptions::clear_map_and_batch_fusion() {
  if (_internal_has_map_and_batch_fusion()) {
    _impl_.optional_map_and_batch_fusion_.map_and_batch_fusion_ = false;
    clear_has_optional_map_and_batch_fusion();
  }
}
inline bool OptimizationOptions::_internal_map_and_batch_fusion() const {
  if (_internal_has_map_and_batch_fusion()) {
    return _impl_.optional_map_and_batch_fusion_.map_and_batch_fusion_;
  }
  return false;
}
inline void OptimizationOptions::_internal_set_map_and_batch_fusion(bool value) {
  if (!_internal_has_map_and_batch_fusion()) {
    clear_optional_map_and_batch_fusion();
    set_has_map_and_batch_fusion();
  }
  _impl_.optional_map_and_batch_fusion_.map_and_batch_fusion_ = value;
}
inline bool OptimizationOptions::map_and_batch_fusion() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.OptimizationOptions.map_and_batch_fusion)
  return _internal_map_and_batch_fusion();
}
inline void OptimizationOptions::set_map_and_batch_fusion(bool value) {
  _internal_set_map_and_batch_fusion(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.OptimizationOptions.map_and_batch_fusion)
}

// bool map_and_filter_fusion = 10;
inline bool OptimizationOptions::_internal_has_map_and_filter_fusion() const {
  return optional_map_and_filter_fusion_case() == kMapAndFilterFusion;
}
inline bool OptimizationOptions::has_map_and_filter_fusion() const {
  return _internal_has_map_and_filter_fusion();
}
inline void OptimizationOptions::set_has_map_and_filter_fusion() {
  _impl_._oneof_case_[3] = kMapAndFilterFusion;
}
inline void OptimizationOptions::clear_map_and_filter_fusion() {
  if (_internal_has_map_and_filter_fusion()) {
    _impl_.optional_map_and_filter_fusion_.map_and_filter_fusion_ = false;
    clear_has_optional_map_and_filter_fusion();
  }
}
inline bool OptimizationOptions::_internal_map_and_filter_fusion() const {
  if (_internal_has_map_and_filter_fusion()) {
    return _impl_.optional_map_and_filter_fusion_.map_and_filter_fusion_;
  }
  return false;
}
inline void OptimizationOptions::_internal_set_map_and_filter_fusion(bool value) {
  if (!_internal_has_map_and_filter_fusion()) {
    clear_optional_map_and_filter_fusion();
    set_has_map_and_filter_fusion();
  }
  _impl_.optional_map_and_filter_fusion_.map_and_filter_fusion_ = value;
}
inline bool OptimizationOptions::map_and_filter_fusion() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.OptimizationOptions.map_and_filter_fusion)
  return _internal_map_and_filter_fusion();
}
inline void OptimizationOptions::set_map_and_filter_fusion(bool value) {
  _internal_set_map_and_filter_fusion(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.OptimizationOptions.map_and_filter_fusion)
}

// bool map_fusion = 11;
inline bool OptimizationOptions::_internal_has_map_fusion() const {
  return optional_map_fusion_case() == kMapFusion;
}
inline bool OptimizationOptions::has_map_fusion() const {
  return _internal_has_map_fusion();
}
inline void OptimizationOptions::set_has_map_fusion() {
  _impl_._oneof_case_[4] = kMapFusion;
}
inline void OptimizationOptions::clear_map_fusion() {
  if (_internal_has_map_fusion()) {
    _impl_.optional_map_fusion_.map_fusion_ = false;
    clear_has_optional_map_fusion();
  }
}
inline bool OptimizationOptions::_internal_map_fusion() const {
  if (_internal_has_map_fusion()) {
    return _impl_.optional_map_fusion_.map_fusion_;
  }
  return false;
}
inline void OptimizationOptions::_internal_set_map_fusion(bool value) {
  if (!_internal_has_map_fusion()) {
    clear_optional_map_fusion();
    set_has_map_fusion();
  }
  _impl_.optional_map_fusion_.map_fusion_ = value;
}
inline bool OptimizationOptions::map_fusion() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.OptimizationOptions.map_fusion)
  return _internal_map_fusion();
}
inline void OptimizationOptions::set_map_fusion(bool value) {
  _internal_set_map_fusion(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.OptimizationOptions.map_fusion)
}

// bool map_parallelization = 12;
inline bool OptimizationOptions::_internal_has_map_parallelization() const {
  return optional_map_parallelization_case() == kMapParallelization;
}
inline bool OptimizationOptions::has_map_parallelization() const {
  return _internal_has_map_parallelization();
}
inline void OptimizationOptions::set_has_map_parallelization() {
  _impl_._oneof_case_[5] = kMapParallelization;
}
inline void OptimizationOptions::clear_map_parallelization() {
  if (_internal_has_map_parallelization()) {
    _impl_.optional_map_parallelization_.map_parallelization_ = false;
    clear_has_optional_map_parallelization();
  }
}
inline bool OptimizationOptions::_internal_map_parallelization() const {
  if (_internal_has_map_parallelization()) {
    return _impl_.optional_map_parallelization_.map_parallelization_;
  }
  return false;
}
inline void OptimizationOptions::_internal_set_map_parallelization(bool value) {
  if (!_internal_has_map_parallelization()) {
    clear_optional_map_parallelization();
    set_has_map_parallelization();
  }
  _impl_.optional_map_parallelization_.map_parallelization_ = value;
}
inline bool OptimizationOptions::map_parallelization() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.OptimizationOptions.map_parallelization)
  return _internal_map_parallelization();
}
inline void OptimizationOptions::set_map_parallelization(bool value) {
  _internal_set_map_parallelization(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.OptimizationOptions.map_parallelization)
}

// bool noop_elimination = 14;
inline bool OptimizationOptions::_internal_has_noop_elimination() const {
  return optional_noop_elimination_case() == kNoopElimination;
}
inline bool OptimizationOptions::has_noop_elimination() const {
  return _internal_has_noop_elimination();
}
inline void OptimizationOptions::set_has_noop_elimination() {
  _impl_._oneof_case_[6] = kNoopElimination;
}
inline void OptimizationOptions::clear_noop_elimination() {
  if (_internal_has_noop_elimination()) {
    _impl_.optional_noop_elimination_.noop_elimination_ = false;
    clear_has_optional_noop_elimination();
  }
}
inline bool OptimizationOptions::_internal_noop_elimination() const {
  if (_internal_has_noop_elimination()) {
    return _impl_.optional_noop_elimination_.noop_elimination_;
  }
  return false;
}
inline void OptimizationOptions::_internal_set_noop_elimination(bool value) {
  if (!_internal_has_noop_elimination()) {
    clear_optional_noop_elimination();
    set_has_noop_elimination();
  }
  _impl_.optional_noop_elimination_.noop_elimination_ = value;
}
inline bool OptimizationOptions::noop_elimination() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.OptimizationOptions.noop_elimination)
  return _internal_noop_elimination();
}
inline void OptimizationOptions::set_noop_elimination(bool value) {
  _internal_set_noop_elimination(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.OptimizationOptions.noop_elimination)
}

// bool parallel_batch = 15;
inline bool OptimizationOptions::_internal_has_parallel_batch() const {
  return optional_parallel_batch_case() == kParallelBatch;
}
inline bool OptimizationOptions::has_parallel_batch() const {
  return _internal_has_parallel_batch();
}
inline void OptimizationOptions::set_has_parallel_batch() {
  _impl_._oneof_case_[7] = kParallelBatch;
}
inline void OptimizationOptions::clear_parallel_batch() {
  if (_internal_has_parallel_batch()) {
    _impl_.optional_parallel_batch_.parallel_batch_ = false;
    clear_has_optional_parallel_batch();
  }
}
inline bool OptimizationOptions::_internal_parallel_batch() const {
  if (_internal_has_parallel_batch()) {
    return _impl_.optional_parallel_batch_.parallel_batch_;
  }
  return false;
}
inline void OptimizationOptions::_internal_set_parallel_batch(bool value) {
  if (!_internal_has_parallel_batch()) {
    clear_optional_parallel_batch();
    set_has_parallel_batch();
  }
  _impl_.optional_parallel_batch_.parallel_batch_ = value;
}
inline bool OptimizationOptions::parallel_batch() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.OptimizationOptions.parallel_batch)
  return _internal_parallel_batch();
}
inline void OptimizationOptions::set_parallel_batch(bool value) {
  _internal_set_parallel_batch(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.OptimizationOptions.parallel_batch)
}

// bool shuffle_and_repeat_fusion = 17;
inline bool OptimizationOptions::_internal_has_shuffle_and_repeat_fusion() const {
  return optional_shuffle_and_repeat_fusion_case() == kShuffleAndRepeatFusion;
}
inline bool OptimizationOptions::has_shuffle_and_repeat_fusion() const {
  return _internal_has_shuffle_and_repeat_fusion();
}
inline void OptimizationOptions::set_has_shuffle_and_repeat_fusion() {
  _impl_._oneof_case_[8] = kShuffleAndRepeatFusion;
}
inline void OptimizationOptions::clear_shuffle_and_repeat_fusion() {
  if (_internal_has_shuffle_and_repeat_fusion()) {
    _impl_.optional_shuffle_and_repeat_fusion_.shuffle_and_repeat_fusion_ = false;
    clear_has_optional_shuffle_and_repeat_fusion();
  }
}
inline bool OptimizationOptions::_internal_shuffle_and_repeat_fusion() const {
  if (_internal_has_shuffle_and_repeat_fusion()) {
    return _impl_.optional_shuffle_and_repeat_fusion_.shuffle_and_repeat_fusion_;
  }
  return false;
}
inline void OptimizationOptions::_internal_set_shuffle_and_repeat_fusion(bool value) {
  if (!_internal_has_shuffle_and_repeat_fusion()) {
    clear_optional_shuffle_and_repeat_fusion();
    set_has_shuffle_and_repeat_fusion();
  }
  _impl_.optional_shuffle_and_repeat_fusion_.shuffle_and_repeat_fusion_ = value;
}
inline bool OptimizationOptions::shuffle_and_repeat_fusion() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.OptimizationOptions.shuffle_and_repeat_fusion)
  return _internal_shuffle_and_repeat_fusion();
}
inline void OptimizationOptions::set_shuffle_and_repeat_fusion(bool value) {
  _internal_set_shuffle_and_repeat_fusion(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.OptimizationOptions.shuffle_and_repeat_fusion)
}

// bool filter_parallelization = 18;
inline bool OptimizationOptions::_internal_has_filter_parallelization() const {
  return optional_filter_parallelization_case() == kFilterParallelization;
}
inline bool OptimizationOptions::has_filter_parallelization() const {
  return _internal_has_filter_parallelization();
}
inline void OptimizationOptions::set_has_filter_parallelization() {
  _impl_._oneof_case_[9] = kFilterParallelization;
}
inline void OptimizationOptions::clear_filter_parallelization() {
  if (_internal_has_filter_parallelization()) {
    _impl_.optional_filter_parallelization_.filter_parallelization_ = false;
    clear_has_optional_filter_parallelization();
  }
}
inline bool OptimizationOptions::_internal_filter_parallelization() const {
  if (_internal_has_filter_parallelization()) {
    return _impl_.optional_filter_parallelization_.filter_parallelization_;
  }
  return false;
}
inline void OptimizationOptions::_internal_set_filter_parallelization(bool value) {
  if (!_internal_has_filter_parallelization()) {
    clear_optional_filter_parallelization();
    set_has_filter_parallelization();
  }
  _impl_.optional_filter_parallelization_.filter_parallelization_ = value;
}
inline bool OptimizationOptions::filter_parallelization() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.OptimizationOptions.filter_parallelization)
  return _internal_filter_parallelization();
}
inline void OptimizationOptions::set_filter_parallelization(bool value) {
  _internal_set_filter_parallelization(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.OptimizationOptions.filter_parallelization)
}

// bool inject_prefetch = 19;
inline bool OptimizationOptions::_internal_has_inject_prefetch() const {
  return optional_inject_prefetch_case() == kInjectPrefetch;
}
inline bool OptimizationOptions::has_inject_prefetch() const {
  return _internal_has_inject_prefetch();
}
inline void OptimizationOptions::set_has_inject_prefetch() {
  _impl_._oneof_case_[10] = kInjectPrefetch;
}
inline void OptimizationOptions::clear_inject_prefetch() {
  if (_internal_has_inject_prefetch()) {
    _impl_.optional_inject_prefetch_.inject_prefetch_ = false;
    clear_has_optional_inject_prefetch();
  }
}
inline bool OptimizationOptions::_internal_inject_prefetch() const {
  if (_internal_has_inject_prefetch()) {
    return _impl_.optional_inject_prefetch_.inject_prefetch_;
  }
  return false;
}
inline void OptimizationOptions::_internal_set_inject_prefetch(bool value) {
  if (!_internal_has_inject_prefetch()) {
    clear_optional_inject_prefetch();
    set_has_inject_prefetch();
  }
  _impl_.optional_inject_prefetch_.inject_prefetch_ = value;
}
inline bool OptimizationOptions::inject_prefetch() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.OptimizationOptions.inject_prefetch)
  return _internal_inject_prefetch();
}
inline void OptimizationOptions::set_inject_prefetch(bool value) {
  _internal_set_inject_prefetch(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.OptimizationOptions.inject_prefetch)
}

inline bool OptimizationOptions::has_optional_apply_default_optimizations() const {
  return optional_apply_default_optimizations_case() != OPTIONAL_APPLY_DEFAULT_OPTIMIZATIONS_NOT_SET;
}
inline void OptimizationOptions::clear_has_optional_apply_default_optimizations() {
  _impl_._oneof_case_[0] = OPTIONAL_APPLY_DEFAULT_OPTIMIZATIONS_NOT_SET;
}
inline bool OptimizationOptions::has_optional_filter_fusion() const {
  return optional_filter_fusion_case() != OPTIONAL_FILTER_FUSION_NOT_SET;
}
inline void OptimizationOptions::clear_has_optional_filter_fusion() {
  _impl_._oneof_case_[1] = OPTIONAL_FILTER_FUSION_NOT_SET;
}
inline bool OptimizationOptions::has_optional_map_and_batch_fusion() const {
  return optional_map_and_batch_fusion_case() != OPTIONAL_MAP_AND_BATCH_FUSION_NOT_SET;
}
inline void OptimizationOptions::clear_has_optional_map_and_batch_fusion() {
  _impl_._oneof_case_[2] = OPTIONAL_MAP_AND_BATCH_FUSION_NOT_SET;
}
inline bool OptimizationOptions::has_optional_map_and_filter_fusion() const {
  return optional_map_and_filter_fusion_case() != OPTIONAL_MAP_AND_FILTER_FUSION_NOT_SET;
}
inline void OptimizationOptions::clear_has_optional_map_and_filter_fusion() {
  _impl_._oneof_case_[3] = OPTIONAL_MAP_AND_FILTER_FUSION_NOT_SET;
}
inline bool OptimizationOptions::has_optional_map_fusion() const {
  return optional_map_fusion_case() != OPTIONAL_MAP_FUSION_NOT_SET;
}
inline void OptimizationOptions::clear_has_optional_map_fusion() {
  _impl_._oneof_case_[4] = OPTIONAL_MAP_FUSION_NOT_SET;
}
inline bool OptimizationOptions::has_optional_map_parallelization() const {
  return optional_map_parallelization_case() != OPTIONAL_MAP_PARALLELIZATION_NOT_SET;
}
inline void OptimizationOptions::clear_has_optional_map_parallelization() {
  _impl_._oneof_case_[5] = OPTIONAL_MAP_PARALLELIZATION_NOT_SET;
}
inline bool OptimizationOptions::has_optional_noop_elimination() const {
  return optional_noop_elimination_case() != OPTIONAL_NOOP_ELIMINATION_NOT_SET;
}
inline void OptimizationOptions::clear_has_optional_noop_elimination() {
  _impl_._oneof_case_[6] = OPTIONAL_NOOP_ELIMINATION_NOT_SET;
}
inline bool OptimizationOptions::has_optional_parallel_batch() const {
  return optional_parallel_batch_case() != OPTIONAL_PARALLEL_BATCH_NOT_SET;
}
inline void OptimizationOptions::clear_has_optional_parallel_batch() {
  _impl_._oneof_case_[7] = OPTIONAL_PARALLEL_BATCH_NOT_SET;
}
inline bool OptimizationOptions::has_optional_shuffle_and_repeat_fusion() const {
  return optional_shuffle_and_repeat_fusion_case() != OPTIONAL_SHUFFLE_AND_REPEAT_FUSION_NOT_SET;
}
inline void OptimizationOptions::clear_has_optional_shuffle_and_repeat_fusion() {
  _impl_._oneof_case_[8] = OPTIONAL_SHUFFLE_AND_REPEAT_FUSION_NOT_SET;
}
inline bool OptimizationOptions::has_optional_filter_parallelization() const {
  return optional_filter_parallelization_case() != OPTIONAL_FILTER_PARALLELIZATION_NOT_SET;
}
inline void OptimizationOptions::clear_has_optional_filter_parallelization() {
  _impl_._oneof_case_[9] = OPTIONAL_FILTER_PARALLELIZATION_NOT_SET;
}
inline bool OptimizationOptions::has_optional_inject_prefetch() const {
  return optional_inject_prefetch_case() != OPTIONAL_INJECT_PREFETCH_NOT_SET;
}
inline void OptimizationOptions::clear_has_optional_inject_prefetch() {
  _impl_._oneof_case_[10] = OPTIONAL_INJECT_PREFETCH_NOT_SET;
}
inline OptimizationOptions::OptionalApplyDefaultOptimizationsCase OptimizationOptions::optional_apply_default_optimizations_case() const {
  return OptimizationOptions::OptionalApplyDefaultOptimizationsCase(_impl_._oneof_case_[0]);
}
inline OptimizationOptions::OptionalFilterFusionCase OptimizationOptions::optional_filter_fusion_case() const {
  return OptimizationOptions::OptionalFilterFusionCase(_impl_._oneof_case_[1]);
}
inline OptimizationOptions::OptionalMapAndBatchFusionCase OptimizationOptions::optional_map_and_batch_fusion_case() const {
  return OptimizationOptions::OptionalMapAndBatchFusionCase(_impl_._oneof_case_[2]);
}
inline OptimizationOptions::OptionalMapAndFilterFusionCase OptimizationOptions::optional_map_and_filter_fusion_case() const {
  return OptimizationOptions::OptionalMapAndFilterFusionCase(_impl_._oneof_case_[3]);
}
inline OptimizationOptions::OptionalMapFusionCase OptimizationOptions::optional_map_fusion_case() const {
  return OptimizationOptions::OptionalMapFusionCase(_impl_._oneof_case_[4]);
}
inline OptimizationOptions::OptionalMapParallelizationCase OptimizationOptions::optional_map_parallelization_case() const {
  return OptimizationOptions::OptionalMapParallelizationCase(_impl_._oneof_case_[5]);
}
inline OptimizationOptions::OptionalNoopEliminationCase OptimizationOptions::optional_noop_elimination_case() const {
  return OptimizationOptions::OptionalNoopEliminationCase(_impl_._oneof_case_[6]);
}
inline OptimizationOptions::OptionalParallelBatchCase OptimizationOptions::optional_parallel_batch_case() const {
  return OptimizationOptions::OptionalParallelBatchCase(_impl_._oneof_case_[7]);
}
inline OptimizationOptions::OptionalShuffleAndRepeatFusionCase OptimizationOptions::optional_shuffle_and_repeat_fusion_case() const {
  return OptimizationOptions::OptionalShuffleAndRepeatFusionCase(_impl_._oneof_case_[8]);
}
inline OptimizationOptions::OptionalFilterParallelizationCase OptimizationOptions::optional_filter_parallelization_case() const {
  return OptimizationOptions::OptionalFilterParallelizationCase(_impl_._oneof_case_[9]);
}
inline OptimizationOptions::OptionalInjectPrefetchCase OptimizationOptions::optional_inject_prefetch_case() const {
  return OptimizationOptions::OptionalInjectPrefetchCase(_impl_._oneof_case_[10]);
}
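// Illustrative sketch (hypothetical helper): each optimization toggle above is
// an independent single-member oneof, so callers can distinguish "explicitly
// disabled" (has_*() true, value false) from "left unspecified" (has_*() false).
inline bool ExampleOptimizationToggles(OptimizationOptions* opts) {
  opts->set_map_parallelization(true);  // explicitly enabled
  opts->set_noop_elimination(false);    // explicitly disabled, but still "set"
  bool filter_fusion_specified = opts->has_filter_fusion();  // false: never touched
  return opts->has_noop_elimination() && !filter_fusion_specified;
}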
2471 // -------------------------------------------------------------------
2472
2473 // ThreadingOptions
2474
2475 // int32 max_intra_op_parallelism = 1;
_internal_has_max_intra_op_parallelism()2476 inline bool ThreadingOptions::_internal_has_max_intra_op_parallelism() const {
2477 return optional_max_intra_op_parallelism_case() == kMaxIntraOpParallelism;
2478 }
has_max_intra_op_parallelism()2479 inline bool ThreadingOptions::has_max_intra_op_parallelism() const {
2480 return _internal_has_max_intra_op_parallelism();
2481 }
set_has_max_intra_op_parallelism()2482 inline void ThreadingOptions::set_has_max_intra_op_parallelism() {
2483 _impl_._oneof_case_[0] = kMaxIntraOpParallelism;
2484 }
clear_max_intra_op_parallelism()2485 inline void ThreadingOptions::clear_max_intra_op_parallelism() {
2486 if (_internal_has_max_intra_op_parallelism()) {
2487 _impl_.optional_max_intra_op_parallelism_.max_intra_op_parallelism_ = 0;
2488 clear_has_optional_max_intra_op_parallelism();
2489 }
2490 }
_internal_max_intra_op_parallelism()2491 inline ::int32_t ThreadingOptions::_internal_max_intra_op_parallelism() const {
2492 if (_internal_has_max_intra_op_parallelism()) {
2493 return _impl_.optional_max_intra_op_parallelism_.max_intra_op_parallelism_;
2494 }
2495 return 0;
2496 }
_internal_set_max_intra_op_parallelism(::int32_t value)2497 inline void ThreadingOptions::_internal_set_max_intra_op_parallelism(::int32_t value) {
2498 if (!_internal_has_max_intra_op_parallelism()) {
2499 clear_optional_max_intra_op_parallelism();
2500 set_has_max_intra_op_parallelism();
2501 }
2502 _impl_.optional_max_intra_op_parallelism_.max_intra_op_parallelism_ = value;
2503 }
max_intra_op_parallelism()2504 inline ::int32_t ThreadingOptions::max_intra_op_parallelism() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.ThreadingOptions.max_intra_op_parallelism)
  return _internal_max_intra_op_parallelism();
}
inline void ThreadingOptions::set_max_intra_op_parallelism(::int32_t value) {
  _internal_set_max_intra_op_parallelism(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.ThreadingOptions.max_intra_op_parallelism)
}

// int32 private_threadpool_size = 2;
inline bool ThreadingOptions::_internal_has_private_threadpool_size() const {
  return optional_private_threadpool_size_case() == kPrivateThreadpoolSize;
}
inline bool ThreadingOptions::has_private_threadpool_size() const {
  return _internal_has_private_threadpool_size();
}
inline void ThreadingOptions::set_has_private_threadpool_size() {
  _impl_._oneof_case_[1] = kPrivateThreadpoolSize;
}
inline void ThreadingOptions::clear_private_threadpool_size() {
  if (_internal_has_private_threadpool_size()) {
    _impl_.optional_private_threadpool_size_.private_threadpool_size_ = 0;
    clear_has_optional_private_threadpool_size();
  }
}
inline ::int32_t ThreadingOptions::_internal_private_threadpool_size() const {
  if (_internal_has_private_threadpool_size()) {
    return _impl_.optional_private_threadpool_size_.private_threadpool_size_;
  }
  return 0;
}
inline void ThreadingOptions::_internal_set_private_threadpool_size(::int32_t value) {
  if (!_internal_has_private_threadpool_size()) {
    clear_optional_private_threadpool_size();
    set_has_private_threadpool_size();
  }
  _impl_.optional_private_threadpool_size_.private_threadpool_size_ = value;
}
inline ::int32_t ThreadingOptions::private_threadpool_size() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.ThreadingOptions.private_threadpool_size)
  return _internal_private_threadpool_size();
}
inline void ThreadingOptions::set_private_threadpool_size(::int32_t value) {
  _internal_set_private_threadpool_size(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.ThreadingOptions.private_threadpool_size)
}

inline bool ThreadingOptions::has_optional_max_intra_op_parallelism() const {
  return optional_max_intra_op_parallelism_case() != OPTIONAL_MAX_INTRA_OP_PARALLELISM_NOT_SET;
}
inline void ThreadingOptions::clear_has_optional_max_intra_op_parallelism() {
  _impl_._oneof_case_[0] = OPTIONAL_MAX_INTRA_OP_PARALLELISM_NOT_SET;
}
inline bool ThreadingOptions::has_optional_private_threadpool_size() const {
  return optional_private_threadpool_size_case() != OPTIONAL_PRIVATE_THREADPOOL_SIZE_NOT_SET;
}
inline void ThreadingOptions::clear_has_optional_private_threadpool_size() {
  _impl_._oneof_case_[1] = OPTIONAL_PRIVATE_THREADPOOL_SIZE_NOT_SET;
}
inline ThreadingOptions::OptionalMaxIntraOpParallelismCase ThreadingOptions::optional_max_intra_op_parallelism_case() const {
  return ThreadingOptions::OptionalMaxIntraOpParallelismCase(_impl_._oneof_case_[0]);
}
inline ThreadingOptions::OptionalPrivateThreadpoolSizeCase ThreadingOptions::optional_private_threadpool_size_case() const {
  return ThreadingOptions::OptionalPrivateThreadpoolSizeCase(_impl_._oneof_case_[1]);
}
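
// A minimal usage sketch (not protoc output) of the oneof-wrapped scalar
// accessors defined above; the helper function name is hypothetical.
inline void ExampleConfigureThreadingOptions(ThreadingOptions* opts) {
  // Writing a value selects the corresponding oneof case.
  opts->set_private_threadpool_size(8);
  if (opts->has_private_threadpool_size()) {
    ::int32_t size = opts->private_threadpool_size();  // reads back 8
    (void)size;
  }
  // Clearing the field resets the case to OPTIONAL_PRIVATE_THREADPOOL_SIZE_NOT_SET.
  opts->clear_private_threadpool_size();
}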
// -------------------------------------------------------------------

// Options

// bool deterministic = 1;
inline bool Options::_internal_has_deterministic() const {
  return optional_deterministic_case() == kDeterministic;
}
inline bool Options::has_deterministic() const {
  return _internal_has_deterministic();
}
inline void Options::set_has_deterministic() {
  _impl_._oneof_case_[0] = kDeterministic;
}
inline void Options::clear_deterministic() {
  if (_internal_has_deterministic()) {
    _impl_.optional_deterministic_.deterministic_ = false;
    clear_has_optional_deterministic();
  }
}
inline bool Options::_internal_deterministic() const {
  if (_internal_has_deterministic()) {
    return _impl_.optional_deterministic_.deterministic_;
  }
  return false;
}
inline void Options::_internal_set_deterministic(bool value) {
  if (!_internal_has_deterministic()) {
    clear_optional_deterministic();
    set_has_deterministic();
  }
  _impl_.optional_deterministic_.deterministic_ = value;
}
inline bool Options::deterministic() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.Options.deterministic)
  return _internal_deterministic();
}
inline void Options::set_deterministic(bool value) {
  _internal_set_deterministic(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.Options.deterministic)
}

// .tensorflow.data.AutotuneOptions autotune_options = 7;
inline bool Options::_internal_has_autotune_options() const {
  return this != internal_default_instance() && _impl_.autotune_options_ != nullptr;
}
inline bool Options::has_autotune_options() const {
  return _internal_has_autotune_options();
}
inline void Options::clear_autotune_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.autotune_options_ != nullptr) {
    delete _impl_.autotune_options_;
  }
  _impl_.autotune_options_ = nullptr;
}
inline const ::tensorflow::data::AutotuneOptions& Options::_internal_autotune_options() const {
  const ::tensorflow::data::AutotuneOptions* p = _impl_.autotune_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::data::AutotuneOptions&>(
      ::tensorflow::data::_AutotuneOptions_default_instance_);
}
inline const ::tensorflow::data::AutotuneOptions& Options::autotune_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.Options.autotune_options)
  return _internal_autotune_options();
}
inline void Options::unsafe_arena_set_allocated_autotune_options(
    ::tensorflow::data::AutotuneOptions* autotune_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.autotune_options_);
  }
  _impl_.autotune_options_ = autotune_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.data.Options.autotune_options)
}
inline ::tensorflow::data::AutotuneOptions* Options::release_autotune_options() {

  ::tensorflow::data::AutotuneOptions* temp = _impl_.autotune_options_;
  _impl_.autotune_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::data::AutotuneOptions* Options::unsafe_arena_release_autotune_options() {
  // @@protoc_insertion_point(field_release:tensorflow.data.Options.autotune_options)

  ::tensorflow::data::AutotuneOptions* temp = _impl_.autotune_options_;
  _impl_.autotune_options_ = nullptr;
  return temp;
}
inline ::tensorflow::data::AutotuneOptions* Options::_internal_mutable_autotune_options() {

  if (_impl_.autotune_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::data::AutotuneOptions>(GetArenaForAllocation());
    _impl_.autotune_options_ = p;
  }
  return _impl_.autotune_options_;
}
inline ::tensorflow::data::AutotuneOptions* Options::mutable_autotune_options() {
  ::tensorflow::data::AutotuneOptions* _msg = _internal_mutable_autotune_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.data.Options.autotune_options)
  return _msg;
}
inline void Options::set_allocated_autotune_options(::tensorflow::data::AutotuneOptions* autotune_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.autotune_options_;
  }
  if (autotune_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(autotune_options);
    if (message_arena != submessage_arena) {
      autotune_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, autotune_options, submessage_arena);
    }

  } else {

  }
  _impl_.autotune_options_ = autotune_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.data.Options.autotune_options)
}

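// A minimal usage sketch (not protoc output) of the ownership-transferring
// accessors defined above; the helper function name is hypothetical.
// release_*() hands the submessage back to the caller (copying it off an
// arena when necessary) and set_allocated_*() takes ownership of the pointer.
inline void ExampleMoveAutotuneOptions(Options* src, Options* dst) {
  ::tensorflow::data::AutotuneOptions* autotune = src->release_autotune_options();
  dst->set_allocated_autotune_options(autotune);  // dst now owns the message
}
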
// .tensorflow.data.DistributeOptions distribute_options = 2;
inline bool Options::_internal_has_distribute_options() const {
  return this != internal_default_instance() && _impl_.distribute_options_ != nullptr;
}
inline bool Options::has_distribute_options() const {
  return _internal_has_distribute_options();
}
inline void Options::clear_distribute_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.distribute_options_ != nullptr) {
    delete _impl_.distribute_options_;
  }
  _impl_.distribute_options_ = nullptr;
}
inline const ::tensorflow::data::DistributeOptions& Options::_internal_distribute_options() const {
  const ::tensorflow::data::DistributeOptions* p = _impl_.distribute_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::data::DistributeOptions&>(
      ::tensorflow::data::_DistributeOptions_default_instance_);
}
inline const ::tensorflow::data::DistributeOptions& Options::distribute_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.Options.distribute_options)
  return _internal_distribute_options();
}
inline void Options::unsafe_arena_set_allocated_distribute_options(
    ::tensorflow::data::DistributeOptions* distribute_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.distribute_options_);
  }
  _impl_.distribute_options_ = distribute_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.data.Options.distribute_options)
}
inline ::tensorflow::data::DistributeOptions* Options::release_distribute_options() {

  ::tensorflow::data::DistributeOptions* temp = _impl_.distribute_options_;
  _impl_.distribute_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::data::DistributeOptions* Options::unsafe_arena_release_distribute_options() {
  // @@protoc_insertion_point(field_release:tensorflow.data.Options.distribute_options)

  ::tensorflow::data::DistributeOptions* temp = _impl_.distribute_options_;
  _impl_.distribute_options_ = nullptr;
  return temp;
}
inline ::tensorflow::data::DistributeOptions* Options::_internal_mutable_distribute_options() {

  if (_impl_.distribute_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::data::DistributeOptions>(GetArenaForAllocation());
    _impl_.distribute_options_ = p;
  }
  return _impl_.distribute_options_;
}
inline ::tensorflow::data::DistributeOptions* Options::mutable_distribute_options() {
  ::tensorflow::data::DistributeOptions* _msg = _internal_mutable_distribute_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.data.Options.distribute_options)
  return _msg;
}
inline void Options::set_allocated_distribute_options(::tensorflow::data::DistributeOptions* distribute_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.distribute_options_;
  }
  if (distribute_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(distribute_options);
    if (message_arena != submessage_arena) {
      distribute_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, distribute_options, submessage_arena);
    }

  } else {

  }
  _impl_.distribute_options_ = distribute_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.data.Options.distribute_options)
}

// .tensorflow.data.OptimizationOptions optimization_options = 3;
inline bool Options::_internal_has_optimization_options() const {
  return this != internal_default_instance() && _impl_.optimization_options_ != nullptr;
}
inline bool Options::has_optimization_options() const {
  return _internal_has_optimization_options();
}
inline void Options::clear_optimization_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.optimization_options_ != nullptr) {
    delete _impl_.optimization_options_;
  }
  _impl_.optimization_options_ = nullptr;
}
inline const ::tensorflow::data::OptimizationOptions& Options::_internal_optimization_options() const {
  const ::tensorflow::data::OptimizationOptions* p = _impl_.optimization_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::data::OptimizationOptions&>(
      ::tensorflow::data::_OptimizationOptions_default_instance_);
}
inline const ::tensorflow::data::OptimizationOptions& Options::optimization_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.Options.optimization_options)
  return _internal_optimization_options();
}
inline void Options::unsafe_arena_set_allocated_optimization_options(
    ::tensorflow::data::OptimizationOptions* optimization_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.optimization_options_);
  }
  _impl_.optimization_options_ = optimization_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.data.Options.optimization_options)
}
inline ::tensorflow::data::OptimizationOptions* Options::release_optimization_options() {

  ::tensorflow::data::OptimizationOptions* temp = _impl_.optimization_options_;
  _impl_.optimization_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::data::OptimizationOptions* Options::unsafe_arena_release_optimization_options() {
  // @@protoc_insertion_point(field_release:tensorflow.data.Options.optimization_options)

  ::tensorflow::data::OptimizationOptions* temp = _impl_.optimization_options_;
  _impl_.optimization_options_ = nullptr;
  return temp;
}
inline ::tensorflow::data::OptimizationOptions* Options::_internal_mutable_optimization_options() {

  if (_impl_.optimization_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::data::OptimizationOptions>(GetArenaForAllocation());
    _impl_.optimization_options_ = p;
  }
  return _impl_.optimization_options_;
}
inline ::tensorflow::data::OptimizationOptions* Options::mutable_optimization_options() {
  ::tensorflow::data::OptimizationOptions* _msg = _internal_mutable_optimization_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.data.Options.optimization_options)
  return _msg;
}
inline void Options::set_allocated_optimization_options(::tensorflow::data::OptimizationOptions* optimization_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.optimization_options_;
  }
  if (optimization_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(optimization_options);
    if (message_arena != submessage_arena) {
      optimization_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, optimization_options, submessage_arena);
    }

  } else {

  }
  _impl_.optimization_options_ = optimization_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.data.Options.optimization_options)
}

// bool slack = 4;
inline bool Options::_internal_has_slack() const {
  return optional_slack_case() == kSlack;
}
inline bool Options::has_slack() const {
  return _internal_has_slack();
}
inline void Options::set_has_slack() {
  _impl_._oneof_case_[1] = kSlack;
}
inline void Options::clear_slack() {
  if (_internal_has_slack()) {
    _impl_.optional_slack_.slack_ = false;
    clear_has_optional_slack();
  }
}
inline bool Options::_internal_slack() const {
  if (_internal_has_slack()) {
    return _impl_.optional_slack_.slack_;
  }
  return false;
}
inline void Options::_internal_set_slack(bool value) {
  if (!_internal_has_slack()) {
    clear_optional_slack();
    set_has_slack();
  }
  _impl_.optional_slack_.slack_ = value;
}
inline bool Options::slack() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.Options.slack)
  return _internal_slack();
}
inline void Options::set_slack(bool value) {
  _internal_set_slack(value);
  // @@protoc_insertion_point(field_set:tensorflow.data.Options.slack)
}

// .tensorflow.data.ThreadingOptions threading_options = 5;
inline bool Options::_internal_has_threading_options() const {
  return this != internal_default_instance() && _impl_.threading_options_ != nullptr;
}
inline bool Options::has_threading_options() const {
  return _internal_has_threading_options();
}
inline void Options::clear_threading_options() {
  if (GetArenaForAllocation() == nullptr && _impl_.threading_options_ != nullptr) {
    delete _impl_.threading_options_;
  }
  _impl_.threading_options_ = nullptr;
}
inline const ::tensorflow::data::ThreadingOptions& Options::_internal_threading_options() const {
  const ::tensorflow::data::ThreadingOptions* p = _impl_.threading_options_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::data::ThreadingOptions&>(
      ::tensorflow::data::_ThreadingOptions_default_instance_);
}
inline const ::tensorflow::data::ThreadingOptions& Options::threading_options() const {
  // @@protoc_insertion_point(field_get:tensorflow.data.Options.threading_options)
  return _internal_threading_options();
}
inline void Options::unsafe_arena_set_allocated_threading_options(
    ::tensorflow::data::ThreadingOptions* threading_options) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.threading_options_);
  }
  _impl_.threading_options_ = threading_options;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.data.Options.threading_options)
}
inline ::tensorflow::data::ThreadingOptions* Options::release_threading_options() {

  ::tensorflow::data::ThreadingOptions* temp = _impl_.threading_options_;
  _impl_.threading_options_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::data::ThreadingOptions* Options::unsafe_arena_release_threading_options() {
  // @@protoc_insertion_point(field_release:tensorflow.data.Options.threading_options)

  ::tensorflow::data::ThreadingOptions* temp = _impl_.threading_options_;
  _impl_.threading_options_ = nullptr;
  return temp;
}
inline ::tensorflow::data::ThreadingOptions* Options::_internal_mutable_threading_options() {

  if (_impl_.threading_options_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::data::ThreadingOptions>(GetArenaForAllocation());
    _impl_.threading_options_ = p;
  }
  return _impl_.threading_options_;
}
inline ::tensorflow::data::ThreadingOptions* Options::mutable_threading_options() {
  ::tensorflow::data::ThreadingOptions* _msg = _internal_mutable_threading_options();
  // @@protoc_insertion_point(field_mutable:tensorflow.data.Options.threading_options)
  return _msg;
}
inline void Options::set_allocated_threading_options(::tensorflow::data::ThreadingOptions* threading_options) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete _impl_.threading_options_;
  }
  if (threading_options) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(threading_options);
    if (message_arena != submessage_arena) {
      threading_options = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, threading_options, submessage_arena);
    }

  } else {

  }
  _impl_.threading_options_ = threading_options;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.data.Options.threading_options)
}

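// A minimal usage sketch (not protoc output) contrasting const access with
// lazy creation for the submessage fields above; the helper function name is
// hypothetical.
inline void ExampleTouchThreadingOptions(Options* opts) {
  // The const getter never allocates; an absent submessage reads as the
  // default instance.
  const ::tensorflow::data::ThreadingOptions& current = opts->threading_options();
  (void)current;
  // mutable_*() creates the submessage on first use and returns it.
  opts->mutable_threading_options()->set_private_threadpool_size(4);
}
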
// .tensorflow.data.ExternalStatePolicy external_state_policy = 6;
inline bool Options::_internal_has_external_state_policy() const {
  return optional_external_state_policy_case() == kExternalStatePolicy;
}
inline bool Options::has_external_state_policy() const {
  return _internal_has_external_state_policy();
}
inline void Options::set_has_external_state_policy() {
  _impl_._oneof_case_[2] = kExternalStatePolicy;
}
inline void Options::clear_external_state_policy() {
  if (_internal_has_external_state_policy()) {
    _impl_.optional_external_state_policy_.external_state_policy_ = 0;
    clear_has_optional_external_state_policy();
  }
}
inline ::tensorflow::data::ExternalStatePolicy Options::_internal_external_state_policy() const {