// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/dataset_options.proto

#include "tensorflow/core/framework/dataset_options.pb.h"

#include <algorithm>
#include <cstdint>

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/wire_format_lite.h>
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>

PROTOBUF_PRAGMA_INIT_SEG

namespace _pb = ::PROTOBUF_NAMESPACE_ID;
namespace _pbi = _pb::internal;

namespace tensorflow {
namespace data {
PROTOBUF_CONSTEXPR AutotuneOptions::AutotuneOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.optional_enabled_)*/{}
  , /*decltype(_impl_.optional_cpu_budget_)*/{}
  , /*decltype(_impl_.optional_ram_budget_)*/{}
  , /*decltype(_impl_.optional_autotune_algorithm_)*/{}
  , /*decltype(_impl_._cached_size_)*/{}
  , /*decltype(_impl_._oneof_case_)*/{}} {}
struct AutotuneOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR AutotuneOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~AutotuneOptionsDefaultTypeInternal() {}
  union { // NOLINT(misc-non-private-member-variables-in-classes)
    AutotuneOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 AutotuneOptionsDefaultTypeInternal _AutotuneOptions_default_instance_;
PROTOBUF_CONSTEXPR CardinalityOptions::CardinalityOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.compute_level_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct CardinalityOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR CardinalityOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~CardinalityOptionsDefaultTypeInternal() {}
  union { // NOLINT(misc-non-private-member-variables-in-classes)
    CardinalityOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 CardinalityOptionsDefaultTypeInternal _CardinalityOptions_default_instance_;
PROTOBUF_CONSTEXPR DistributeOptions::DistributeOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.auto_shard_policy_)*/0
  , /*decltype(_impl_.optional_num_devices_)*/{}
  , /*decltype(_impl_._cached_size_)*/{}
  , /*decltype(_impl_._oneof_case_)*/{}} {}
struct DistributeOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR DistributeOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~DistributeOptionsDefaultTypeInternal() {}
  union { // NOLINT(misc-non-private-member-variables-in-classes)
    DistributeOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 DistributeOptionsDefaultTypeInternal _DistributeOptions_default_instance_;
PROTOBUF_CONSTEXPR OptimizationOptions::OptimizationOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.optional_apply_default_optimizations_)*/{}
  , /*decltype(_impl_.optional_filter_fusion_)*/{}
  , /*decltype(_impl_.optional_map_and_batch_fusion_)*/{}
  , /*decltype(_impl_.optional_map_and_filter_fusion_)*/{}
  , /*decltype(_impl_.optional_map_fusion_)*/{}
  , /*decltype(_impl_.optional_map_parallelization_)*/{}
  , /*decltype(_impl_.optional_noop_elimination_)*/{}
  , /*decltype(_impl_.optional_parallel_batch_)*/{}
  , /*decltype(_impl_.optional_shuffle_and_repeat_fusion_)*/{}
  , /*decltype(_impl_.optional_filter_parallelization_)*/{}
  , /*decltype(_impl_.optional_inject_prefetch_)*/{}
  , /*decltype(_impl_._cached_size_)*/{}
  , /*decltype(_impl_._oneof_case_)*/{}} {}
struct OptimizationOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR OptimizationOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~OptimizationOptionsDefaultTypeInternal() {}
  union { // NOLINT(misc-non-private-member-variables-in-classes)
    OptimizationOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 OptimizationOptionsDefaultTypeInternal _OptimizationOptions_default_instance_;
PROTOBUF_CONSTEXPR ThreadingOptions::ThreadingOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.optional_max_intra_op_parallelism_)*/{}
  , /*decltype(_impl_.optional_private_threadpool_size_)*/{}
  , /*decltype(_impl_._cached_size_)*/{}
  , /*decltype(_impl_._oneof_case_)*/{}} {}
struct ThreadingOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR ThreadingOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~ThreadingOptionsDefaultTypeInternal() {}
  union { // NOLINT(misc-non-private-member-variables-in-classes)
    ThreadingOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ThreadingOptionsDefaultTypeInternal _ThreadingOptions_default_instance_;
PROTOBUF_CONSTEXPR Options::Options(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.distribute_options_)*/nullptr
  , /*decltype(_impl_.optimization_options_)*/nullptr
  , /*decltype(_impl_.threading_options_)*/nullptr
  , /*decltype(_impl_.autotune_options_)*/nullptr
  , /*decltype(_impl_.optional_deterministic_)*/{}
  , /*decltype(_impl_.optional_slack_)*/{}
  , /*decltype(_impl_.optional_external_state_policy_)*/{}
  , /*decltype(_impl_._cached_size_)*/{}
  , /*decltype(_impl_._oneof_case_)*/{}} {}
struct OptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR OptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~OptionsDefaultTypeInternal() {}
  union { // NOLINT(misc-non-private-member-variables-in-classes)
    Options _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 OptionsDefaultTypeInternal _Options_default_instance_;
} // namespace data
} // namespace tensorflow
namespace tensorflow {
namespace data {
bool CardinalityOptions_ComputeLevel_IsValid(int value) {
  switch (value) {
    case 0:
    case 1:
    case 2:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> CardinalityOptions_ComputeLevel_strings[3] = {};

static const char CardinalityOptions_ComputeLevel_names[] =
  "CARDINALITY_COMPUTE_LOW"
  "CARDINALITY_COMPUTE_MODERATE"
  "CARDINALITY_COMPUTE_UNSPECIFIED";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry CardinalityOptions_ComputeLevel_entries[] = {
  { {CardinalityOptions_ComputeLevel_names + 0, 23}, 1 },
  { {CardinalityOptions_ComputeLevel_names + 23, 28}, 2 },
  { {CardinalityOptions_ComputeLevel_names + 51, 31}, 0 },
};

static const int CardinalityOptions_ComputeLevel_entries_by_number[] = {
  2, // 0 -> CARDINALITY_COMPUTE_UNSPECIFIED
  0, // 1 -> CARDINALITY_COMPUTE_LOW
  1, // 2 -> CARDINALITY_COMPUTE_MODERATE
};

const std::string& CardinalityOptions_ComputeLevel_Name(
    CardinalityOptions_ComputeLevel value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          CardinalityOptions_ComputeLevel_entries,
          CardinalityOptions_ComputeLevel_entries_by_number,
          3, CardinalityOptions_ComputeLevel_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      CardinalityOptions_ComputeLevel_entries,
      CardinalityOptions_ComputeLevel_entries_by_number,
      3, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     CardinalityOptions_ComputeLevel_strings[idx].get();
}
bool CardinalityOptions_ComputeLevel_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, CardinalityOptions_ComputeLevel* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      CardinalityOptions_ComputeLevel_entries, 3, name, &int_value);
  if (success) {
    *value = static_cast<CardinalityOptions_ComputeLevel>(int_value);
  }
  return success;
}
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr CardinalityOptions_ComputeLevel CardinalityOptions::CARDINALITY_COMPUTE_UNSPECIFIED;
constexpr CardinalityOptions_ComputeLevel CardinalityOptions::CARDINALITY_COMPUTE_LOW;
constexpr CardinalityOptions_ComputeLevel CardinalityOptions::CARDINALITY_COMPUTE_MODERATE;
constexpr CardinalityOptions_ComputeLevel CardinalityOptions::ComputeLevel_MIN;
constexpr CardinalityOptions_ComputeLevel CardinalityOptions::ComputeLevel_MAX;
constexpr int CardinalityOptions::ComputeLevel_ARRAYSIZE;
#endif // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
bool AutoShardPolicy_IsValid(int value) {
  switch (value) {
    case -1:
    case 0:
    case 1:
    case 2:
    case 3:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> AutoShardPolicy_strings[5] = {};

static const char AutoShardPolicy_names[] =
  "AUTO"
  "DATA"
  "FILE"
  "HINT"
  "OFF";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry AutoShardPolicy_entries[] = {
  { {AutoShardPolicy_names + 0, 4}, 0 },
  { {AutoShardPolicy_names + 4, 4}, 2 },
  { {AutoShardPolicy_names + 8, 4}, 1 },
  { {AutoShardPolicy_names + 12, 4}, 3 },
  { {AutoShardPolicy_names + 16, 3}, -1 },
};

static const int AutoShardPolicy_entries_by_number[] = {
  4, // -1 -> OFF
  0, // 0 -> AUTO
  2, // 1 -> FILE
  1, // 2 -> DATA
  3, // 3 -> HINT
};

const std::string& AutoShardPolicy_Name(
    AutoShardPolicy value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          AutoShardPolicy_entries,
          AutoShardPolicy_entries_by_number,
          5, AutoShardPolicy_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      AutoShardPolicy_entries,
      AutoShardPolicy_entries_by_number,
      5, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     AutoShardPolicy_strings[idx].get();
}
bool AutoShardPolicy_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, AutoShardPolicy* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      AutoShardPolicy_entries, 5, name, &int_value);
  if (success) {
    *value = static_cast<AutoShardPolicy>(int_value);
  }
  return success;
}
bool ExternalStatePolicy_IsValid(int value) {
  switch (value) {
    case 0:
    case 1:
    case 2:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> ExternalStatePolicy_strings[3] = {};

static const char ExternalStatePolicy_names[] =
  "POLICY_FAIL"
  "POLICY_IGNORE"
  "POLICY_WARN";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry ExternalStatePolicy_entries[] = {
  { {ExternalStatePolicy_names + 0, 11}, 2 },
  { {ExternalStatePolicy_names + 11, 13}, 1 },
  { {ExternalStatePolicy_names + 24, 11}, 0 },
};

static const int ExternalStatePolicy_entries_by_number[] = {
  2, // 0 -> POLICY_WARN
  1, // 1 -> POLICY_IGNORE
  0, // 2 -> POLICY_FAIL
};

const std::string& ExternalStatePolicy_Name(
    ExternalStatePolicy value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          ExternalStatePolicy_entries,
          ExternalStatePolicy_entries_by_number,
          3, ExternalStatePolicy_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      ExternalStatePolicy_entries,
      ExternalStatePolicy_entries_by_number,
      3, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     ExternalStatePolicy_strings[idx].get();
}
bool ExternalStatePolicy_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ExternalStatePolicy* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      ExternalStatePolicy_entries, 3, name, &int_value);
  if (success) {
    *value = static_cast<ExternalStatePolicy>(int_value);
  }
  return success;
}
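
// Illustrative sketch (not part of the generated output): how the lite enum
// helpers defined above are typically used. Only AutoShardPolicy,
// AutoShardPolicy_Parse(), and AutoShardPolicy_Name() come from this file;
// the surrounding snippet is an assumption for illustration.
//
//   tensorflow::data::AutoShardPolicy policy;
//   if (tensorflow::data::AutoShardPolicy_Parse("HINT", &policy)) {
//     // policy now holds the HINT value (3).
//     const std::string& name = tensorflow::data::AutoShardPolicy_Name(policy);  // "HINT"
//   }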

// ===================================================================

class AutotuneOptions::_Internal {
 public:
};

AutotuneOptions::AutotuneOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                 bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.data.AutotuneOptions)
}
AutotuneOptions::AutotuneOptions(const AutotuneOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  AutotuneOptions* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.optional_enabled_){}
    , decltype(_impl_.optional_cpu_budget_){}
    , decltype(_impl_.optional_ram_budget_){}
    , decltype(_impl_.optional_autotune_algorithm_){}
    , /*decltype(_impl_._cached_size_)*/{}
    , /*decltype(_impl_._oneof_case_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  clear_has_optional_enabled();
  switch (from.optional_enabled_case()) {
    case kEnabled: {
      _this->_internal_set_enabled(from._internal_enabled());
      break;
    }
    case OPTIONAL_ENABLED_NOT_SET: {
      break;
    }
  }
  clear_has_optional_cpu_budget();
  switch (from.optional_cpu_budget_case()) {
    case kCpuBudget: {
      _this->_internal_set_cpu_budget(from._internal_cpu_budget());
      break;
    }
    case OPTIONAL_CPU_BUDGET_NOT_SET: {
      break;
    }
  }
  clear_has_optional_ram_budget();
  switch (from.optional_ram_budget_case()) {
    case kRamBudget: {
      _this->_internal_set_ram_budget(from._internal_ram_budget());
      break;
    }
    case OPTIONAL_RAM_BUDGET_NOT_SET: {
      break;
    }
  }
  clear_has_optional_autotune_algorithm();
  switch (from.optional_autotune_algorithm_case()) {
    case kAutotuneAlgorithm: {
      _this->_internal_set_autotune_algorithm(from._internal_autotune_algorithm());
      break;
    }
    case OPTIONAL_AUTOTUNE_ALGORITHM_NOT_SET: {
      break;
    }
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.data.AutotuneOptions)
}

inline void AutotuneOptions::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.optional_enabled_){}
    , decltype(_impl_.optional_cpu_budget_){}
    , decltype(_impl_.optional_ram_budget_){}
    , decltype(_impl_.optional_autotune_algorithm_){}
    , /*decltype(_impl_._cached_size_)*/{}
    , /*decltype(_impl_._oneof_case_)*/{}
  };
  clear_has_optional_enabled();
  clear_has_optional_cpu_budget();
  clear_has_optional_ram_budget();
  clear_has_optional_autotune_algorithm();
}

AutotuneOptions::~AutotuneOptions() {
  // @@protoc_insertion_point(destructor:tensorflow.data.AutotuneOptions)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void AutotuneOptions::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  if (has_optional_enabled()) {
    clear_optional_enabled();
  }
  if (has_optional_cpu_budget()) {
    clear_optional_cpu_budget();
  }
  if (has_optional_ram_budget()) {
    clear_optional_ram_budget();
  }
  if (has_optional_autotune_algorithm()) {
    clear_optional_autotune_algorithm();
  }
}

void AutotuneOptions::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void AutotuneOptions::clear_optional_enabled() {
  // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.AutotuneOptions)
  switch (optional_enabled_case()) {
    case kEnabled: {
      // No need to clear
      break;
    }
    case OPTIONAL_ENABLED_NOT_SET: {
      break;
    }
  }
  _impl_._oneof_case_[0] = OPTIONAL_ENABLED_NOT_SET;
}

void AutotuneOptions::clear_optional_cpu_budget() {
  // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.AutotuneOptions)
  switch (optional_cpu_budget_case()) {
    case kCpuBudget: {
      // No need to clear
      break;
    }
    case OPTIONAL_CPU_BUDGET_NOT_SET: {
      break;
    }
  }
  _impl_._oneof_case_[1] = OPTIONAL_CPU_BUDGET_NOT_SET;
}

void AutotuneOptions::clear_optional_ram_budget() {
  // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.AutotuneOptions)
  switch (optional_ram_budget_case()) {
    case kRamBudget: {
      // No need to clear
      break;
    }
    case OPTIONAL_RAM_BUDGET_NOT_SET: {
      break;
    }
  }
  _impl_._oneof_case_[2] = OPTIONAL_RAM_BUDGET_NOT_SET;
}

void AutotuneOptions::clear_optional_autotune_algorithm() {
  // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.AutotuneOptions)
  switch (optional_autotune_algorithm_case()) {
    case kAutotuneAlgorithm: {
      // No need to clear
      break;
    }
    case OPTIONAL_AUTOTUNE_ALGORITHM_NOT_SET: {
      break;
    }
  }
  _impl_._oneof_case_[3] = OPTIONAL_AUTOTUNE_ALGORITHM_NOT_SET;
}


void AutotuneOptions::Clear() {
  // @@protoc_insertion_point(message_clear_start:tensorflow.data.AutotuneOptions)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  clear_optional_enabled();
  clear_optional_cpu_budget();
  clear_optional_ram_budget();
  clear_optional_autotune_algorithm();
  _internal_metadata_.Clear<std::string>();
}

const char* AutotuneOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // bool enabled = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _internal_set_enabled(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 cpu_budget = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _internal_set_cpu_budget(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 ram_budget = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _internal_set_ram_budget(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_autotune_algorithm(static_cast<::tensorflow::data::model::AutotuneAlgorithm>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    } // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  } // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
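
// Illustrative note on the tag constants checked above (an editorial sketch,
// not generated output): a wire-format tag is (field_number << 3) | wire_type,
// and all four fields of AutotuneOptions use the varint wire type (0). Hence:
//   enabled            (field 1) -> (1 << 3) | 0 = 8
//   cpu_budget         (field 2) -> (2 << 3) | 0 = 16
//   ram_budget         (field 3) -> (3 << 3) | 0 = 24
//   autotune_algorithm (field 4) -> (4 << 3) | 0 = 32
// which is why `tag >> 3` recovers the field number driving the switch.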

::uint8_t* AutotuneOptions::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.data.AutotuneOptions)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // bool enabled = 1;
  if (_internal_has_enabled()) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(1, this->_internal_enabled(), target);
  }

  // int32 cpu_budget = 2;
  if (_internal_has_cpu_budget()) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_cpu_budget(), target);
  }

  // int64 ram_budget = 3;
  if (_internal_has_ram_budget()) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_ram_budget(), target);
  }

  // .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4;
  if (_internal_has_autotune_algorithm()) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
        4, this->_internal_autotune_algorithm(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.data.AutotuneOptions)
  return target;
}

size_t AutotuneOptions::ByteSizeLong() const {
  // @@protoc_insertion_point(message_byte_size_start:tensorflow.data.AutotuneOptions)
  size_t total_size = 0;

  switch (optional_enabled_case()) {
    // bool enabled = 1;
    case kEnabled: {
      total_size += 1 + 1;
      break;
    }
    case OPTIONAL_ENABLED_NOT_SET: {
      break;
    }
  }
  switch (optional_cpu_budget_case()) {
    // int32 cpu_budget = 2;
    case kCpuBudget: {
      total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_cpu_budget());
      break;
    }
    case OPTIONAL_CPU_BUDGET_NOT_SET: {
      break;
    }
  }
  switch (optional_ram_budget_case()) {
    // int64 ram_budget = 3;
    case kRamBudget: {
      total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_ram_budget());
      break;
    }
    case OPTIONAL_RAM_BUDGET_NOT_SET: {
      break;
    }
  }
  switch (optional_autotune_algorithm_case()) {
    // .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4;
    case kAutotuneAlgorithm: {
      total_size += 1 +
          ::_pbi::WireFormatLite::EnumSize(this->_internal_autotune_algorithm());
      break;
    }
    case OPTIONAL_AUTOTUNE_ALGORITHM_NOT_SET: {
      break;
    }
  }
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void AutotuneOptions::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const AutotuneOptions*>(
      &from));
}

void AutotuneOptions::MergeFrom(const AutotuneOptions& from) {
  AutotuneOptions* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.data.AutotuneOptions)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  switch (from.optional_enabled_case()) {
    case kEnabled: {
      _this->_internal_set_enabled(from._internal_enabled());
      break;
    }
    case OPTIONAL_ENABLED_NOT_SET: {
      break;
    }
  }
  switch (from.optional_cpu_budget_case()) {
    case kCpuBudget: {
      _this->_internal_set_cpu_budget(from._internal_cpu_budget());
      break;
    }
    case OPTIONAL_CPU_BUDGET_NOT_SET: {
      break;
    }
  }
  switch (from.optional_ram_budget_case()) {
    case kRamBudget: {
      _this->_internal_set_ram_budget(from._internal_ram_budget());
      break;
    }
    case OPTIONAL_RAM_BUDGET_NOT_SET: {
      break;
    }
  }
  switch (from.optional_autotune_algorithm_case()) {
    case kAutotuneAlgorithm: {
      _this->_internal_set_autotune_algorithm(from._internal_autotune_algorithm());
      break;
    }
    case OPTIONAL_AUTOTUNE_ALGORITHM_NOT_SET: {
      break;
    }
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void AutotuneOptions::CopyFrom(const AutotuneOptions& from) {
  // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.data.AutotuneOptions)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool AutotuneOptions::IsInitialized() const {
  return true;
}

void AutotuneOptions::InternalSwap(AutotuneOptions* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_impl_.optional_enabled_, other->_impl_.optional_enabled_);
  swap(_impl_.optional_cpu_budget_, other->_impl_.optional_cpu_budget_);
  swap(_impl_.optional_ram_budget_, other->_impl_.optional_ram_budget_);
  swap(_impl_.optional_autotune_algorithm_, other->_impl_.optional_autotune_algorithm_);
  swap(_impl_._oneof_case_[0], other->_impl_._oneof_case_[0]);
  swap(_impl_._oneof_case_[1], other->_impl_._oneof_case_[1]);
  swap(_impl_._oneof_case_[2], other->_impl_._oneof_case_[2]);
  swap(_impl_._oneof_case_[3], other->_impl_._oneof_case_[3]);
}

std::string AutotuneOptions::GetTypeName() const {
  return "tensorflow.data.AutotuneOptions";
}
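
// Illustrative sketch (not part of the generated output): AutotuneOptions is
// built on the lite runtime (MessageLite), so a typical round trip combines
// the base-class serialization API with the field accessors declared in
// dataset_options.pb.h. The snippet below is an assumption for illustration.
//
//   tensorflow::data::AutotuneOptions opts;
//   opts.set_enabled(true);
//   opts.set_ram_budget(1 << 30);
//   std::string wire = opts.SerializeAsString();   // MessageLite API
//   tensorflow::data::AutotuneOptions restored;
//   if (restored.ParseFromString(wire)) {
//     // restored.enabled() == true, restored.ram_budget() == 1 << 30
//   }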

// ===================================================================

class CardinalityOptions::_Internal {
 public:
};

CardinalityOptions::CardinalityOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                       bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.data.CardinalityOptions)
}
CardinalityOptions::CardinalityOptions(const CardinalityOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  CardinalityOptions* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.compute_level_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _this->_impl_.compute_level_ = from._impl_.compute_level_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.data.CardinalityOptions)
}

inline void CardinalityOptions::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.compute_level_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

CardinalityOptions::~CardinalityOptions() {
  // @@protoc_insertion_point(destructor:tensorflow.data.CardinalityOptions)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void CardinalityOptions::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}

void CardinalityOptions::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void CardinalityOptions::Clear() {
  // @@protoc_insertion_point(message_clear_start:tensorflow.data.CardinalityOptions)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.compute_level_ = 0;
  _internal_metadata_.Clear<std::string>();
}

const char* CardinalityOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_compute_level(static_cast<::tensorflow::data::CardinalityOptions_ComputeLevel>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    } // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  } // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* CardinalityOptions::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.data.CardinalityOptions)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1;
  if (this->_internal_compute_level() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
        1, this->_internal_compute_level(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.data.CardinalityOptions)
  return target;
}

size_t CardinalityOptions::ByteSizeLong() const {
  // @@protoc_insertion_point(message_byte_size_start:tensorflow.data.CardinalityOptions)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1;
  if (this->_internal_compute_level() != 0) {
    total_size += 1 +
        ::_pbi::WireFormatLite::EnumSize(this->_internal_compute_level());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void CardinalityOptions::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const CardinalityOptions*>(
      &from));
}

void CardinalityOptions::MergeFrom(const CardinalityOptions& from) {
  CardinalityOptions* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.data.CardinalityOptions)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_compute_level() != 0) {
    _this->_internal_set_compute_level(from._internal_compute_level());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void CardinalityOptions::CopyFrom(const CardinalityOptions& from) {
  // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.data.CardinalityOptions)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool CardinalityOptions::IsInitialized() const {
  return true;
}

void CardinalityOptions::InternalSwap(CardinalityOptions* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_impl_.compute_level_, other->_impl_.compute_level_);
}

std::string CardinalityOptions::GetTypeName() const {
  return "tensorflow.data.CardinalityOptions";
}

// ===================================================================

class DistributeOptions::_Internal {
 public:
};

DistributeOptions::DistributeOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                     bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.data.DistributeOptions)
}
DistributeOptions::DistributeOptions(const DistributeOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  DistributeOptions* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.auto_shard_policy_){}
    , decltype(_impl_.optional_num_devices_){}
    , /*decltype(_impl_._cached_size_)*/{}
    , /*decltype(_impl_._oneof_case_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _this->_impl_.auto_shard_policy_ = from._impl_.auto_shard_policy_;
  clear_has_optional_num_devices();
  switch (from.optional_num_devices_case()) {
    case kNumDevices: {
      _this->_internal_set_num_devices(from._internal_num_devices());
      break;
    }
    case OPTIONAL_NUM_DEVICES_NOT_SET: {
      break;
    }
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.data.DistributeOptions)
}

inline void DistributeOptions::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.auto_shard_policy_){0}
    , decltype(_impl_.optional_num_devices_){}
    , /*decltype(_impl_._cached_size_)*/{}
    , /*decltype(_impl_._oneof_case_)*/{}
  };
  clear_has_optional_num_devices();
}

DistributeOptions::~DistributeOptions() {
  // @@protoc_insertion_point(destructor:tensorflow.data.DistributeOptions)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void DistributeOptions::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  if (has_optional_num_devices()) {
    clear_optional_num_devices();
  }
}

void DistributeOptions::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void DistributeOptions::clear_optional_num_devices() {
  // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.DistributeOptions)
  switch (optional_num_devices_case()) {
    case kNumDevices: {
      // No need to clear
      break;
    }
    case OPTIONAL_NUM_DEVICES_NOT_SET: {
      break;
    }
  }
  _impl_._oneof_case_[0] = OPTIONAL_NUM_DEVICES_NOT_SET;
}


void DistributeOptions::Clear() {
  // @@protoc_insertion_point(message_clear_start:tensorflow.data.DistributeOptions)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.auto_shard_policy_ = 0;
  clear_optional_num_devices();
  _internal_metadata_.Clear<std::string>();
}

const char* DistributeOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .tensorflow.data.AutoShardPolicy auto_shard_policy = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_auto_shard_policy(static_cast<::tensorflow::data::AutoShardPolicy>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 num_devices = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _internal_set_num_devices(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    } // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  } // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* DistributeOptions::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.data.DistributeOptions)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.data.AutoShardPolicy auto_shard_policy = 1;
  if (this->_internal_auto_shard_policy() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
        1, this->_internal_auto_shard_policy(), target);
  }

  // int32 num_devices = 2;
  if (_internal_has_num_devices()) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_num_devices(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.data.DistributeOptions)
  return target;
}

size_t DistributeOptions::ByteSizeLong() const {
  // @@protoc_insertion_point(message_byte_size_start:tensorflow.data.DistributeOptions)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // .tensorflow.data.AutoShardPolicy auto_shard_policy = 1;
  if (this->_internal_auto_shard_policy() != 0) {
    total_size += 1 +
        ::_pbi::WireFormatLite::EnumSize(this->_internal_auto_shard_policy());
  }

  switch (optional_num_devices_case()) {
    // int32 num_devices = 2;
    case kNumDevices: {
      total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_num_devices());
      break;
    }
    case OPTIONAL_NUM_DEVICES_NOT_SET: {
      break;
    }
  }
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void DistributeOptions::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const DistributeOptions*>(
      &from));
}

void DistributeOptions::MergeFrom(const DistributeOptions& from) {
  DistributeOptions* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.data.DistributeOptions)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_auto_shard_policy() != 0) {
    _this->_internal_set_auto_shard_policy(from._internal_auto_shard_policy());
  }
  switch (from.optional_num_devices_case()) {
    case kNumDevices: {
      _this->_internal_set_num_devices(from._internal_num_devices());
      break;
    }
    case OPTIONAL_NUM_DEVICES_NOT_SET: {
      break;
    }
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void DistributeOptions::CopyFrom(const DistributeOptions& from) {
  // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.data.DistributeOptions)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool DistributeOptions::IsInitialized() const {
  return true;
}

void DistributeOptions::InternalSwap(DistributeOptions* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  swap(_impl_.auto_shard_policy_, other->_impl_.auto_shard_policy_);
  swap(_impl_.optional_num_devices_, other->_impl_.optional_num_devices_);
  swap(_impl_._oneof_case_[0], other->_impl_._oneof_case_[0]);
}

std::string DistributeOptions::GetTypeName() const {
  return "tensorflow.data.DistributeOptions";
}

// ===================================================================

class OptimizationOptions::_Internal {
 public:
};

OptimizationOptions::OptimizationOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.data.OptimizationOptions)
}
OptimizationOptions::OptimizationOptions(const OptimizationOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  OptimizationOptions* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.optional_apply_default_optimizations_){}
    , decltype(_impl_.optional_filter_fusion_){}
    , decltype(_impl_.optional_map_and_batch_fusion_){}
    , decltype(_impl_.optional_map_and_filter_fusion_){}
    , decltype(_impl_.optional_map_fusion_){}
    , decltype(_impl_.optional_map_parallelization_){}
    , decltype(_impl_.optional_noop_elimination_){}
    , decltype(_impl_.optional_parallel_batch_){}
    , decltype(_impl_.optional_shuffle_and_repeat_fusion_){}
    , decltype(_impl_.optional_filter_parallelization_){}
    , decltype(_impl_.optional_inject_prefetch_){}
    , /*decltype(_impl_._cached_size_)*/{}
    , /*decltype(_impl_._oneof_case_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  clear_has_optional_apply_default_optimizations();
  switch (from.optional_apply_default_optimizations_case()) {
    case kApplyDefaultOptimizations: {
      _this->_internal_set_apply_default_optimizations(from._internal_apply_default_optimizations());
      break;
    }
    case OPTIONAL_APPLY_DEFAULT_OPTIMIZATIONS_NOT_SET: {
      break;
    }
  }
  clear_has_optional_filter_fusion();
  switch (from.optional_filter_fusion_case()) {
    case kFilterFusion: {
      _this->_internal_set_filter_fusion(from._internal_filter_fusion());
      break;
    }
    case OPTIONAL_FILTER_FUSION_NOT_SET: {
      break;
    }
  }
  clear_has_optional_map_and_batch_fusion();
  switch (from.optional_map_and_batch_fusion_case()) {
    case kMapAndBatchFusion: {
      _this->_internal_set_map_and_batch_fusion(from._internal_map_and_batch_fusion());
      break;
    }
    case OPTIONAL_MAP_AND_BATCH_FUSION_NOT_SET: {
      break;
    }
  }
  clear_has_optional_map_and_filter_fusion();
  switch (from.optional_map_and_filter_fusion_case()) {
    case kMapAndFilterFusion: {
      _this->_internal_set_map_and_filter_fusion(from._internal_map_and_filter_fusion());
      break;
    }
    case OPTIONAL_MAP_AND_FILTER_FUSION_NOT_SET: {
      break;
    }
  }
  clear_has_optional_map_fusion();
  switch (from.optional_map_fusion_case()) {
    case kMapFusion: {
      _this->_internal_set_map_fusion(from._internal_map_fusion());
      break;
    }
    case OPTIONAL_MAP_FUSION_NOT_SET: {
      break;
    }
  }
  clear_has_optional_map_parallelization();
  switch (from.optional_map_parallelization_case()) {
    case kMapParallelization: {
      _this->_internal_set_map_parallelization(from._internal_map_parallelization());
      break;
    }
    case OPTIONAL_MAP_PARALLELIZATION_NOT_SET: {
      break;
    }
  }
  clear_has_optional_noop_elimination();
  switch (from.optional_noop_elimination_case()) {
    case kNoopElimination: {
      _this->_internal_set_noop_elimination(from._internal_noop_elimination());
      break;
    }
    case OPTIONAL_NOOP_ELIMINATION_NOT_SET: {
      break;
    }
  }
  clear_has_optional_parallel_batch();
  switch (from.optional_parallel_batch_case()) {
    case kParallelBatch: {
      _this->_internal_set_parallel_batch(from._internal_parallel_batch());
      break;
    }
    case OPTIONAL_PARALLEL_BATCH_NOT_SET: {
      break;
    }
  }
  clear_has_optional_shuffle_and_repeat_fusion();
  switch (from.optional_shuffle_and_repeat_fusion_case()) {
    case kShuffleAndRepeatFusion: {
      _this->_internal_set_shuffle_and_repeat_fusion(from._internal_shuffle_and_repeat_fusion());
      break;
    }
    case OPTIONAL_SHUFFLE_AND_REPEAT_FUSION_NOT_SET: {
      break;
    }
  }
  clear_has_optional_filter_parallelization();
  switch (from.optional_filter_parallelization_case()) {
    case kFilterParallelization: {
      _this->_internal_set_filter_parallelization(from._internal_filter_parallelization());
      break;
    }
    case OPTIONAL_FILTER_PARALLELIZATION_NOT_SET: {
      break;
    }
  }
  clear_has_optional_inject_prefetch();
  switch (from.optional_inject_prefetch_case()) {
    case kInjectPrefetch: {
      _this->_internal_set_inject_prefetch(from._internal_inject_prefetch());
      break;
    }
    case OPTIONAL_INJECT_PREFETCH_NOT_SET: {
      break;
    }
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.data.OptimizationOptions)
}

inline void OptimizationOptions::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.optional_apply_default_optimizations_){}
    , decltype(_impl_.optional_filter_fusion_){}
    , decltype(_impl_.optional_map_and_batch_fusion_){}
    , decltype(_impl_.optional_map_and_filter_fusion_){}
    , decltype(_impl_.optional_map_fusion_){}
    , decltype(_impl_.optional_map_parallelization_){}
    , decltype(_impl_.optional_noop_elimination_){}
    , decltype(_impl_.optional_parallel_batch_){}
    , decltype(_impl_.optional_shuffle_and_repeat_fusion_){}
    , decltype(_impl_.optional_filter_parallelization_){}
    , decltype(_impl_.optional_inject_prefetch_){}
    , /*decltype(_impl_._cached_size_)*/{}
    , /*decltype(_impl_._oneof_case_)*/{}
  };
  clear_has_optional_apply_default_optimizations();
  clear_has_optional_filter_fusion();
  clear_has_optional_map_and_batch_fusion();
  clear_has_optional_map_and_filter_fusion();
  clear_has_optional_map_fusion();
  clear_has_optional_map_parallelization();
  clear_has_optional_noop_elimination();
  clear_has_optional_parallel_batch();
  clear_has_optional_shuffle_and_repeat_fusion();
  clear_has_optional_filter_parallelization();
  clear_has_optional_inject_prefetch();
}
1349
~OptimizationOptions()1350 OptimizationOptions::~OptimizationOptions() {
1351 // @@protoc_insertion_point(destructor:tensorflow.data.OptimizationOptions)
1352 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
1353 (void)arena;
1354 return;
1355 }
1356 SharedDtor();
1357 }
1358
SharedDtor()1359 inline void OptimizationOptions::SharedDtor() {
1360 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
1361 if (has_optional_apply_default_optimizations()) {
1362 clear_optional_apply_default_optimizations();
1363 }
1364 if (has_optional_filter_fusion()) {
1365 clear_optional_filter_fusion();
1366 }
1367 if (has_optional_map_and_batch_fusion()) {
1368 clear_optional_map_and_batch_fusion();
1369 }
1370 if (has_optional_map_and_filter_fusion()) {
1371 clear_optional_map_and_filter_fusion();
1372 }
1373 if (has_optional_map_fusion()) {
1374 clear_optional_map_fusion();
1375 }
1376 if (has_optional_map_parallelization()) {
1377 clear_optional_map_parallelization();
1378 }
1379 if (has_optional_noop_elimination()) {
1380 clear_optional_noop_elimination();
1381 }
1382 if (has_optional_parallel_batch()) {
1383 clear_optional_parallel_batch();
1384 }
1385 if (has_optional_shuffle_and_repeat_fusion()) {
1386 clear_optional_shuffle_and_repeat_fusion();
1387 }
1388 if (has_optional_filter_parallelization()) {
1389 clear_optional_filter_parallelization();
1390 }
1391 if (has_optional_inject_prefetch()) {
1392 clear_optional_inject_prefetch();
1393 }
1394 }
1395
SetCachedSize(int size) const1396 void OptimizationOptions::SetCachedSize(int size) const {
1397 _impl_._cached_size_.Set(size);
1398 }
1399
clear_optional_apply_default_optimizations()1400 void OptimizationOptions::clear_optional_apply_default_optimizations() {
1401 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.OptimizationOptions)
1402 switch (optional_apply_default_optimizations_case()) {
1403 case kApplyDefaultOptimizations: {
1404 // No need to clear
1405 break;
1406 }
1407 case OPTIONAL_APPLY_DEFAULT_OPTIMIZATIONS_NOT_SET: {
1408 break;
1409 }
1410 }
1411 _impl_._oneof_case_[0] = OPTIONAL_APPLY_DEFAULT_OPTIMIZATIONS_NOT_SET;
1412 }
1413
clear_optional_filter_fusion()1414 void OptimizationOptions::clear_optional_filter_fusion() {
1415 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.OptimizationOptions)
1416 switch (optional_filter_fusion_case()) {
1417 case kFilterFusion: {
1418 // No need to clear
1419 break;
1420 }
1421 case OPTIONAL_FILTER_FUSION_NOT_SET: {
1422 break;
1423 }
1424 }
1425 _impl_._oneof_case_[1] = OPTIONAL_FILTER_FUSION_NOT_SET;
1426 }
1427
clear_optional_map_and_batch_fusion()1428 void OptimizationOptions::clear_optional_map_and_batch_fusion() {
1429 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.OptimizationOptions)
1430 switch (optional_map_and_batch_fusion_case()) {
1431 case kMapAndBatchFusion: {
1432 // No need to clear
1433 break;
1434 }
1435 case OPTIONAL_MAP_AND_BATCH_FUSION_NOT_SET: {
1436 break;
1437 }
1438 }
1439 _impl_._oneof_case_[2] = OPTIONAL_MAP_AND_BATCH_FUSION_NOT_SET;
1440 }
1441
clear_optional_map_and_filter_fusion()1442 void OptimizationOptions::clear_optional_map_and_filter_fusion() {
1443 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.OptimizationOptions)
1444 switch (optional_map_and_filter_fusion_case()) {
1445 case kMapAndFilterFusion: {
1446 // No need to clear
1447 break;
1448 }
1449 case OPTIONAL_MAP_AND_FILTER_FUSION_NOT_SET: {
1450 break;
1451 }
1452 }
1453 _impl_._oneof_case_[3] = OPTIONAL_MAP_AND_FILTER_FUSION_NOT_SET;
1454 }
1455
clear_optional_map_fusion()1456 void OptimizationOptions::clear_optional_map_fusion() {
1457 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.OptimizationOptions)
1458 switch (optional_map_fusion_case()) {
1459 case kMapFusion: {
1460 // No need to clear
1461 break;
1462 }
1463 case OPTIONAL_MAP_FUSION_NOT_SET: {
1464 break;
1465 }
1466 }
1467 _impl_._oneof_case_[4] = OPTIONAL_MAP_FUSION_NOT_SET;
1468 }
1469
clear_optional_map_parallelization()1470 void OptimizationOptions::clear_optional_map_parallelization() {
1471 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.OptimizationOptions)
1472 switch (optional_map_parallelization_case()) {
1473 case kMapParallelization: {
1474 // No need to clear
1475 break;
1476 }
1477 case OPTIONAL_MAP_PARALLELIZATION_NOT_SET: {
1478 break;
1479 }
1480 }
1481 _impl_._oneof_case_[5] = OPTIONAL_MAP_PARALLELIZATION_NOT_SET;
1482 }
1483
clear_optional_noop_elimination()1484 void OptimizationOptions::clear_optional_noop_elimination() {
1485 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.OptimizationOptions)
1486 switch (optional_noop_elimination_case()) {
1487 case kNoopElimination: {
1488 // No need to clear
1489 break;
1490 }
1491 case OPTIONAL_NOOP_ELIMINATION_NOT_SET: {
1492 break;
1493 }
1494 }
1495 _impl_._oneof_case_[6] = OPTIONAL_NOOP_ELIMINATION_NOT_SET;
1496 }
1497
clear_optional_parallel_batch()1498 void OptimizationOptions::clear_optional_parallel_batch() {
1499 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.OptimizationOptions)
1500 switch (optional_parallel_batch_case()) {
1501 case kParallelBatch: {
1502 // No need to clear
1503 break;
1504 }
1505 case OPTIONAL_PARALLEL_BATCH_NOT_SET: {
1506 break;
1507 }
1508 }
1509 _impl_._oneof_case_[7] = OPTIONAL_PARALLEL_BATCH_NOT_SET;
1510 }
1511
1512 void OptimizationOptions::clear_optional_shuffle_and_repeat_fusion() {
1513 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.OptimizationOptions)
1514 switch (optional_shuffle_and_repeat_fusion_case()) {
1515 case kShuffleAndRepeatFusion: {
1516 // No need to clear
1517 break;
1518 }
1519 case OPTIONAL_SHUFFLE_AND_REPEAT_FUSION_NOT_SET: {
1520 break;
1521 }
1522 }
1523 _impl_._oneof_case_[8] = OPTIONAL_SHUFFLE_AND_REPEAT_FUSION_NOT_SET;
1524 }
1525
1526 void OptimizationOptions::clear_optional_filter_parallelization() {
1527 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.OptimizationOptions)
1528 switch (optional_filter_parallelization_case()) {
1529 case kFilterParallelization: {
1530 // No need to clear
1531 break;
1532 }
1533 case OPTIONAL_FILTER_PARALLELIZATION_NOT_SET: {
1534 break;
1535 }
1536 }
1537 _impl_._oneof_case_[9] = OPTIONAL_FILTER_PARALLELIZATION_NOT_SET;
1538 }
1539
1540 void OptimizationOptions::clear_optional_inject_prefetch() {
1541 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.OptimizationOptions)
1542 switch (optional_inject_prefetch_case()) {
1543 case kInjectPrefetch: {
1544 // No need to clear
1545 break;
1546 }
1547 case OPTIONAL_INJECT_PREFETCH_NOT_SET: {
1548 break;
1549 }
1550 }
1551 _impl_._oneof_case_[10] = OPTIONAL_INJECT_PREFETCH_NOT_SET;
1552 }
1553
1554
1555 void OptimizationOptions::Clear() {
1556 // @@protoc_insertion_point(message_clear_start:tensorflow.data.OptimizationOptions)
1557 ::uint32_t cached_has_bits = 0;
1558 // Prevent compiler warnings about cached_has_bits being unused
1559 (void) cached_has_bits;
1560
1561 clear_optional_apply_default_optimizations();
1562 clear_optional_filter_fusion();
1563 clear_optional_map_and_batch_fusion();
1564 clear_optional_map_and_filter_fusion();
1565 clear_optional_map_fusion();
1566 clear_optional_map_parallelization();
1567 clear_optional_noop_elimination();
1568 clear_optional_parallel_batch();
1569 clear_optional_shuffle_and_repeat_fusion();
1570 clear_optional_filter_parallelization();
1571 clear_optional_inject_prefetch();
1572 _internal_metadata_.Clear<std::string>();
1573 }
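// Illustrative sketch (not part of the generated output): what the
// clear_optional_*() helpers above mean at the public API level. It assumes the
// standard accessors protoc generates for oneof members (set_map_fusion(),
// has_map_fusion(), optional_map_fusion_case()); since every member here is a
// plain bool, "clearing" only has to reset the oneof case discriminant.
inline void OptimizationOptionsOneofClearSketch() {
  ::tensorflow::data::OptimizationOptions opts;
  // Nothing set yet: the oneof case is the *_NOT_SET sentinel.
  opts.set_map_fusion(true);   // selects kMapFusion and stores the value
  // opts.has_map_fusion() is now true.
  opts.Clear();                // runs every clear_optional_*() shown above
  // opts.has_map_fusion() is false again; the case is back to NOT_SET.
}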
1574
1575 const char* OptimizationOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
1576 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1577 while (!ctx->Done(&ptr)) {
1578 ::uint32_t tag;
1579 ptr = ::_pbi::ReadTag(ptr, &tag);
1580 switch (tag >> 3) {
1581 // bool apply_default_optimizations = 1;
1582 case 1:
1583 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
1584 _internal_set_apply_default_optimizations(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
1585 CHK_(ptr);
1586 } else {
1587 goto handle_unusual;
1588 }
1589 continue;
1590 // bool filter_fusion = 6;
1591 case 6:
1592 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 48)) {
1593 _internal_set_filter_fusion(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
1594 CHK_(ptr);
1595 } else {
1596 goto handle_unusual;
1597 }
1598 continue;
1599 // bool map_and_batch_fusion = 9;
1600 case 9:
1601 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 72)) {
1602 _internal_set_map_and_batch_fusion(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
1603 CHK_(ptr);
1604 } else {
1605 goto handle_unusual;
1606 }
1607 continue;
1608 // bool map_and_filter_fusion = 10;
1609 case 10:
1610 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 80)) {
1611 _internal_set_map_and_filter_fusion(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
1612 CHK_(ptr);
1613 } else {
1614 goto handle_unusual;
1615 }
1616 continue;
1617 // bool map_fusion = 11;
1618 case 11:
1619 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 88)) {
1620 _internal_set_map_fusion(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
1621 CHK_(ptr);
1622 } else {
1623 goto handle_unusual;
1624 }
1625 continue;
1626 // bool map_parallelization = 12;
1627 case 12:
1628 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 96)) {
1629 _internal_set_map_parallelization(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
1630 CHK_(ptr);
1631 } else {
1632 goto handle_unusual;
1633 }
1634 continue;
1635 // bool noop_elimination = 14;
1636 case 14:
1637 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 112)) {
1638 _internal_set_noop_elimination(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
1639 CHK_(ptr);
1640 } else {
1641 goto handle_unusual;
1642 }
1643 continue;
1644 // bool parallel_batch = 15;
1645 case 15:
1646 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 120)) {
1647 _internal_set_parallel_batch(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
1648 CHK_(ptr);
1649 } else {
1650 goto handle_unusual;
1651 }
1652 continue;
1653 // bool shuffle_and_repeat_fusion = 17;
1654 case 17:
1655 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 136)) {
1656 _internal_set_shuffle_and_repeat_fusion(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
1657 CHK_(ptr);
1658 } else {
1659 goto handle_unusual;
1660 }
1661 continue;
1662 // bool filter_parallelization = 18;
1663 case 18:
1664 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 144)) {
1665 _internal_set_filter_parallelization(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
1666 CHK_(ptr);
1667 } else {
1668 goto handle_unusual;
1669 }
1670 continue;
1671 // bool inject_prefetch = 19;
1672 case 19:
1673 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 152)) {
1674 _internal_set_inject_prefetch(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
1675 CHK_(ptr);
1676 } else {
1677 goto handle_unusual;
1678 }
1679 continue;
1680 default:
1681 goto handle_unusual;
1682 } // switch
1683 handle_unusual:
1684 if ((tag == 0) || ((tag & 7) == 4)) {
1685 CHK_(ptr);
1686 ctx->SetLastTag(tag);
1687 goto message_done;
1688 }
1689 ptr = UnknownFieldParse(
1690 tag,
1691 _internal_metadata_.mutable_unknown_fields<std::string>(),
1692 ptr, ctx);
1693 CHK_(ptr != nullptr);
1694 } // while
1695 message_done:
1696 return ptr;
1697 failure:
1698 ptr = nullptr;
1699 goto message_done;
1700 #undef CHK_
1701 }
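// Illustrative sketch (not generated code): the magic numbers compared against
// in _InternalParse() above are protobuf wire-format tags,
// tag = (field_number << 3) | wire_type, with wire type 0 (varint) for bools.
// A constexpr helper makes the correspondence explicit.
constexpr ::uint32_t MakeVarintTagSketch(::uint32_t field_number) {
  return (field_number << 3) | 0;  // wire type 0 == varint
}
static_assert(MakeVarintTagSketch(1) == 8,
              "bool apply_default_optimizations = 1 -> tag byte 8");
static_assert(MakeVarintTagSketch(6) == 48,
              "bool filter_fusion = 6 -> tag byte 48");
static_assert(MakeVarintTagSketch(17) == 136,
              "bool shuffle_and_repeat_fusion = 17 -> tag 136 (a two-byte varint on the wire)");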
1702
1703 ::uint8_t* OptimizationOptions::_InternalSerialize(
1704 ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
1705 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.data.OptimizationOptions)
1706 ::uint32_t cached_has_bits = 0;
1707 (void) cached_has_bits;
1708
1709 // bool apply_default_optimizations = 1;
1710 if (_internal_has_apply_default_optimizations()) {
1711 target = stream->EnsureSpace(target);
1712 target = ::_pbi::WireFormatLite::WriteBoolToArray(1, this->_internal_apply_default_optimizations(), target);
1713 }
1714
1715 // bool filter_fusion = 6;
1716 if (_internal_has_filter_fusion()) {
1717 target = stream->EnsureSpace(target);
1718 target = ::_pbi::WireFormatLite::WriteBoolToArray(6, this->_internal_filter_fusion(), target);
1719 }
1720
1721 // bool map_and_batch_fusion = 9;
1722 if (_internal_has_map_and_batch_fusion()) {
1723 target = stream->EnsureSpace(target);
1724 target = ::_pbi::WireFormatLite::WriteBoolToArray(9, this->_internal_map_and_batch_fusion(), target);
1725 }
1726
1727 // bool map_and_filter_fusion = 10;
1728 if (_internal_has_map_and_filter_fusion()) {
1729 target = stream->EnsureSpace(target);
1730 target = ::_pbi::WireFormatLite::WriteBoolToArray(10, this->_internal_map_and_filter_fusion(), target);
1731 }
1732
1733 // bool map_fusion = 11;
1734 if (_internal_has_map_fusion()) {
1735 target = stream->EnsureSpace(target);
1736 target = ::_pbi::WireFormatLite::WriteBoolToArray(11, this->_internal_map_fusion(), target);
1737 }
1738
1739 // bool map_parallelization = 12;
1740 if (_internal_has_map_parallelization()) {
1741 target = stream->EnsureSpace(target);
1742 target = ::_pbi::WireFormatLite::WriteBoolToArray(12, this->_internal_map_parallelization(), target);
1743 }
1744
1745 // bool noop_elimination = 14;
1746 if (_internal_has_noop_elimination()) {
1747 target = stream->EnsureSpace(target);
1748 target = ::_pbi::WireFormatLite::WriteBoolToArray(14, this->_internal_noop_elimination(), target);
1749 }
1750
1751 // bool parallel_batch = 15;
1752 if (_internal_has_parallel_batch()) {
1753 target = stream->EnsureSpace(target);
1754 target = ::_pbi::WireFormatLite::WriteBoolToArray(15, this->_internal_parallel_batch(), target);
1755 }
1756
1757 // bool shuffle_and_repeat_fusion = 17;
1758 if (_internal_has_shuffle_and_repeat_fusion()) {
1759 target = stream->EnsureSpace(target);
1760 target = ::_pbi::WireFormatLite::WriteBoolToArray(17, this->_internal_shuffle_and_repeat_fusion(), target);
1761 }
1762
1763 // bool filter_parallelization = 18;
1764 if (_internal_has_filter_parallelization()) {
1765 target = stream->EnsureSpace(target);
1766 target = ::_pbi::WireFormatLite::WriteBoolToArray(18, this->_internal_filter_parallelization(), target);
1767 }
1768
1769 // bool inject_prefetch = 19;
1770 if (_internal_has_inject_prefetch()) {
1771 target = stream->EnsureSpace(target);
1772 target = ::_pbi::WireFormatLite::WriteBoolToArray(19, this->_internal_inject_prefetch(), target);
1773 }
1774
1775 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1776 target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
1777 static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
1778 }
1779 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.data.OptimizationOptions)
1780 return target;
1781 }
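// Illustrative sketch (not generated code): with exactly one of the
// low-numbered bool fields set, the serializer above emits two bytes -- a
// one-byte tag and a one-byte varint payload. Assumes SerializeAsString()
// inherited from MessageLite.
inline std::string OptimizationOptionsSerializeSketch() {
  ::tensorflow::data::OptimizationOptions opts;
  opts.set_apply_default_optimizations(true);
  std::string wire = opts.SerializeAsString();
  // wire.size() == 2: 0x08 (field 1, varint) followed by 0x01 (true).
  return wire;
}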
1782
1783 size_t OptimizationOptions::ByteSizeLong() const {
1784 // @@protoc_insertion_point(message_byte_size_start:tensorflow.data.OptimizationOptions)
1785 size_t total_size = 0;
1786
1787 switch (optional_apply_default_optimizations_case()) {
1788 // bool apply_default_optimizations = 1;
1789 case kApplyDefaultOptimizations: {
1790 total_size += 1 + 1;
1791 break;
1792 }
1793 case OPTIONAL_APPLY_DEFAULT_OPTIMIZATIONS_NOT_SET: {
1794 break;
1795 }
1796 }
1797 switch (optional_filter_fusion_case()) {
1798 // bool filter_fusion = 6;
1799 case kFilterFusion: {
1800 total_size += 1 + 1;
1801 break;
1802 }
1803 case OPTIONAL_FILTER_FUSION_NOT_SET: {
1804 break;
1805 }
1806 }
1807 switch (optional_map_and_batch_fusion_case()) {
1808 // bool map_and_batch_fusion = 9;
1809 case kMapAndBatchFusion: {
1810 total_size += 1 + 1;
1811 break;
1812 }
1813 case OPTIONAL_MAP_AND_BATCH_FUSION_NOT_SET: {
1814 break;
1815 }
1816 }
1817 switch (optional_map_and_filter_fusion_case()) {
1818 // bool map_and_filter_fusion = 10;
1819 case kMapAndFilterFusion: {
1820 total_size += 1 + 1;
1821 break;
1822 }
1823 case OPTIONAL_MAP_AND_FILTER_FUSION_NOT_SET: {
1824 break;
1825 }
1826 }
1827 switch (optional_map_fusion_case()) {
1828 // bool map_fusion = 11;
1829 case kMapFusion: {
1830 total_size += 1 + 1;
1831 break;
1832 }
1833 case OPTIONAL_MAP_FUSION_NOT_SET: {
1834 break;
1835 }
1836 }
1837 switch (optional_map_parallelization_case()) {
1838 // bool map_parallelization = 12;
1839 case kMapParallelization: {
1840 total_size += 1 + 1;
1841 break;
1842 }
1843 case OPTIONAL_MAP_PARALLELIZATION_NOT_SET: {
1844 break;
1845 }
1846 }
1847 switch (optional_noop_elimination_case()) {
1848 // bool noop_elimination = 14;
1849 case kNoopElimination: {
1850 total_size += 1 + 1;
1851 break;
1852 }
1853 case OPTIONAL_NOOP_ELIMINATION_NOT_SET: {
1854 break;
1855 }
1856 }
1857 switch (optional_parallel_batch_case()) {
1858 // bool parallel_batch = 15;
1859 case kParallelBatch: {
1860 total_size += 1 + 1;
1861 break;
1862 }
1863 case OPTIONAL_PARALLEL_BATCH_NOT_SET: {
1864 break;
1865 }
1866 }
1867 switch (optional_shuffle_and_repeat_fusion_case()) {
1868 // bool shuffle_and_repeat_fusion = 17;
1869 case kShuffleAndRepeatFusion: {
1870 total_size += 2 + 1;
1871 break;
1872 }
1873 case OPTIONAL_SHUFFLE_AND_REPEAT_FUSION_NOT_SET: {
1874 break;
1875 }
1876 }
1877 switch (optional_filter_parallelization_case()) {
1878 // bool filter_parallelization = 18;
1879 case kFilterParallelization: {
1880 total_size += 2 + 1;
1881 break;
1882 }
1883 case OPTIONAL_FILTER_PARALLELIZATION_NOT_SET: {
1884 break;
1885 }
1886 }
1887 switch (optional_inject_prefetch_case()) {
1888 // bool inject_prefetch = 19;
1889 case kInjectPrefetch: {
1890 total_size += 2 + 1;
1891 break;
1892 }
1893 case OPTIONAL_INJECT_PREFETCH_NOT_SET: {
1894 break;
1895 }
1896 }
1897 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1898 total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
1899 }
1900 int cached_size = ::_pbi::ToCachedSize(total_size);
1901 SetCachedSize(cached_size);
1902 return total_size;
1903 }
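// Illustrative sketch (not generated code): why ByteSizeLong() above charges
// "1 + 1" bytes for fields 1..15 but "2 + 1" for fields 17..19. The tag is a
// varint of (field_number << 3 | wire_type), so it fits in one byte only while
// the field number is <= 15; the bool payload is always a single varint byte.
// (The helper below is only valid for field numbers up to 2047, which covers
// everything in this message.)
constexpr size_t BoolFieldWireSizeSketch(::uint32_t field_number) {
  return (field_number <= 15 ? 1 : 2) /* tag bytes */ + 1 /* varint payload */;
}
static_assert(BoolFieldWireSizeSketch(1) == 2, "apply_default_optimizations = 1");
static_assert(BoolFieldWireSizeSketch(17) == 3, "shuffle_and_repeat_fusion = 17");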
1904
1905 void OptimizationOptions::CheckTypeAndMergeFrom(
1906 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
1907 MergeFrom(*::_pbi::DownCast<const OptimizationOptions*>(
1908 &from));
1909 }
1910
1911 void OptimizationOptions::MergeFrom(const OptimizationOptions& from) {
1912 OptimizationOptions* const _this = this;
1913 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.data.OptimizationOptions)
1914 GOOGLE_DCHECK_NE(&from, _this);
1915 ::uint32_t cached_has_bits = 0;
1916 (void) cached_has_bits;
1917
1918 switch (from.optional_apply_default_optimizations_case()) {
1919 case kApplyDefaultOptimizations: {
1920 _this->_internal_set_apply_default_optimizations(from._internal_apply_default_optimizations());
1921 break;
1922 }
1923 case OPTIONAL_APPLY_DEFAULT_OPTIMIZATIONS_NOT_SET: {
1924 break;
1925 }
1926 }
1927 switch (from.optional_filter_fusion_case()) {
1928 case kFilterFusion: {
1929 _this->_internal_set_filter_fusion(from._internal_filter_fusion());
1930 break;
1931 }
1932 case OPTIONAL_FILTER_FUSION_NOT_SET: {
1933 break;
1934 }
1935 }
1936 switch (from.optional_map_and_batch_fusion_case()) {
1937 case kMapAndBatchFusion: {
1938 _this->_internal_set_map_and_batch_fusion(from._internal_map_and_batch_fusion());
1939 break;
1940 }
1941 case OPTIONAL_MAP_AND_BATCH_FUSION_NOT_SET: {
1942 break;
1943 }
1944 }
1945 switch (from.optional_map_and_filter_fusion_case()) {
1946 case kMapAndFilterFusion: {
1947 _this->_internal_set_map_and_filter_fusion(from._internal_map_and_filter_fusion());
1948 break;
1949 }
1950 case OPTIONAL_MAP_AND_FILTER_FUSION_NOT_SET: {
1951 break;
1952 }
1953 }
1954 switch (from.optional_map_fusion_case()) {
1955 case kMapFusion: {
1956 _this->_internal_set_map_fusion(from._internal_map_fusion());
1957 break;
1958 }
1959 case OPTIONAL_MAP_FUSION_NOT_SET: {
1960 break;
1961 }
1962 }
1963 switch (from.optional_map_parallelization_case()) {
1964 case kMapParallelization: {
1965 _this->_internal_set_map_parallelization(from._internal_map_parallelization());
1966 break;
1967 }
1968 case OPTIONAL_MAP_PARALLELIZATION_NOT_SET: {
1969 break;
1970 }
1971 }
1972 switch (from.optional_noop_elimination_case()) {
1973 case kNoopElimination: {
1974 _this->_internal_set_noop_elimination(from._internal_noop_elimination());
1975 break;
1976 }
1977 case OPTIONAL_NOOP_ELIMINATION_NOT_SET: {
1978 break;
1979 }
1980 }
1981 switch (from.optional_parallel_batch_case()) {
1982 case kParallelBatch: {
1983 _this->_internal_set_parallel_batch(from._internal_parallel_batch());
1984 break;
1985 }
1986 case OPTIONAL_PARALLEL_BATCH_NOT_SET: {
1987 break;
1988 }
1989 }
1990 switch (from.optional_shuffle_and_repeat_fusion_case()) {
1991 case kShuffleAndRepeatFusion: {
1992 _this->_internal_set_shuffle_and_repeat_fusion(from._internal_shuffle_and_repeat_fusion());
1993 break;
1994 }
1995 case OPTIONAL_SHUFFLE_AND_REPEAT_FUSION_NOT_SET: {
1996 break;
1997 }
1998 }
1999 switch (from.optional_filter_parallelization_case()) {
2000 case kFilterParallelization: {
2001 _this->_internal_set_filter_parallelization(from._internal_filter_parallelization());
2002 break;
2003 }
2004 case OPTIONAL_FILTER_PARALLELIZATION_NOT_SET: {
2005 break;
2006 }
2007 }
2008 switch (from.optional_inject_prefetch_case()) {
2009 case kInjectPrefetch: {
2010 _this->_internal_set_inject_prefetch(from._internal_inject_prefetch());
2011 break;
2012 }
2013 case OPTIONAL_INJECT_PREFETCH_NOT_SET: {
2014 break;
2015 }
2016 }
2017 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
2018 }
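// Illustrative sketch (not generated code): the per-oneof switches in
// MergeFrom() above give "set in the source overwrites, unset in the source
// keeps the destination" semantics. Assumes the standard generated accessors.
inline void OptimizationOptionsMergeSketch() {
  ::tensorflow::data::OptimizationOptions base;
  base.set_map_fusion(false);

  ::tensorflow::data::OptimizationOptions overlay;
  overlay.set_map_fusion(true);   // set in overlay -> overwrites base
  // overlay leaves noop_elimination unset -> base's (unset) state is kept

  base.MergeFrom(overlay);
  // base.map_fusion() is now true; base still has no noop_elimination value.
}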
2019
2020 void OptimizationOptions::CopyFrom(const OptimizationOptions& from) {
2021 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.data.OptimizationOptions)
2022 if (&from == this) return;
2023 Clear();
2024 MergeFrom(from);
2025 }
2026
2027 bool OptimizationOptions::IsInitialized() const {
2028 return true;
2029 }
2030
2031 void OptimizationOptions::InternalSwap(OptimizationOptions* other) {
2032 using std::swap;
2033 _internal_metadata_.InternalSwap(&other->_internal_metadata_);
2034 swap(_impl_.optional_apply_default_optimizations_, other->_impl_.optional_apply_default_optimizations_);
2035 swap(_impl_.optional_filter_fusion_, other->_impl_.optional_filter_fusion_);
2036 swap(_impl_.optional_map_and_batch_fusion_, other->_impl_.optional_map_and_batch_fusion_);
2037 swap(_impl_.optional_map_and_filter_fusion_, other->_impl_.optional_map_and_filter_fusion_);
2038 swap(_impl_.optional_map_fusion_, other->_impl_.optional_map_fusion_);
2039 swap(_impl_.optional_map_parallelization_, other->_impl_.optional_map_parallelization_);
2040 swap(_impl_.optional_noop_elimination_, other->_impl_.optional_noop_elimination_);
2041 swap(_impl_.optional_parallel_batch_, other->_impl_.optional_parallel_batch_);
2042 swap(_impl_.optional_shuffle_and_repeat_fusion_, other->_impl_.optional_shuffle_and_repeat_fusion_);
2043 swap(_impl_.optional_filter_parallelization_, other->_impl_.optional_filter_parallelization_);
2044 swap(_impl_.optional_inject_prefetch_, other->_impl_.optional_inject_prefetch_);
2045 swap(_impl_._oneof_case_[0], other->_impl_._oneof_case_[0]);
2046 swap(_impl_._oneof_case_[1], other->_impl_._oneof_case_[1]);
2047 swap(_impl_._oneof_case_[2], other->_impl_._oneof_case_[2]);
2048 swap(_impl_._oneof_case_[3], other->_impl_._oneof_case_[3]);
2049 swap(_impl_._oneof_case_[4], other->_impl_._oneof_case_[4]);
2050 swap(_impl_._oneof_case_[5], other->_impl_._oneof_case_[5]);
2051 swap(_impl_._oneof_case_[6], other->_impl_._oneof_case_[6]);
2052 swap(_impl_._oneof_case_[7], other->_impl_._oneof_case_[7]);
2053 swap(_impl_._oneof_case_[8], other->_impl_._oneof_case_[8]);
2054 swap(_impl_._oneof_case_[9], other->_impl_._oneof_case_[9]);
2055 swap(_impl_._oneof_case_[10], other->_impl_._oneof_case_[10]);
2056 }
2057
2058 std::string OptimizationOptions::GetTypeName() const {
2059 return "tensorflow.data.OptimizationOptions";
2060 }
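// Illustrative sketch (not generated code): a minimal round trip through the
// lite serialization path implemented above. ParseFromString() and
// SerializeAsString() come from MessageLite; the field accessors are the
// standard ones protoc generates.
inline bool OptimizationOptionsRoundTripSketch() {
  ::tensorflow::data::OptimizationOptions original;
  original.set_map_parallelization(true);
  original.set_parallel_batch(true);

  ::tensorflow::data::OptimizationOptions restored;
  if (!restored.ParseFromString(original.SerializeAsString())) return false;
  return restored.map_parallelization() && restored.parallel_batch();
}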
2061
2062
2063 // ===================================================================
2064
2065 class ThreadingOptions::_Internal {
2066 public:
2067 };
2068
2069 ThreadingOptions::ThreadingOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
2070 bool is_message_owned)
2071 : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
2072 SharedCtor(arena, is_message_owned);
2073 // @@protoc_insertion_point(arena_constructor:tensorflow.data.ThreadingOptions)
2074 }
2075 ThreadingOptions::ThreadingOptions(const ThreadingOptions& from)
2076 : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
2077 ThreadingOptions* const _this = this; (void)_this;
2078 new (&_impl_) Impl_{
2079 decltype(_impl_.optional_max_intra_op_parallelism_){}
2080 , decltype(_impl_.optional_private_threadpool_size_){}
2081 , /*decltype(_impl_._cached_size_)*/{}
2082 , /*decltype(_impl_._oneof_case_)*/{}};
2083
2084 _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
2085 clear_has_optional_max_intra_op_parallelism();
2086 switch (from.optional_max_intra_op_parallelism_case()) {
2087 case kMaxIntraOpParallelism: {
2088 _this->_internal_set_max_intra_op_parallelism(from._internal_max_intra_op_parallelism());
2089 break;
2090 }
2091 case OPTIONAL_MAX_INTRA_OP_PARALLELISM_NOT_SET: {
2092 break;
2093 }
2094 }
2095 clear_has_optional_private_threadpool_size();
2096 switch (from.optional_private_threadpool_size_case()) {
2097 case kPrivateThreadpoolSize: {
2098 _this->_internal_set_private_threadpool_size(from._internal_private_threadpool_size());
2099 break;
2100 }
2101 case OPTIONAL_PRIVATE_THREADPOOL_SIZE_NOT_SET: {
2102 break;
2103 }
2104 }
2105 // @@protoc_insertion_point(copy_constructor:tensorflow.data.ThreadingOptions)
2106 }
2107
2108 inline void ThreadingOptions::SharedCtor(
2109 ::_pb::Arena* arena, bool is_message_owned) {
2110 (void)arena;
2111 (void)is_message_owned;
2112 new (&_impl_) Impl_{
2113 decltype(_impl_.optional_max_intra_op_parallelism_){}
2114 , decltype(_impl_.optional_private_threadpool_size_){}
2115 , /*decltype(_impl_._cached_size_)*/{}
2116 , /*decltype(_impl_._oneof_case_)*/{}
2117 };
2118 clear_has_optional_max_intra_op_parallelism();
2119 clear_has_optional_private_threadpool_size();
2120 }
2121
2122 ThreadingOptions::~ThreadingOptions() {
2123 // @@protoc_insertion_point(destructor:tensorflow.data.ThreadingOptions)
2124 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
2125 (void)arena;
2126 return;
2127 }
2128 SharedDtor();
2129 }
2130
2131 inline void ThreadingOptions::SharedDtor() {
2132 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
2133 if (has_optional_max_intra_op_parallelism()) {
2134 clear_optional_max_intra_op_parallelism();
2135 }
2136 if (has_optional_private_threadpool_size()) {
2137 clear_optional_private_threadpool_size();
2138 }
2139 }
2140
2141 void ThreadingOptions::SetCachedSize(int size) const {
2142 _impl_._cached_size_.Set(size);
2143 }
2144
2145 void ThreadingOptions::clear_optional_max_intra_op_parallelism() {
2146 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.ThreadingOptions)
2147 switch (optional_max_intra_op_parallelism_case()) {
2148 case kMaxIntraOpParallelism: {
2149 // No need to clear
2150 break;
2151 }
2152 case OPTIONAL_MAX_INTRA_OP_PARALLELISM_NOT_SET: {
2153 break;
2154 }
2155 }
2156 _impl_._oneof_case_[0] = OPTIONAL_MAX_INTRA_OP_PARALLELISM_NOT_SET;
2157 }
2158
2159 void ThreadingOptions::clear_optional_private_threadpool_size() {
2160 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.ThreadingOptions)
2161 switch (optional_private_threadpool_size_case()) {
2162 case kPrivateThreadpoolSize: {
2163 // No need to clear
2164 break;
2165 }
2166 case OPTIONAL_PRIVATE_THREADPOOL_SIZE_NOT_SET: {
2167 break;
2168 }
2169 }
2170 _impl_._oneof_case_[1] = OPTIONAL_PRIVATE_THREADPOOL_SIZE_NOT_SET;
2171 }
2172
2173
2174 void ThreadingOptions::Clear() {
2175 // @@protoc_insertion_point(message_clear_start:tensorflow.data.ThreadingOptions)
2176 ::uint32_t cached_has_bits = 0;
2177 // Prevent compiler warnings about cached_has_bits being unused
2178 (void) cached_has_bits;
2179
2180 clear_optional_max_intra_op_parallelism();
2181 clear_optional_private_threadpool_size();
2182 _internal_metadata_.Clear<std::string>();
2183 }
2184
2185 const char* ThreadingOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
2186 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
2187 while (!ctx->Done(&ptr)) {
2188 ::uint32_t tag;
2189 ptr = ::_pbi::ReadTag(ptr, &tag);
2190 switch (tag >> 3) {
2191 // int32 max_intra_op_parallelism = 1;
2192 case 1:
2193 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
2194 _internal_set_max_intra_op_parallelism(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr));
2195 CHK_(ptr);
2196 } else {
2197 goto handle_unusual;
2198 }
2199 continue;
2200 // int32 private_threadpool_size = 2;
2201 case 2:
2202 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
2203 _internal_set_private_threadpool_size(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr));
2204 CHK_(ptr);
2205 } else {
2206 goto handle_unusual;
2207 }
2208 continue;
2209 default:
2210 goto handle_unusual;
2211 } // switch
2212 handle_unusual:
2213 if ((tag == 0) || ((tag & 7) == 4)) {
2214 CHK_(ptr);
2215 ctx->SetLastTag(tag);
2216 goto message_done;
2217 }
2218 ptr = UnknownFieldParse(
2219 tag,
2220 _internal_metadata_.mutable_unknown_fields<std::string>(),
2221 ptr, ctx);
2222 CHK_(ptr != nullptr);
2223 } // while
2224 message_done:
2225 return ptr;
2226 failure:
2227 ptr = nullptr;
2228 goto message_done;
2229 #undef CHK_
2230 }
2231
2232 ::uint8_t* ThreadingOptions::_InternalSerialize(
2233 ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
2234 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.data.ThreadingOptions)
2235 ::uint32_t cached_has_bits = 0;
2236 (void) cached_has_bits;
2237
2238 // int32 max_intra_op_parallelism = 1;
2239 if (_internal_has_max_intra_op_parallelism()) {
2240 target = stream->EnsureSpace(target);
2241 target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_max_intra_op_parallelism(), target);
2242 }
2243
2244 // int32 private_threadpool_size = 2;
2245 if (_internal_has_private_threadpool_size()) {
2246 target = stream->EnsureSpace(target);
2247 target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_private_threadpool_size(), target);
2248 }
2249
2250 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2251 target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
2252 static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
2253 }
2254 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.data.ThreadingOptions)
2255 return target;
2256 }
2257
2258 size_t ThreadingOptions::ByteSizeLong() const {
2259 // @@protoc_insertion_point(message_byte_size_start:tensorflow.data.ThreadingOptions)
2260 size_t total_size = 0;
2261
2262 switch (optional_max_intra_op_parallelism_case()) {
2263 // int32 max_intra_op_parallelism = 1;
2264 case kMaxIntraOpParallelism: {
2265 total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_max_intra_op_parallelism());
2266 break;
2267 }
2268 case OPTIONAL_MAX_INTRA_OP_PARALLELISM_NOT_SET: {
2269 break;
2270 }
2271 }
2272 switch (optional_private_threadpool_size_case()) {
2273 // int32 private_threadpool_size = 2;
2274 case kPrivateThreadpoolSize: {
2275 total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_private_threadpool_size());
2276 break;
2277 }
2278 case OPTIONAL_PRIVATE_THREADPOOL_SIZE_NOT_SET: {
2279 break;
2280 }
2281 }
2282 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2283 total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
2284 }
2285 int cached_size = ::_pbi::ToCachedSize(total_size);
2286 SetCachedSize(cached_size);
2287 return total_size;
2288 }
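// Illustrative sketch (not generated code): Int32SizePlusOne() used above is
// "varint size of the value, plus one byte for the tag". For the int32 fields
// in this message that means 1 + 1 bytes for small non-negative values and
// 1 + 10 bytes for negative ones (a negative int32 is sign-extended to a
// ten-byte varint on the wire). Field values below are arbitrary examples.
inline size_t ThreadingOptionsSizeSketch() {
  ::tensorflow::data::ThreadingOptions t;
  t.set_max_intra_op_parallelism(4);    // 1 tag byte + 1 payload byte
  t.set_private_threadpool_size(-1);    // 1 tag byte + 10 payload bytes
  return t.ByteSizeLong();              // 13 under these assumptions
}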
2289
2290 void ThreadingOptions::CheckTypeAndMergeFrom(
2291 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
2292 MergeFrom(*::_pbi::DownCast<const ThreadingOptions*>(
2293 &from));
2294 }
2295
2296 void ThreadingOptions::MergeFrom(const ThreadingOptions& from) {
2297 ThreadingOptions* const _this = this;
2298 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.data.ThreadingOptions)
2299 GOOGLE_DCHECK_NE(&from, _this);
2300 ::uint32_t cached_has_bits = 0;
2301 (void) cached_has_bits;
2302
2303 switch (from.optional_max_intra_op_parallelism_case()) {
2304 case kMaxIntraOpParallelism: {
2305 _this->_internal_set_max_intra_op_parallelism(from._internal_max_intra_op_parallelism());
2306 break;
2307 }
2308 case OPTIONAL_MAX_INTRA_OP_PARALLELISM_NOT_SET: {
2309 break;
2310 }
2311 }
2312 switch (from.optional_private_threadpool_size_case()) {
2313 case kPrivateThreadpoolSize: {
2314 _this->_internal_set_private_threadpool_size(from._internal_private_threadpool_size());
2315 break;
2316 }
2317 case OPTIONAL_PRIVATE_THREADPOOL_SIZE_NOT_SET: {
2318 break;
2319 }
2320 }
2321 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
2322 }
2323
2324 void ThreadingOptions::CopyFrom(const ThreadingOptions& from) {
2325 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.data.ThreadingOptions)
2326 if (&from == this) return;
2327 Clear();
2328 MergeFrom(from);
2329 }
2330
2331 bool ThreadingOptions::IsInitialized() const {
2332 return true;
2333 }
2334
2335 void ThreadingOptions::InternalSwap(ThreadingOptions* other) {
2336 using std::swap;
2337 _internal_metadata_.InternalSwap(&other->_internal_metadata_);
2338 swap(_impl_.optional_max_intra_op_parallelism_, other->_impl_.optional_max_intra_op_parallelism_);
2339 swap(_impl_.optional_private_threadpool_size_, other->_impl_.optional_private_threadpool_size_);
2340 swap(_impl_._oneof_case_[0], other->_impl_._oneof_case_[0]);
2341 swap(_impl_._oneof_case_[1], other->_impl_._oneof_case_[1]);
2342 }
2343
2344 std::string ThreadingOptions::GetTypeName() const {
2345 return "tensorflow.data.ThreadingOptions";
2346 }
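// Illustrative sketch (not generated code): because each int32 above lives in a
// single-field oneof wrapper, explicitly setting it to 0 is distinguishable
// from never setting it -- unlike a plain proto3 scalar. Assumes the standard
// generated has_*() and *_case() accessors.
inline void ThreadingOptionsPresenceSketch() {
  ::tensorflow::data::ThreadingOptions t;
  // optional_private_threadpool_size_case() starts out as the NOT_SET sentinel.
  t.set_private_threadpool_size(0);
  // t.has_private_threadpool_size() is now true, and field 2 is serialized
  // even though its value is the default 0.
}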
2347
2348
2349 // ===================================================================
2350
2351 class Options::_Internal {
2352 public:
2353 static const ::tensorflow::data::AutotuneOptions& autotune_options(const Options* msg);
2354 static const ::tensorflow::data::DistributeOptions& distribute_options(const Options* msg);
2355 static const ::tensorflow::data::OptimizationOptions& optimization_options(const Options* msg);
2356 static const ::tensorflow::data::ThreadingOptions& threading_options(const Options* msg);
2357 };
2358
2359 const ::tensorflow::data::AutotuneOptions&
2360 Options::_Internal::autotune_options(const Options* msg) {
2361 return *msg->_impl_.autotune_options_;
2362 }
2363 const ::tensorflow::data::DistributeOptions&
2364 Options::_Internal::distribute_options(const Options* msg) {
2365 return *msg->_impl_.distribute_options_;
2366 }
2367 const ::tensorflow::data::OptimizationOptions&
2368 Options::_Internal::optimization_options(const Options* msg) {
2369 return *msg->_impl_.optimization_options_;
2370 }
2371 const ::tensorflow::data::ThreadingOptions&
2372 Options::_Internal::threading_options(const Options* msg) {
2373 return *msg->_impl_.threading_options_;
2374 }
2375 Options::Options(::PROTOBUF_NAMESPACE_ID::Arena* arena,
2376 bool is_message_owned)
2377 : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
2378 SharedCtor(arena, is_message_owned);
2379 // @@protoc_insertion_point(arena_constructor:tensorflow.data.Options)
2380 }
2381 Options::Options(const Options& from)
2382 : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
2383 Options* const _this = this; (void)_this;
2384 new (&_impl_) Impl_{
2385 decltype(_impl_.distribute_options_){nullptr}
2386 , decltype(_impl_.optimization_options_){nullptr}
2387 , decltype(_impl_.threading_options_){nullptr}
2388 , decltype(_impl_.autotune_options_){nullptr}
2389 , decltype(_impl_.optional_deterministic_){}
2390 , decltype(_impl_.optional_slack_){}
2391 , decltype(_impl_.optional_external_state_policy_){}
2392 , /*decltype(_impl_._cached_size_)*/{}
2393 , /*decltype(_impl_._oneof_case_)*/{}};
2394
2395 _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
2396 if (from._internal_has_distribute_options()) {
2397 _this->_impl_.distribute_options_ = new ::tensorflow::data::DistributeOptions(*from._impl_.distribute_options_);
2398 }
2399 if (from._internal_has_optimization_options()) {
2400 _this->_impl_.optimization_options_ = new ::tensorflow::data::OptimizationOptions(*from._impl_.optimization_options_);
2401 }
2402 if (from._internal_has_threading_options()) {
2403 _this->_impl_.threading_options_ = new ::tensorflow::data::ThreadingOptions(*from._impl_.threading_options_);
2404 }
2405 if (from._internal_has_autotune_options()) {
2406 _this->_impl_.autotune_options_ = new ::tensorflow::data::AutotuneOptions(*from._impl_.autotune_options_);
2407 }
2408 clear_has_optional_deterministic();
2409 switch (from.optional_deterministic_case()) {
2410 case kDeterministic: {
2411 _this->_internal_set_deterministic(from._internal_deterministic());
2412 break;
2413 }
2414 case OPTIONAL_DETERMINISTIC_NOT_SET: {
2415 break;
2416 }
2417 }
2418 clear_has_optional_slack();
2419 switch (from.optional_slack_case()) {
2420 case kSlack: {
2421 _this->_internal_set_slack(from._internal_slack());
2422 break;
2423 }
2424 case OPTIONAL_SLACK_NOT_SET: {
2425 break;
2426 }
2427 }
2428 clear_has_optional_external_state_policy();
2429 switch (from.optional_external_state_policy_case()) {
2430 case kExternalStatePolicy: {
2431 _this->_internal_set_external_state_policy(from._internal_external_state_policy());
2432 break;
2433 }
2434 case OPTIONAL_EXTERNAL_STATE_POLICY_NOT_SET: {
2435 break;
2436 }
2437 }
2438 // @@protoc_insertion_point(copy_constructor:tensorflow.data.Options)
2439 }
2440
2441 inline void Options::SharedCtor(
2442 ::_pb::Arena* arena, bool is_message_owned) {
2443 (void)arena;
2444 (void)is_message_owned;
2445 new (&_impl_) Impl_{
2446 decltype(_impl_.distribute_options_){nullptr}
2447 , decltype(_impl_.optimization_options_){nullptr}
2448 , decltype(_impl_.threading_options_){nullptr}
2449 , decltype(_impl_.autotune_options_){nullptr}
2450 , decltype(_impl_.optional_deterministic_){}
2451 , decltype(_impl_.optional_slack_){}
2452 , decltype(_impl_.optional_external_state_policy_){}
2453 , /*decltype(_impl_._cached_size_)*/{}
2454 , /*decltype(_impl_._oneof_case_)*/{}
2455 };
2456 clear_has_optional_deterministic();
2457 clear_has_optional_slack();
2458 clear_has_optional_external_state_policy();
2459 }
2460
2461 Options::~Options() {
2462 // @@protoc_insertion_point(destructor:tensorflow.data.Options)
2463 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
2464 (void)arena;
2465 return;
2466 }
2467 SharedDtor();
2468 }
2469
2470 inline void Options::SharedDtor() {
2471 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
2472 if (this != internal_default_instance()) delete _impl_.distribute_options_;
2473 if (this != internal_default_instance()) delete _impl_.optimization_options_;
2474 if (this != internal_default_instance()) delete _impl_.threading_options_;
2475 if (this != internal_default_instance()) delete _impl_.autotune_options_;
2476 if (has_optional_deterministic()) {
2477 clear_optional_deterministic();
2478 }
2479 if (has_optional_slack()) {
2480 clear_optional_slack();
2481 }
2482 if (has_optional_external_state_policy()) {
2483 clear_optional_external_state_policy();
2484 }
2485 }
2486
2487 void Options::SetCachedSize(int size) const {
2488 _impl_._cached_size_.Set(size);
2489 }
2490
2491 void Options::clear_optional_deterministic() {
2492 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.Options)
2493 switch (optional_deterministic_case()) {
2494 case kDeterministic: {
2495 // No need to clear
2496 break;
2497 }
2498 case OPTIONAL_DETERMINISTIC_NOT_SET: {
2499 break;
2500 }
2501 }
2502 _impl_._oneof_case_[0] = OPTIONAL_DETERMINISTIC_NOT_SET;
2503 }
2504
2505 void Options::clear_optional_slack() {
2506 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.Options)
2507 switch (optional_slack_case()) {
2508 case kSlack: {
2509 // No need to clear
2510 break;
2511 }
2512 case OPTIONAL_SLACK_NOT_SET: {
2513 break;
2514 }
2515 }
2516 _impl_._oneof_case_[1] = OPTIONAL_SLACK_NOT_SET;
2517 }
2518
2519 void Options::clear_optional_external_state_policy() {
2520 // @@protoc_insertion_point(one_of_clear_start:tensorflow.data.Options)
2521 switch (optional_external_state_policy_case()) {
2522 case kExternalStatePolicy: {
2523 // No need to clear
2524 break;
2525 }
2526 case OPTIONAL_EXTERNAL_STATE_POLICY_NOT_SET: {
2527 break;
2528 }
2529 }
2530 _impl_._oneof_case_[2] = OPTIONAL_EXTERNAL_STATE_POLICY_NOT_SET;
2531 }
2532
2533
2534 void Options::Clear() {
2535 // @@protoc_insertion_point(message_clear_start:tensorflow.data.Options)
2536 ::uint32_t cached_has_bits = 0;
2537 // Prevent compiler warnings about cached_has_bits being unused
2538 (void) cached_has_bits;
2539
2540 if (GetArenaForAllocation() == nullptr && _impl_.distribute_options_ != nullptr) {
2541 delete _impl_.distribute_options_;
2542 }
2543 _impl_.distribute_options_ = nullptr;
2544 if (GetArenaForAllocation() == nullptr && _impl_.optimization_options_ != nullptr) {
2545 delete _impl_.optimization_options_;
2546 }
2547 _impl_.optimization_options_ = nullptr;
2548 if (GetArenaForAllocation() == nullptr && _impl_.threading_options_ != nullptr) {
2549 delete _impl_.threading_options_;
2550 }
2551 _impl_.threading_options_ = nullptr;
2552 if (GetArenaForAllocation() == nullptr && _impl_.autotune_options_ != nullptr) {
2553 delete _impl_.autotune_options_;
2554 }
2555 _impl_.autotune_options_ = nullptr;
2556 clear_optional_deterministic();
2557 clear_optional_slack();
2558 clear_optional_external_state_policy();
2559 _internal_metadata_.Clear<std::string>();
2560 }
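// Illustrative sketch (not generated code): Clear() above deletes the four
// owned sub-messages when the Options object is not arena-allocated, then
// resets every oneof to its *_NOT_SET case. Assumes the standard generated
// mutable_*() and has_*() accessors.
inline void OptionsClearSketch() {
  ::tensorflow::data::Options options;                 // no arena involved
  options.mutable_optimization_options()->set_map_fusion(true);
  options.set_slack(true);
  options.Clear();
  // options.has_optimization_options() is false again (the sub-message was
  // deleted), and options.has_slack() is false (oneof back to NOT_SET).
}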
2561
2562 const char* Options::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
2563 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
2564 while (!ctx->Done(&ptr)) {
2565 ::uint32_t tag;
2566 ptr = ::_pbi::ReadTag(ptr, &tag);
2567 switch (tag >> 3) {
2568 // bool deterministic = 1;
2569 case 1:
2570 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
2571 _internal_set_deterministic(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
2572 CHK_(ptr);
2573 } else {
2574 goto handle_unusual;
2575 }
2576 continue;
2577 // .tensorflow.data.DistributeOptions distribute_options = 2;
2578 case 2:
2579 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
2580 ptr = ctx->ParseMessage(_internal_mutable_distribute_options(), ptr);
2581 CHK_(ptr);
2582 } else {
2583 goto handle_unusual;
2584 }
2585 continue;
2586 // .tensorflow.data.OptimizationOptions optimization_options = 3;
2587 case 3:
2588 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
2589 ptr = ctx->ParseMessage(_internal_mutable_optimization_options(), ptr);
2590 CHK_(ptr);
2591 } else {
2592 goto handle_unusual;
2593 }
2594 continue;
2595 // bool slack = 4;
2596 case 4:
2597 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
2598 _internal_set_slack(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
2599 CHK_(ptr);
2600 } else {
2601 goto handle_unusual;
2602 }
2603 continue;
2604 // .tensorflow.data.ThreadingOptions threading_options = 5;
2605 case 5:
2606 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
2607 ptr = ctx->ParseMessage(_internal_mutable_threading_options(), ptr);
2608 CHK_(ptr);
2609 } else {
2610 goto handle_unusual;
2611 }
2612 continue;
2613 // .tensorflow.data.ExternalStatePolicy external_state_policy = 6;
2614 case 6:
2615 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 48)) {
2616 ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
2617 CHK_(ptr);
2618 _internal_set_external_state_policy(static_cast<::tensorflow::data::ExternalStatePolicy>(val));
2619 } else {
2620 goto handle_unusual;
2621 }
2622 continue;
2623 // .tensorflow.data.AutotuneOptions autotune_options = 7;
2624 case 7:
2625 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 58)) {
2626 ptr = ctx->ParseMessage(_internal_mutable_autotune_options(), ptr);
2627 CHK_(ptr);
2628 } else {
2629 goto handle_unusual;
2630 }
2631 continue;
2632 default:
2633 goto handle_unusual;
2634 } // switch
2635 handle_unusual:
2636 if ((tag == 0) || ((tag & 7) == 4)) {
2637 CHK_(ptr);
2638 ctx->SetLastTag(tag);
2639 goto message_done;
2640 }
2641 ptr = UnknownFieldParse(
2642 tag,
2643 _internal_metadata_.mutable_unknown_fields<std::string>(),
2644 ptr, ctx);
2645 CHK_(ptr != nullptr);
2646 } // while
2647 message_done:
2648 return ptr;
2649 failure:
2650 ptr = nullptr;
2651 goto message_done;
2652 #undef CHK_
2653 }
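// Illustrative sketch (not generated code): the tag constants matched in
// Options::_InternalParse() above follow the same (field_number << 3) | wire_type
// rule, with wire type 2 (length-delimited) for the sub-messages and wire type 0
// (varint) for the bools and the enum.
constexpr ::uint32_t MakeTagSketch(::uint32_t field_number, ::uint32_t wire_type) {
  return (field_number << 3) | wire_type;
}
static_assert(MakeTagSketch(2, 2) == 18, "DistributeOptions distribute_options = 2");
static_assert(MakeTagSketch(3, 2) == 26, "OptimizationOptions optimization_options = 3");
static_assert(MakeTagSketch(5, 2) == 42, "ThreadingOptions threading_options = 5");
static_assert(MakeTagSketch(7, 2) == 58, "AutotuneOptions autotune_options = 7");
static_assert(MakeTagSketch(6, 0) == 48, "ExternalStatePolicy external_state_policy = 6");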
2654
2655 ::uint8_t* Options::_InternalSerialize(
2656 ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
2657 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.data.Options)
2658 ::uint32_t cached_has_bits = 0;
2659 (void) cached_has_bits;
2660
2661 // bool deterministic = 1;
2662 if (_internal_has_deterministic()) {
2663 target = stream->EnsureSpace(target);
2664 target = ::_pbi::WireFormatLite::WriteBoolToArray(1, this->_internal_deterministic(), target);
2665 }
2666
2667 // .tensorflow.data.DistributeOptions distribute_options = 2;
2668 if (this->_internal_has_distribute_options()) {
2669 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
2670 InternalWriteMessage(2, _Internal::distribute_options(this),
2671 _Internal::distribute_options(this).GetCachedSize(), target, stream);
2672 }
2673
2674 // .tensorflow.data.OptimizationOptions optimization_options = 3;
2675 if (this->_internal_has_optimization_options()) {
2676 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
2677 InternalWriteMessage(3, _Internal::optimization_options(this),
2678 _Internal::optimization_options(this).GetCachedSize(), target, stream);
2679 }
2680
2681 // bool slack = 4;
2682 if (_internal_has_slack()) {
2683 target = stream->EnsureSpace(target);
2684 target = ::_pbi::WireFormatLite::WriteBoolToArray(4, this->_internal_slack(), target);
2685 }
2686
2687 // .tensorflow.data.ThreadingOptions threading_options = 5;
2688 if (this->_internal_has_threading_options()) {
2689 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
2690 InternalWriteMessage(5, _Internal::threading_options(this),
2691 _Internal::threading_options(this).GetCachedSize(), target, stream);
2692 }
2693
2694 // .tensorflow.data.ExternalStatePolicy external_state_policy = 6;
2695 if (_internal_has_external_state_policy()) {
2696 target = stream->EnsureSpace(target);
2697 target = ::_pbi::WireFormatLite::WriteEnumToArray(
2698 6, this->_internal_external_state_policy(), target);
2699 }
2700
2701 // .tensorflow.data.AutotuneOptions autotune_options = 7;
2702 if (this->_internal_has_autotune_options()) {
2703 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
2704 InternalWriteMessage(7, _Internal::autotune_options(this),
2705 _Internal::autotune_options(this).GetCachedSize(), target, stream);
2706 }
2707
2708 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2709 target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
2710 static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
2711 }
2712 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.data.Options)
2713 return target;
2714 }
2715
2716 size_t Options::ByteSizeLong() const {
2717 // @@protoc_insertion_point(message_byte_size_start:tensorflow.data.Options)
2718 size_t total_size = 0;
2719
2720 ::uint32_t cached_has_bits = 0;
2721 // Prevent compiler warnings about cached_has_bits being unused
2722 (void) cached_has_bits;
2723
2724 // .tensorflow.data.DistributeOptions distribute_options = 2;
2725 if (this->_internal_has_distribute_options()) {
2726 total_size += 1 +
2727 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
2728 *_impl_.distribute_options_);
2729 }
2730
2731 // .tensorflow.data.OptimizationOptions optimization_options = 3;
2732 if (this->_internal_has_optimization_options()) {
2733 total_size += 1 +
2734 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
2735 *_impl_.optimization_options_);
2736 }
2737
2738 // .tensorflow.data.ThreadingOptions threading_options = 5;
2739 if (this->_internal_has_threading_options()) {
2740 total_size += 1 +
2741 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
2742 *_impl_.threading_options_);
2743 }
2744
2745 // .tensorflow.data.AutotuneOptions autotune_options = 7;
2746 if (this->_internal_has_autotune_options()) {
2747 total_size += 1 +
2748 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
2749 *_impl_.autotune_options_);
2750 }
2751
2752 switch (optional_deterministic_case()) {
2753 // bool deterministic = 1;
2754 case kDeterministic: {
2755 total_size += 1 + 1;
2756 break;
2757 }
2758 case OPTIONAL_DETERMINISTIC_NOT_SET: {
2759 break;
2760 }
2761 }
2762 switch (optional_slack_case()) {
2763 // bool slack = 4;
2764 case kSlack: {
2765 total_size += 1 + 1;
2766 break;
2767 }
2768 case OPTIONAL_SLACK_NOT_SET: {
2769 break;
2770 }
2771 }
2772 switch (optional_external_state_policy_case()) {
2773 // .tensorflow.data.ExternalStatePolicy external_state_policy = 6;
2774 case kExternalStatePolicy: {
2775 total_size += 1 +
2776 ::_pbi::WireFormatLite::EnumSize(this->_internal_external_state_policy());
2777 break;
2778 }
2779 case OPTIONAL_EXTERNAL_STATE_POLICY_NOT_SET: {
2780 break;
2781 }
2782 }
2783 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2784 total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
2785 }
2786 int cached_size = ::_pbi::ToCachedSize(total_size);
2787 SetCachedSize(cached_size);
2788 return total_size;
2789 }
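// Illustrative sketch (not generated code): ByteSizeLong() above caches each
// message's size via SetCachedSize(); the serializer then reuses those cached
// values (GetCachedSize()) as the length prefixes of the embedded sub-messages,
// so the message tree is measured once per serialization. Public entry points
// such as SerializeAsString() run the sizing pass before writing any bytes.
inline std::string OptionsSerializeSketch() {
  ::tensorflow::data::Options options;
  options.mutable_threading_options()->set_private_threadpool_size(8);
  // SerializeAsString() sizes the whole tree first, then streams the bytes out
  // through _InternalSerialize() using the cached sub-message sizes.
  return options.SerializeAsString();
}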
2790
2791 void Options::CheckTypeAndMergeFrom(
2792 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
2793 MergeFrom(*::_pbi::DownCast<const Options*>(
2794 &from));
2795 }
2796
2797 void Options::MergeFrom(const Options& from) {
2798 Options* const _this = this;
2799 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.data.Options)
2800 GOOGLE_DCHECK_NE(&from, _this);
2801 ::uint32_t cached_has_bits = 0;
2802 (void) cached_has_bits;
2803
2804 if (from._internal_has_distribute_options()) {
2805 _this->_internal_mutable_distribute_options()->::tensorflow::data::DistributeOptions::MergeFrom(
2806 from._internal_distribute_options());
2807 }
2808 if (from._internal_has_optimization_options()) {
2809 _this->_internal_mutable_optimization_options()->::tensorflow::data::OptimizationOptions::MergeFrom(
2810 from._internal_optimization_options());
2811 }
2812 if (from._internal_has_threading_options()) {
2813 _this->_internal_mutable_threading_options()->::tensorflow::data::ThreadingOptions::MergeFrom(
2814 from._internal_threading_options());
2815 }
2816 if (from._internal_has_autotune_options()) {
2817 _this->_internal_mutable_autotune_options()->::tensorflow::data::AutotuneOptions::MergeFrom(
2818 from._internal_autotune_options());
2819 }
2820 switch (from.optional_deterministic_case()) {
2821 case kDeterministic: {
2822 _this->_internal_set_deterministic(from._internal_deterministic());
2823 break;
2824 }
2825 case OPTIONAL_DETERMINISTIC_NOT_SET: {
2826 break;
2827 }
2828 }
2829 switch (from.optional_slack_case()) {
2830 case kSlack: {
2831 _this->_internal_set_slack(from._internal_slack());
2832 break;
2833 }
2834 case OPTIONAL_SLACK_NOT_SET: {
2835 break;
2836 }
2837 }
2838 switch (from.optional_external_state_policy_case()) {
2839 case kExternalStatePolicy: {
2840 _this->_internal_set_external_state_policy(from._internal_external_state_policy());
2841 break;
2842 }
2843 case OPTIONAL_EXTERNAL_STATE_POLICY_NOT_SET: {
2844 break;
2845 }
2846 }
2847 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
2848 }
2849
2850 void Options::CopyFrom(const Options& from) {
2851 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.data.Options)
2852 if (&from == this) return;
2853 Clear();
2854 MergeFrom(from);
2855 }
2856
2857 bool Options::IsInitialized() const {
2858 return true;
2859 }
2860
2861 void Options::InternalSwap(Options* other) {
2862 using std::swap;
2863 _internal_metadata_.InternalSwap(&other->_internal_metadata_);
2864 ::PROTOBUF_NAMESPACE_ID::internal::memswap<
2865 PROTOBUF_FIELD_OFFSET(Options, _impl_.autotune_options_)
2866 + sizeof(Options::_impl_.autotune_options_) // NOLINT
2867 - PROTOBUF_FIELD_OFFSET(Options, _impl_.distribute_options_)>(
2868 reinterpret_cast<char*>(&_impl_.distribute_options_),
2869 reinterpret_cast<char*>(&other->_impl_.distribute_options_));
2870 swap(_impl_.optional_deterministic_, other->_impl_.optional_deterministic_);
2871 swap(_impl_.optional_slack_, other->_impl_.optional_slack_);
2872 swap(_impl_.optional_external_state_policy_, other->_impl_.optional_external_state_policy_);
2873 swap(_impl_._oneof_case_[0], other->_impl_._oneof_case_[0]);
2874 swap(_impl_._oneof_case_[1], other->_impl_._oneof_case_[1]);
2875 swap(_impl_._oneof_case_[2], other->_impl_._oneof_case_[2]);
2876 }
2877
2878 std::string Options::GetTypeName() const {
2879 return "tensorflow.data.Options";
2880 }
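// Illustrative sketch (not generated code): an end-to-end round trip through
// the tensorflow.data.Options message implemented above, combining a oneof
// scalar with a nested sub-message. It assumes the standard generated
// accessors (including AutotuneOptions::set_enabled()/enabled()) and the
// MessageLite ParseFromString()/SerializeAsString() entry points.
inline bool OptionsRoundTripSketch() {
  ::tensorflow::data::Options original;
  original.set_deterministic(true);
  original.mutable_autotune_options()->set_enabled(true);

  ::tensorflow::data::Options restored;
  if (!restored.ParseFromString(original.SerializeAsString())) return false;
  return restored.deterministic() &&
         restored.has_autotune_options() &&
         restored.autotune_options().enabled();
}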
2881
2882
2883 // @@protoc_insertion_point(namespace_scope)
2884 } // namespace data
2885 } // namespace tensorflow
2886 PROTOBUF_NAMESPACE_OPEN
2887 template<> PROTOBUF_NOINLINE ::tensorflow::data::AutotuneOptions*
2888 Arena::CreateMaybeMessage< ::tensorflow::data::AutotuneOptions >(Arena* arena) {
2889 return Arena::CreateMessageInternal< ::tensorflow::data::AutotuneOptions >(arena);
2890 }
2891 template<> PROTOBUF_NOINLINE ::tensorflow::data::CardinalityOptions*
2892 Arena::CreateMaybeMessage< ::tensorflow::data::CardinalityOptions >(Arena* arena) {
2893 return Arena::CreateMessageInternal< ::tensorflow::data::CardinalityOptions >(arena);
2894 }
2895 template<> PROTOBUF_NOINLINE ::tensorflow::data::DistributeOptions*
2896 Arena::CreateMaybeMessage< ::tensorflow::data::DistributeOptions >(Arena* arena) {
2897 return Arena::CreateMessageInternal< ::tensorflow::data::DistributeOptions >(arena);
2898 }
2899 template<> PROTOBUF_NOINLINE ::tensorflow::data::OptimizationOptions*
2900 Arena::CreateMaybeMessage< ::tensorflow::data::OptimizationOptions >(Arena* arena) {
2901 return Arena::CreateMessageInternal< ::tensorflow::data::OptimizationOptions >(arena);
2902 }
2903 template<> PROTOBUF_NOINLINE ::tensorflow::data::ThreadingOptions*
2904 Arena::CreateMaybeMessage< ::tensorflow::data::ThreadingOptions >(Arena* arena) {
2905 return Arena::CreateMessageInternal< ::tensorflow::data::ThreadingOptions >(arena);
2906 }
2907 template<> PROTOBUF_NOINLINE ::tensorflow::data::Options*
2908 Arena::CreateMaybeMessage< ::tensorflow::data::Options >(Arena* arena) {
2909 return Arena::CreateMessageInternal< ::tensorflow::data::Options >(arena);
2910 }
2911 PROTOBUF_NAMESPACE_CLOSE
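// Illustrative sketch (not generated code): the CreateMaybeMessage<>
// specializations above are the hooks that let these types be allocated on a
// protobuf Arena. A caller-side sketch, assuming Arena::CreateMessage<T>() from
// the public Arena API of this protobuf release (newer releases spell it
// Arena::Create<T>()):
inline ::tensorflow::data::Options* ArenaAllocatedOptionsSketch(
    ::PROTOBUF_NAMESPACE_ID::Arena* arena) {
  auto* options =
      ::PROTOBUF_NAMESPACE_ID::Arena::CreateMessage< ::tensorflow::data::Options >(arena);
  options->mutable_optimization_options()->set_map_and_batch_fusion(true);
  // No explicit delete: the arena owns `options` and the sub-message it created.
  return options;
}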
2912
2913 // @@protoc_insertion_point(global_scope)
2914 #include <google/protobuf/port_undef.inc>
2915