// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/rewriter_config.proto

#include "tensorflow/core/protobuf/rewriter_config.pb.h"

#include <algorithm>
#include <cstdint>

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/wire_format_lite.h>
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>

PROTOBUF_PRAGMA_INIT_SEG

namespace _pb = ::PROTOBUF_NAMESPACE_ID;
namespace _pbi = _pb::internal;

namespace tensorflow {
PROTOBUF_CONSTEXPR AutoParallelOptions::AutoParallelOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.enable_)*/false
  , /*decltype(_impl_.num_replicas_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct AutoParallelOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR AutoParallelOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~AutoParallelOptionsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    AutoParallelOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 AutoParallelOptionsDefaultTypeInternal _AutoParallelOptions_default_instance_;
PROTOBUF_CONSTEXPR ScopedAllocatorOptions::ScopedAllocatorOptions(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.enable_op_)*/{}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct ScopedAllocatorOptionsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR ScopedAllocatorOptionsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~ScopedAllocatorOptionsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    ScopedAllocatorOptions _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ScopedAllocatorOptionsDefaultTypeInternal _ScopedAllocatorOptions_default_instance_;
PROTOBUF_CONSTEXPR RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse::RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse(
    ::_pbi::ConstantInitialized) {}
struct RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUseDefaultTypeInternal {
  PROTOBUF_CONSTEXPR RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUseDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUseDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUseDefaultTypeInternal _RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse_default_instance_;
PROTOBUF_CONSTEXPR RewriterConfig_CustomGraphOptimizer::RewriterConfig_CustomGraphOptimizer(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.parameter_map_)*/{}
  , /*decltype(_impl_.name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct RewriterConfig_CustomGraphOptimizerDefaultTypeInternal {
  PROTOBUF_CONSTEXPR RewriterConfig_CustomGraphOptimizerDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~RewriterConfig_CustomGraphOptimizerDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    RewriterConfig_CustomGraphOptimizer _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RewriterConfig_CustomGraphOptimizerDefaultTypeInternal _RewriterConfig_CustomGraphOptimizer_default_instance_;
PROTOBUF_CONSTEXPR RewriterConfig::RewriterConfig(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.optimizers_)*/{}
  , /*decltype(_impl_.custom_optimizers_)*/{}
  , /*decltype(_impl_.memory_optimizer_target_node_name_scope_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.auto_parallel_)*/nullptr
  , /*decltype(_impl_.scoped_allocator_opts_)*/nullptr
  , /*decltype(_impl_.inter_optimizer_verifier_config_)*/nullptr
  , /*decltype(_impl_.post_optimization_verifier_config_)*/nullptr
  , /*decltype(_impl_.layout_optimizer_)*/0
  , /*decltype(_impl_.constant_folding_)*/0
  , /*decltype(_impl_.memory_optimization_)*/0
  , /*decltype(_impl_.arithmetic_optimization_)*/0
  , /*decltype(_impl_.dependency_optimization_)*/0
  , /*decltype(_impl_.loop_optimization_)*/0
  , /*decltype(_impl_.function_optimization_)*/0
  , /*decltype(_impl_.debug_stripper_)*/0
  , /*decltype(_impl_.meta_optimizer_iterations_)*/0
  , /*decltype(_impl_.shape_optimization_)*/0
  , /*decltype(_impl_.remapping_)*/0
  , /*decltype(_impl_.scoped_allocator_optimization_)*/0
  , /*decltype(_impl_.min_graph_nodes_)*/0
  , /*decltype(_impl_.pin_to_host_optimization_)*/0
  , /*decltype(_impl_.disable_model_pruning_)*/false
  , /*decltype(_impl_.disable_meta_optimizer_)*/false
  , /*decltype(_impl_.experimental_disable_compressed_tensor_optimization_)*/false
  , /*decltype(_impl_.experimental_disable_folding_quantization_emulation_)*/false
  , /*decltype(_impl_.fail_on_optimizer_errors_)*/false
  , /*decltype(_impl_.meta_optimizer_timeout_ms_)*/::int64_t{0}
  , /*decltype(_impl_.implementation_selector_)*/0
  , /*decltype(_impl_.auto_mixed_precision_)*/0
  , /*decltype(_impl_.common_subgraph_elimination_)*/0
  , /*decltype(_impl_.auto_mixed_precision_mkl_)*/0
  , /*decltype(_impl_.use_plugin_optimizers_)*/0
  , /*decltype(_impl_.auto_mixed_precision_cpu_)*/0
  , /*decltype(_impl_.experimental_conditional_code_motion_)*/0
  , /*decltype(_impl_.auto_mixed_precision_onednn_bfloat16_)*/0
  , /*decltype(_impl_.cpu_layout_conversion_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct RewriterConfigDefaultTypeInternal {
  PROTOBUF_CONSTEXPR RewriterConfigDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~RewriterConfigDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    RewriterConfig _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RewriterConfigDefaultTypeInternal _RewriterConfig_default_instance_;
}  // namespace tensorflow
namespace tensorflow {
bool RewriterConfig_Toggle_IsValid(int value) {
  switch (value) {
    case 0:
    case 1:
    case 2:
    case 3:
    case 4:
    case 5:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> RewriterConfig_Toggle_strings[6] = {};

static const char RewriterConfig_Toggle_names[] =
  "AGGRESSIVE"
  "DEFAULT"
  "EXPERIMENTAL_BOTH"
  "EXPERIMENTAL_MLIR"
  "OFF"
  "ON";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry RewriterConfig_Toggle_entries[] = {
  { {RewriterConfig_Toggle_names + 0, 10}, 3 },
  { {RewriterConfig_Toggle_names + 10, 7}, 0 },
  { {RewriterConfig_Toggle_names + 17, 17}, 5 },
  { {RewriterConfig_Toggle_names + 34, 17}, 4 },
  { {RewriterConfig_Toggle_names + 51, 3}, 2 },
  { {RewriterConfig_Toggle_names + 54, 2}, 1 },
};

static const int RewriterConfig_Toggle_entries_by_number[] = {
  1, // 0 -> DEFAULT
  5, // 1 -> ON
  4, // 2 -> OFF
  0, // 3 -> AGGRESSIVE
  3, // 4 -> EXPERIMENTAL_MLIR
  2, // 5 -> EXPERIMENTAL_BOTH
};
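
// The two tables above form a bidirectional index: RewriterConfig_Toggle_entries
// is sorted lexicographically by name so LookUpEnumValue() can binary-search it,
// while RewriterConfig_Toggle_entries_by_number maps each enum value to that
// entry's position, for the reverse (value -> name) lookup used below.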

const std::string& RewriterConfig_Toggle_Name(
    RewriterConfig_Toggle value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          RewriterConfig_Toggle_entries,
          RewriterConfig_Toggle_entries_by_number,
          6, RewriterConfig_Toggle_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      RewriterConfig_Toggle_entries,
      RewriterConfig_Toggle_entries_by_number,
      6, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     RewriterConfig_Toggle_strings[idx].get();
}
bool RewriterConfig_Toggle_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, RewriterConfig_Toggle* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      RewriterConfig_Toggle_entries, 6, name, &int_value);
  if (success) {
    *value = static_cast<RewriterConfig_Toggle>(int_value);
  }
  return success;
}
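
// Illustrative usage sketch of the two helpers above (variable names are
// arbitrary, not part of the generated API):
//
//   tensorflow::RewriterConfig_Toggle toggle;
//   if (tensorflow::RewriterConfig_Toggle_Parse("AGGRESSIVE", &toggle)) {
//     // Round-trips back to the symbolic name, "AGGRESSIVE".
//     const std::string& name = tensorflow::RewriterConfig_Toggle_Name(toggle);
//   }
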
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr RewriterConfig_Toggle RewriterConfig::DEFAULT;
constexpr RewriterConfig_Toggle RewriterConfig::ON;
constexpr RewriterConfig_Toggle RewriterConfig::OFF;
constexpr RewriterConfig_Toggle RewriterConfig::AGGRESSIVE;
constexpr RewriterConfig_Toggle RewriterConfig::EXPERIMENTAL_MLIR;
constexpr RewriterConfig_Toggle RewriterConfig::EXPERIMENTAL_BOTH;
constexpr RewriterConfig_Toggle RewriterConfig::Toggle_MIN;
constexpr RewriterConfig_Toggle RewriterConfig::Toggle_MAX;
constexpr int RewriterConfig::Toggle_ARRAYSIZE;
#endif  // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
bool RewriterConfig_CpuLayout_IsValid(int value) {
  switch (value) {
    case 0:
    case 1:
    case 2:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> RewriterConfig_CpuLayout_strings[3] = {};

static const char RewriterConfig_CpuLayout_names[] =
  "NCHW_TO_NHWC"
  "NHWC_TO_NCHW"
  "NO_CONVERSION_ON_CPU";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry RewriterConfig_CpuLayout_entries[] = {
  { {RewriterConfig_CpuLayout_names + 0, 12}, 1 },
  { {RewriterConfig_CpuLayout_names + 12, 12}, 2 },
  { {RewriterConfig_CpuLayout_names + 24, 20}, 0 },
};

static const int RewriterConfig_CpuLayout_entries_by_number[] = {
  2, // 0 -> NO_CONVERSION_ON_CPU
  0, // 1 -> NCHW_TO_NHWC
  1, // 2 -> NHWC_TO_NCHW
};

const std::string& RewriterConfig_CpuLayout_Name(
    RewriterConfig_CpuLayout value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          RewriterConfig_CpuLayout_entries,
          RewriterConfig_CpuLayout_entries_by_number,
          3, RewriterConfig_CpuLayout_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      RewriterConfig_CpuLayout_entries,
      RewriterConfig_CpuLayout_entries_by_number,
      3, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     RewriterConfig_CpuLayout_strings[idx].get();
}
bool RewriterConfig_CpuLayout_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, RewriterConfig_CpuLayout* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      RewriterConfig_CpuLayout_entries, 3, name, &int_value);
  if (success) {
    *value = static_cast<RewriterConfig_CpuLayout>(int_value);
  }
  return success;
}
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr RewriterConfig_CpuLayout RewriterConfig::NO_CONVERSION_ON_CPU;
constexpr RewriterConfig_CpuLayout RewriterConfig::NCHW_TO_NHWC;
constexpr RewriterConfig_CpuLayout RewriterConfig::NHWC_TO_NCHW;
constexpr RewriterConfig_CpuLayout RewriterConfig::CpuLayout_MIN;
constexpr RewriterConfig_CpuLayout RewriterConfig::CpuLayout_MAX;
constexpr int RewriterConfig::CpuLayout_ARRAYSIZE;
#endif  // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
bool RewriterConfig_NumIterationsType_IsValid(int value) {
  switch (value) {
    case 0:
    case 1:
    case 2:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> RewriterConfig_NumIterationsType_strings[3] = {};

static const char RewriterConfig_NumIterationsType_names[] =
  "DEFAULT_NUM_ITERS"
  "ONE"
  "TWO";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry RewriterConfig_NumIterationsType_entries[] = {
  { {RewriterConfig_NumIterationsType_names + 0, 17}, 0 },
  { {RewriterConfig_NumIterationsType_names + 17, 3}, 1 },
  { {RewriterConfig_NumIterationsType_names + 20, 3}, 2 },
};

static const int RewriterConfig_NumIterationsType_entries_by_number[] = {
  0, // 0 -> DEFAULT_NUM_ITERS
  1, // 1 -> ONE
  2, // 2 -> TWO
};

const std::string& RewriterConfig_NumIterationsType_Name(
    RewriterConfig_NumIterationsType value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          RewriterConfig_NumIterationsType_entries,
          RewriterConfig_NumIterationsType_entries_by_number,
          3, RewriterConfig_NumIterationsType_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      RewriterConfig_NumIterationsType_entries,
      RewriterConfig_NumIterationsType_entries_by_number,
      3, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     RewriterConfig_NumIterationsType_strings[idx].get();
}
bool RewriterConfig_NumIterationsType_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, RewriterConfig_NumIterationsType* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      RewriterConfig_NumIterationsType_entries, 3, name, &int_value);
  if (success) {
    *value = static_cast<RewriterConfig_NumIterationsType>(int_value);
  }
  return success;
}
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr RewriterConfig_NumIterationsType RewriterConfig::DEFAULT_NUM_ITERS;
constexpr RewriterConfig_NumIterationsType RewriterConfig::ONE;
constexpr RewriterConfig_NumIterationsType RewriterConfig::TWO;
constexpr RewriterConfig_NumIterationsType RewriterConfig::NumIterationsType_MIN;
constexpr RewriterConfig_NumIterationsType RewriterConfig::NumIterationsType_MAX;
constexpr int RewriterConfig::NumIterationsType_ARRAYSIZE;
#endif  // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
bool RewriterConfig_MemOptType_IsValid(int value) {
  switch (value) {
    case 0:
    case 1:
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
      return true;
    default:
      return false;
  }
}

static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> RewriterConfig_MemOptType_strings[7] = {};

static const char RewriterConfig_MemOptType_names[] =
  "DEFAULT_MEM_OPT"
  "HEURISTICS"
  "MANUAL"
  "NO_MEM_OPT"
  "RECOMPUTATION_HEURISTICS"
  "SCHEDULING_HEURISTICS"
  "SWAPPING_HEURISTICS";

static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry RewriterConfig_MemOptType_entries[] = {
  { {RewriterConfig_MemOptType_names + 0, 15}, 0 },
  { {RewriterConfig_MemOptType_names + 15, 10}, 3 },
  { {RewriterConfig_MemOptType_names + 25, 6}, 2 },
  { {RewriterConfig_MemOptType_names + 31, 10}, 1 },
  { {RewriterConfig_MemOptType_names + 41, 24}, 5 },
  { {RewriterConfig_MemOptType_names + 65, 21}, 6 },
  { {RewriterConfig_MemOptType_names + 86, 19}, 4 },
};

static const int RewriterConfig_MemOptType_entries_by_number[] = {
  0, // 0 -> DEFAULT_MEM_OPT
  3, // 1 -> NO_MEM_OPT
  2, // 2 -> MANUAL
  1, // 3 -> HEURISTICS
  6, // 4 -> SWAPPING_HEURISTICS
  4, // 5 -> RECOMPUTATION_HEURISTICS
  5, // 6 -> SCHEDULING_HEURISTICS
};

const std::string& RewriterConfig_MemOptType_Name(
    RewriterConfig_MemOptType value) {
  static const bool dummy =
      ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
          RewriterConfig_MemOptType_entries,
          RewriterConfig_MemOptType_entries_by_number,
          7, RewriterConfig_MemOptType_strings);
  (void) dummy;
  int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
      RewriterConfig_MemOptType_entries,
      RewriterConfig_MemOptType_entries_by_number,
      7, value);
  return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
                     RewriterConfig_MemOptType_strings[idx].get();
}
bool RewriterConfig_MemOptType_Parse(
    ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, RewriterConfig_MemOptType* value) {
  int int_value;
  bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
      RewriterConfig_MemOptType_entries, 7, name, &int_value);
  if (success) {
    *value = static_cast<RewriterConfig_MemOptType>(int_value);
  }
  return success;
}
#if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
constexpr RewriterConfig_MemOptType RewriterConfig::DEFAULT_MEM_OPT;
constexpr RewriterConfig_MemOptType RewriterConfig::NO_MEM_OPT;
constexpr RewriterConfig_MemOptType RewriterConfig::MANUAL;
constexpr RewriterConfig_MemOptType RewriterConfig::SWAPPING_HEURISTICS;
constexpr RewriterConfig_MemOptType RewriterConfig::RECOMPUTATION_HEURISTICS;
constexpr RewriterConfig_MemOptType RewriterConfig::SCHEDULING_HEURISTICS;
constexpr RewriterConfig_MemOptType RewriterConfig::HEURISTICS;
constexpr RewriterConfig_MemOptType RewriterConfig::MemOptType_MIN;
constexpr RewriterConfig_MemOptType RewriterConfig::MemOptType_MAX;
constexpr int RewriterConfig::MemOptType_ARRAYSIZE;
#endif  // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
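
// Sketch of the usual guard before casting an untrusted integer (e.g. one read
// from the wire or a config file) into the enum type; the value 4 here is an
// arbitrary example:
//
//   int raw = 4;
//   if (tensorflow::RewriterConfig_MemOptType_IsValid(raw)) {
//     auto mem_opt = static_cast<tensorflow::RewriterConfig_MemOptType>(raw);
//     (void)mem_opt;  // raw == 4 maps to SWAPPING_HEURISTICS per the table above
//   }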

// ===================================================================

class AutoParallelOptions::_Internal {
 public:
};

AutoParallelOptions::AutoParallelOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.AutoParallelOptions)
}
AutoParallelOptions::AutoParallelOptions(const AutoParallelOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  AutoParallelOptions* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.enable_){}
    , decltype(_impl_.num_replicas_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  ::memcpy(&_impl_.enable_, &from._impl_.enable_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.num_replicas_) -
    reinterpret_cast<char*>(&_impl_.enable_)) + sizeof(_impl_.num_replicas_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.AutoParallelOptions)
}

inline void AutoParallelOptions::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.enable_){false}
    , decltype(_impl_.num_replicas_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

AutoParallelOptions::~AutoParallelOptions() {
  // @@protoc_insertion_point(destructor:tensorflow.AutoParallelOptions)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void AutoParallelOptions::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}

void AutoParallelOptions::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void AutoParallelOptions::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.AutoParallelOptions)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  ::memset(&_impl_.enable_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.num_replicas_) -
      reinterpret_cast<char*>(&_impl_.enable_)) + sizeof(_impl_.num_replicas_));
  _internal_metadata_.Clear<std::string>();
}

const char* AutoParallelOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // bool enable = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.enable_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 num_replicas = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.num_replicas_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
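
// Note on the tag arithmetic above: a protobuf wire tag is
// (field_number << 3) | wire_type, so `tag >> 3` recovers the field number and
// the low three bits select the wire type. Field 1 as a varint encodes as
// (1 << 3) | 0 == 8 and field 2 as (2 << 3) | 0 == 16, matching the byte
// values checked in the two cases.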

::uint8_t* AutoParallelOptions::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.AutoParallelOptions)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // bool enable = 1;
  if (this->_internal_enable() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(1, this->_internal_enable(), target);
  }

  // int32 num_replicas = 2;
  if (this->_internal_num_replicas() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_num_replicas(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.AutoParallelOptions)
  return target;
}

size_t AutoParallelOptions::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.AutoParallelOptions)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // bool enable = 1;
  if (this->_internal_enable() != 0) {
    total_size += 1 + 1;
  }

  // int32 num_replicas = 2;
  if (this->_internal_num_replicas() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_num_replicas());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
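
// Size arithmetic above: a set bool costs one tag byte plus one payload byte
// (the `1 + 1`), while Int32SizePlusOne() adds one tag byte to the varint
// length of the value (1-5 bytes for a non-negative int32, 10 when negative,
// since negative int32 is sign-extended to 64 bits on the wire).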

void AutoParallelOptions::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const AutoParallelOptions*>(
      &from));
}

void AutoParallelOptions::MergeFrom(const AutoParallelOptions& from) {
  AutoParallelOptions* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.AutoParallelOptions)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_enable() != 0) {
    _this->_internal_set_enable(from._internal_enable());
  }
  if (from._internal_num_replicas() != 0) {
    _this->_internal_set_num_replicas(from._internal_num_replicas());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void AutoParallelOptions::CopyFrom(const AutoParallelOptions& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.AutoParallelOptions)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool AutoParallelOptions::IsInitialized() const {
  return true;
}

void AutoParallelOptions::InternalSwap(AutoParallelOptions* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(AutoParallelOptions, _impl_.num_replicas_)
      + sizeof(AutoParallelOptions::_impl_.num_replicas_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(AutoParallelOptions, _impl_.enable_)>(
          reinterpret_cast<char*>(&_impl_.enable_),
          reinterpret_cast<char*>(&other->_impl_.enable_));
}

std::string AutoParallelOptions::GetTypeName() const {
  return "tensorflow.AutoParallelOptions";
}
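
// Minimal round-trip sketch through the MessageLite interface this class
// implements (SerializeAsString/ParseFromString are inherited; the field
// values are arbitrary examples):
//
//   tensorflow::AutoParallelOptions opts;
//   opts.set_enable(true);
//   opts.set_num_replicas(2);
//   std::string bytes = opts.SerializeAsString();
//   tensorflow::AutoParallelOptions parsed;
//   bool ok = parsed.ParseFromString(bytes);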


// ===================================================================

class ScopedAllocatorOptions::_Internal {
 public:
};

ScopedAllocatorOptions::ScopedAllocatorOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.ScopedAllocatorOptions)
}
ScopedAllocatorOptions::ScopedAllocatorOptions(const ScopedAllocatorOptions& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  ScopedAllocatorOptions* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.enable_op_){from._impl_.enable_op_}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  // @@protoc_insertion_point(copy_constructor:tensorflow.ScopedAllocatorOptions)
}

inline void ScopedAllocatorOptions::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.enable_op_){arena}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

ScopedAllocatorOptions::~ScopedAllocatorOptions() {
  // @@protoc_insertion_point(destructor:tensorflow.ScopedAllocatorOptions)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void ScopedAllocatorOptions::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.enable_op_.~RepeatedPtrField();
}

void ScopedAllocatorOptions::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void ScopedAllocatorOptions::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.ScopedAllocatorOptions)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.enable_op_.Clear();
  _internal_metadata_.Clear<std::string>();
}

const char* ScopedAllocatorOptions::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // repeated string enable_op = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr -= 1;
          do {
            ptr += 1;
            auto str = _internal_add_enable_op();
            ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
            CHK_(ptr);
            CHK_(::_pbi::VerifyUTF8(str, nullptr));
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* ScopedAllocatorOptions::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.ScopedAllocatorOptions)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // repeated string enable_op = 1;
  for (int i = 0, n = this->_internal_enable_op_size(); i < n; i++) {
    const auto& s = this->_internal_enable_op(i);
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      s.data(), static_cast<int>(s.length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.ScopedAllocatorOptions.enable_op");
    target = stream->WriteString(1, s, target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.ScopedAllocatorOptions)
  return target;
}

size_t ScopedAllocatorOptions::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.ScopedAllocatorOptions)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated string enable_op = 1;
  total_size += 1 *
      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(_impl_.enable_op_.size());
  for (int i = 0, n = _impl_.enable_op_.size(); i < n; i++) {
    total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
      _impl_.enable_op_.Get(i));
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void ScopedAllocatorOptions::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const ScopedAllocatorOptions*>(
      &from));
}

void ScopedAllocatorOptions::MergeFrom(const ScopedAllocatorOptions& from) {
  ScopedAllocatorOptions* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.ScopedAllocatorOptions)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.enable_op_.MergeFrom(from._impl_.enable_op_);
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void ScopedAllocatorOptions::CopyFrom(const ScopedAllocatorOptions& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.ScopedAllocatorOptions)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool ScopedAllocatorOptions::IsInitialized() const {
  return true;
}

void ScopedAllocatorOptions::InternalSwap(ScopedAllocatorOptions* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.enable_op_.InternalSwap(&other->_impl_.enable_op_);
}

std::string ScopedAllocatorOptions::GetTypeName() const {
  return "tensorflow.ScopedAllocatorOptions";
}
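
// Sketch of populating the repeated `enable_op` field via its generated
// accessors (the op name is purely illustrative):
//
//   tensorflow::ScopedAllocatorOptions sa;
//   sa.add_enable_op("CollectiveReduce");
//   for (const std::string& op : sa.enable_op()) {
//     (void)op;
//   }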


// ===================================================================

RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse::RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse() {}
RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse::RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
    : SuperType(arena) {}
void RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse::MergeFrom(const RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse& other) {
  MergeFromInternal(other);
}

// ===================================================================

class RewriterConfig_CustomGraphOptimizer::_Internal {
 public:
};

void RewriterConfig_CustomGraphOptimizer::clear_parameter_map() {
  _impl_.parameter_map_.Clear();
}
RewriterConfig_CustomGraphOptimizer::RewriterConfig_CustomGraphOptimizer(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.RewriterConfig.CustomGraphOptimizer)
}
RewriterConfig_CustomGraphOptimizer::RewriterConfig_CustomGraphOptimizer(const RewriterConfig_CustomGraphOptimizer& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  RewriterConfig_CustomGraphOptimizer* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      /*decltype(_impl_.parameter_map_)*/{}
    , decltype(_impl_.name_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _this->_impl_.parameter_map_.MergeFrom(from._impl_.parameter_map_);
  _impl_.name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_name().empty()) {
    _this->_impl_.name_.Set(from._internal_name(),
      _this->GetArenaForAllocation());
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.RewriterConfig.CustomGraphOptimizer)
}

inline void RewriterConfig_CustomGraphOptimizer::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      /*decltype(_impl_.parameter_map_)*/{::_pbi::ArenaInitialized(), arena}
    , decltype(_impl_.name_){}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

RewriterConfig_CustomGraphOptimizer::~RewriterConfig_CustomGraphOptimizer() {
  // @@protoc_insertion_point(destructor:tensorflow.RewriterConfig.CustomGraphOptimizer)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void RewriterConfig_CustomGraphOptimizer::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.parameter_map_.Destruct();
  _impl_.parameter_map_.~MapFieldLite();
  _impl_.name_.Destroy();
}

void RewriterConfig_CustomGraphOptimizer::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void RewriterConfig_CustomGraphOptimizer::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.RewriterConfig.CustomGraphOptimizer)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.parameter_map_.Clear();
  _impl_.name_.ClearToEmpty();
  _internal_metadata_.Clear<std::string>();
}

const char* RewriterConfig_CustomGraphOptimizer::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // string name = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          auto str = _internal_mutable_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // map<string, .tensorflow.AttrValue> parameter_map = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(&_impl_.parameter_map_, ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* RewriterConfig_CustomGraphOptimizer::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RewriterConfig.CustomGraphOptimizer)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // string name = 1;
  if (!this->_internal_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_name().data(), static_cast<int>(this->_internal_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.RewriterConfig.CustomGraphOptimizer.name");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_name(), target);
  }

  // map<string, .tensorflow.AttrValue> parameter_map = 2;
  if (!this->_internal_parameter_map().empty()) {
    using MapType = ::_pb::Map<std::string, ::tensorflow::AttrValue>;
    using WireHelper = RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse::Funcs;
    const auto& map_field = this->_internal_parameter_map();
    auto check_utf8 = [](const MapType::value_type& entry) {
      (void)entry;
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
        entry.first.data(), static_cast<int>(entry.first.length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
        "tensorflow.RewriterConfig.CustomGraphOptimizer.ParameterMapEntry.key");
    };

    if (stream->IsSerializationDeterministic() && map_field.size() > 1) {
      for (const auto& entry : ::_pbi::MapSorterPtr<MapType>(map_field)) {
        target = WireHelper::InternalSerialize(2, entry.first, entry.second, target, stream);
        check_utf8(entry);
      }
    } else {
      for (const auto& entry : map_field) {
        target = WireHelper::InternalSerialize(2, entry.first, entry.second, target, stream);
        check_utf8(entry);
      }
    }
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RewriterConfig.CustomGraphOptimizer)
  return target;
}
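
// The map serialization above has no guaranteed wire order by default; the
// deterministic branch sorts entries by key (via MapSorterPtr) only when the
// output stream requests deterministic serialization, e.g. for fingerprinting
// or golden-file comparisons. Otherwise entries are written in map iteration
// order.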

size_t RewriterConfig_CustomGraphOptimizer::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.RewriterConfig.CustomGraphOptimizer)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // map<string, .tensorflow.AttrValue> parameter_map = 2;
  total_size += 1 *
      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_parameter_map_size());
  for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::tensorflow::AttrValue >::const_iterator
      it = this->_internal_parameter_map().begin();
      it != this->_internal_parameter_map().end(); ++it) {
    total_size += RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
  }

  // string name = 1;
  if (!this->_internal_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_name());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void RewriterConfig_CustomGraphOptimizer::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const RewriterConfig_CustomGraphOptimizer*>(
      &from));
}

void RewriterConfig_CustomGraphOptimizer::MergeFrom(const RewriterConfig_CustomGraphOptimizer& from) {
  RewriterConfig_CustomGraphOptimizer* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RewriterConfig.CustomGraphOptimizer)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.parameter_map_.MergeFrom(from._impl_.parameter_map_);
  if (!from._internal_name().empty()) {
    _this->_internal_set_name(from._internal_name());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void RewriterConfig_CustomGraphOptimizer::CopyFrom(const RewriterConfig_CustomGraphOptimizer& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RewriterConfig.CustomGraphOptimizer)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool RewriterConfig_CustomGraphOptimizer::IsInitialized() const {
  return true;
}

void RewriterConfig_CustomGraphOptimizer::InternalSwap(RewriterConfig_CustomGraphOptimizer* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.parameter_map_.InternalSwap(&other->_impl_.parameter_map_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.name_, lhs_arena,
      &other->_impl_.name_, rhs_arena
  );
}

std::string RewriterConfig_CustomGraphOptimizer::GetTypeName() const {
  return "tensorflow.RewriterConfig.CustomGraphOptimizer";
}
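
// Sketch of configuring a custom optimizer entry through the generated
// accessors; the optimizer name and parameter key are hypothetical, and
// set_b() is the AttrValue bool setter from attr_value.pb.h:
//
//   tensorflow::RewriterConfig_CustomGraphOptimizer opt;
//   opt.set_name("MyOptimizer");
//   (*opt.mutable_parameter_map())["enabled"].set_b(true);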


// ===================================================================

class RewriterConfig::_Internal {
 public:
  static const ::tensorflow::AutoParallelOptions& auto_parallel(const RewriterConfig* msg);
  static const ::tensorflow::ScopedAllocatorOptions& scoped_allocator_opts(const RewriterConfig* msg);
  static const ::tensorflow::VerifierConfig& inter_optimizer_verifier_config(const RewriterConfig* msg);
  static const ::tensorflow::VerifierConfig& post_optimization_verifier_config(const RewriterConfig* msg);
};

const ::tensorflow::AutoParallelOptions&
RewriterConfig::_Internal::auto_parallel(const RewriterConfig* msg) {
  return *msg->_impl_.auto_parallel_;
}
const ::tensorflow::ScopedAllocatorOptions&
RewriterConfig::_Internal::scoped_allocator_opts(const RewriterConfig* msg) {
  return *msg->_impl_.scoped_allocator_opts_;
}
const ::tensorflow::VerifierConfig&
RewriterConfig::_Internal::inter_optimizer_verifier_config(const RewriterConfig* msg) {
  return *msg->_impl_.inter_optimizer_verifier_config_;
}
const ::tensorflow::VerifierConfig&
RewriterConfig::_Internal::post_optimization_verifier_config(const RewriterConfig* msg) {
  return *msg->_impl_.post_optimization_verifier_config_;
}
void RewriterConfig::clear_inter_optimizer_verifier_config() {
  if (GetArenaForAllocation() == nullptr && _impl_.inter_optimizer_verifier_config_ != nullptr) {
    delete _impl_.inter_optimizer_verifier_config_;
  }
  _impl_.inter_optimizer_verifier_config_ = nullptr;
}
void RewriterConfig::clear_post_optimization_verifier_config() {
  if (GetArenaForAllocation() == nullptr && _impl_.post_optimization_verifier_config_ != nullptr) {
    delete _impl_.post_optimization_verifier_config_;
  }
  _impl_.post_optimization_verifier_config_ = nullptr;
}
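
// The two clear_*() functions above delete the submessage only when the parent
// message is heap-allocated (GetArenaForAllocation() == nullptr); arena-owned
// submessages are reclaimed with the arena, so the pointer is simply reset.
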
RewriterConfig(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)1133 RewriterConfig::RewriterConfig(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1134                          bool is_message_owned)
1135   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
1136   SharedCtor(arena, is_message_owned);
1137   // @@protoc_insertion_point(arena_constructor:tensorflow.RewriterConfig)
1138 }
RewriterConfig::RewriterConfig(const RewriterConfig& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  RewriterConfig* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.optimizers_){from._impl_.optimizers_}
    , decltype(_impl_.custom_optimizers_){from._impl_.custom_optimizers_}
    , decltype(_impl_.memory_optimizer_target_node_name_scope_){}
    , decltype(_impl_.auto_parallel_){nullptr}
    , decltype(_impl_.scoped_allocator_opts_){nullptr}
    , decltype(_impl_.inter_optimizer_verifier_config_){nullptr}
    , decltype(_impl_.post_optimization_verifier_config_){nullptr}
    , decltype(_impl_.layout_optimizer_){}
    , decltype(_impl_.constant_folding_){}
    , decltype(_impl_.memory_optimization_){}
    , decltype(_impl_.arithmetic_optimization_){}
    , decltype(_impl_.dependency_optimization_){}
    , decltype(_impl_.loop_optimization_){}
    , decltype(_impl_.function_optimization_){}
    , decltype(_impl_.debug_stripper_){}
    , decltype(_impl_.meta_optimizer_iterations_){}
    , decltype(_impl_.shape_optimization_){}
    , decltype(_impl_.remapping_){}
    , decltype(_impl_.scoped_allocator_optimization_){}
    , decltype(_impl_.min_graph_nodes_){}
    , decltype(_impl_.pin_to_host_optimization_){}
    , decltype(_impl_.disable_model_pruning_){}
    , decltype(_impl_.disable_meta_optimizer_){}
    , decltype(_impl_.experimental_disable_compressed_tensor_optimization_){}
    , decltype(_impl_.experimental_disable_folding_quantization_emulation_){}
    , decltype(_impl_.fail_on_optimizer_errors_){}
    , decltype(_impl_.meta_optimizer_timeout_ms_){}
    , decltype(_impl_.implementation_selector_){}
    , decltype(_impl_.auto_mixed_precision_){}
    , decltype(_impl_.common_subgraph_elimination_){}
    , decltype(_impl_.auto_mixed_precision_mkl_){}
    , decltype(_impl_.use_plugin_optimizers_){}
    , decltype(_impl_.auto_mixed_precision_cpu_){}
    , decltype(_impl_.experimental_conditional_code_motion_){}
    , decltype(_impl_.auto_mixed_precision_onednn_bfloat16_){}
    , decltype(_impl_.cpu_layout_conversion_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.memory_optimizer_target_node_name_scope_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.memory_optimizer_target_node_name_scope_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_memory_optimizer_target_node_name_scope().empty()) {
    _this->_impl_.memory_optimizer_target_node_name_scope_.Set(from._internal_memory_optimizer_target_node_name_scope(),
      _this->GetArenaForAllocation());
  }
  if (from._internal_has_auto_parallel()) {
    _this->_impl_.auto_parallel_ = new ::tensorflow::AutoParallelOptions(*from._impl_.auto_parallel_);
  }
  if (from._internal_has_scoped_allocator_opts()) {
    _this->_impl_.scoped_allocator_opts_ = new ::tensorflow::ScopedAllocatorOptions(*from._impl_.scoped_allocator_opts_);
  }
  if (from._internal_has_inter_optimizer_verifier_config()) {
    _this->_impl_.inter_optimizer_verifier_config_ = new ::tensorflow::VerifierConfig(*from._impl_.inter_optimizer_verifier_config_);
  }
  if (from._internal_has_post_optimization_verifier_config()) {
    _this->_impl_.post_optimization_verifier_config_ = new ::tensorflow::VerifierConfig(*from._impl_.post_optimization_verifier_config_);
  }
  ::memcpy(&_impl_.layout_optimizer_, &from._impl_.layout_optimizer_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.cpu_layout_conversion_) -
    reinterpret_cast<char*>(&_impl_.layout_optimizer_)) + sizeof(_impl_.cpu_layout_conversion_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.RewriterConfig)
}

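// SharedCtor zero-initializes every field, matching the proto3 defaults:
// enums and integers to 0, booleans to false, submessage pointers to null,
// and the string to the shared empty default instance.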
inline void RewriterConfig::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.optimizers_){arena}
    , decltype(_impl_.custom_optimizers_){arena}
    , decltype(_impl_.memory_optimizer_target_node_name_scope_){}
    , decltype(_impl_.auto_parallel_){nullptr}
    , decltype(_impl_.scoped_allocator_opts_){nullptr}
    , decltype(_impl_.inter_optimizer_verifier_config_){nullptr}
    , decltype(_impl_.post_optimization_verifier_config_){nullptr}
    , decltype(_impl_.layout_optimizer_){0}
    , decltype(_impl_.constant_folding_){0}
    , decltype(_impl_.memory_optimization_){0}
    , decltype(_impl_.arithmetic_optimization_){0}
    , decltype(_impl_.dependency_optimization_){0}
    , decltype(_impl_.loop_optimization_){0}
    , decltype(_impl_.function_optimization_){0}
    , decltype(_impl_.debug_stripper_){0}
    , decltype(_impl_.meta_optimizer_iterations_){0}
    , decltype(_impl_.shape_optimization_){0}
    , decltype(_impl_.remapping_){0}
    , decltype(_impl_.scoped_allocator_optimization_){0}
    , decltype(_impl_.min_graph_nodes_){0}
    , decltype(_impl_.pin_to_host_optimization_){0}
    , decltype(_impl_.disable_model_pruning_){false}
    , decltype(_impl_.disable_meta_optimizer_){false}
    , decltype(_impl_.experimental_disable_compressed_tensor_optimization_){false}
    , decltype(_impl_.experimental_disable_folding_quantization_emulation_){false}
    , decltype(_impl_.fail_on_optimizer_errors_){false}
    , decltype(_impl_.meta_optimizer_timeout_ms_){::int64_t{0}}
    , decltype(_impl_.implementation_selector_){0}
    , decltype(_impl_.auto_mixed_precision_){0}
    , decltype(_impl_.common_subgraph_elimination_){0}
    , decltype(_impl_.auto_mixed_precision_mkl_){0}
    , decltype(_impl_.use_plugin_optimizers_){0}
    , decltype(_impl_.auto_mixed_precision_cpu_){0}
    , decltype(_impl_.experimental_conditional_code_motion_){0}
    , decltype(_impl_.auto_mixed_precision_onednn_bfloat16_){0}
    , decltype(_impl_.cpu_layout_conversion_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.memory_optimizer_target_node_name_scope_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.memory_optimizer_target_node_name_scope_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

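// Destruction is arena-aware: if the message lives on an arena, the
// destructor returns early and the arena reclaims the memory in bulk.
// SharedDtor, which frees the heap-owned submessages, only runs for
// heap-allocated instances, hence the GOOGLE_DCHECK below.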
RewriterConfig::~RewriterConfig() {
  // @@protoc_insertion_point(destructor:tensorflow.RewriterConfig)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void RewriterConfig::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.optimizers_.~RepeatedPtrField();
  _impl_.custom_optimizers_.~RepeatedPtrField();
  _impl_.memory_optimizer_target_node_name_scope_.Destroy();
  if (this != internal_default_instance()) delete _impl_.auto_parallel_;
  if (this != internal_default_instance()) delete _impl_.scoped_allocator_opts_;
  if (this != internal_default_instance()) delete _impl_.inter_optimizer_verifier_config_;
  if (this != internal_default_instance()) delete _impl_.post_optimization_verifier_config_;
}

void RewriterConfig::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

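// Clear resets the message to its default state: repeated fields and the
// string are emptied, owned submessages are deleted (only when not
// arena-allocated) and their pointers nulled, and the contiguous scalar
// block is zeroed with a single memset.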
void RewriterConfig::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.RewriterConfig)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.optimizers_.Clear();
  _impl_.custom_optimizers_.Clear();
  _impl_.memory_optimizer_target_node_name_scope_.ClearToEmpty();
  if (GetArenaForAllocation() == nullptr && _impl_.auto_parallel_ != nullptr) {
    delete _impl_.auto_parallel_;
  }
  _impl_.auto_parallel_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.scoped_allocator_opts_ != nullptr) {
    delete _impl_.scoped_allocator_opts_;
  }
  _impl_.scoped_allocator_opts_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.inter_optimizer_verifier_config_ != nullptr) {
    delete _impl_.inter_optimizer_verifier_config_;
  }
  _impl_.inter_optimizer_verifier_config_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.post_optimization_verifier_config_ != nullptr) {
    delete _impl_.post_optimization_verifier_config_;
  }
  _impl_.post_optimization_verifier_config_ = nullptr;
  ::memset(&_impl_.layout_optimizer_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.cpu_layout_conversion_) -
      reinterpret_cast<char*>(&_impl_.layout_optimizer_)) + sizeof(_impl_.cpu_layout_conversion_));
  _internal_metadata_.Clear<std::string>();
}

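// _InternalParse dispatches on the wire-format tag, where
// tag = (field_number << 3) | wire_type. The switch selects on the field
// number (tag >> 3) and each case then checks the low byte of the full tag.
// For example: field 1 as a varint has tag (1 << 3) | 0 = 8; field 50 has
// tag 400, whose low byte is 144 (the same low byte as field 18's tag, which
// is why the field-number switch comes first); repeated string field 100 has
// tag (100 << 3) | 2 = 802, low byte 34, matching ExpectTag<802> below.
// A zero tag or an end-group tag (wire type 4) terminates the message.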
const char* RewriterConfig::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .tensorflow.RewriterConfig.Toggle layout_optimizer = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_layout_optimizer(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // bool disable_model_pruning = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.disable_model_pruning_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle constant_folding = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_constant_folding(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.MemOptType memory_optimization = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_memory_optimization(static_cast<::tensorflow::RewriterConfig_MemOptType>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.AutoParallelOptions auto_parallel = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
          ptr = ctx->ParseMessage(_internal_mutable_auto_parallel(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string memory_optimizer_target_node_name_scope = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
          auto str = _internal_mutable_memory_optimizer_target_node_name_scope();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle arithmetic_optimization = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_arithmetic_optimization(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle dependency_optimization = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 64)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_dependency_optimization(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle loop_optimization = 9;
      case 9:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 72)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_loop_optimization(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle function_optimization = 10;
      case 10:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 80)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_function_optimization(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle debug_stripper = 11;
      case 11:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 88)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_debug_stripper(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.NumIterationsType meta_optimizer_iterations = 12;
      case 12:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 96)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_meta_optimizer_iterations(static_cast<::tensorflow::RewriterConfig_NumIterationsType>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle shape_optimization = 13;
      case 13:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 104)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_shape_optimization(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle remapping = 14;
      case 14:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 112)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_remapping(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle scoped_allocator_optimization = 15;
      case 15:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 120)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_scoped_allocator_optimization(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.ScopedAllocatorOptions scoped_allocator_opts = 16;
      case 16:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 130)) {
          ptr = ctx->ParseMessage(_internal_mutable_scoped_allocator_opts(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 min_graph_nodes = 17;
      case 17:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 136)) {
          _impl_.min_graph_nodes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle pin_to_host_optimization = 18;
      case 18:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 144)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_pin_to_host_optimization(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // bool disable_meta_optimizer = 19;
      case 19:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 152)) {
          _impl_.disable_meta_optimizer_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 meta_optimizer_timeout_ms = 20;
      case 20:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 160)) {
          _impl_.meta_optimizer_timeout_ms_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool fail_on_optimizer_errors = 21;
      case 21:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 168)) {
          _impl_.fail_on_optimizer_errors_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle implementation_selector = 22;
      case 22:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 176)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_implementation_selector(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle auto_mixed_precision = 23;
      case 23:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 184)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_auto_mixed_precision(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle common_subgraph_elimination = 24;
      case 24:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 192)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_common_subgraph_elimination(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle auto_mixed_precision_mkl = 25;
      case 25:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 200)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_auto_mixed_precision_mkl(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // bool experimental_disable_compressed_tensor_optimization = 26;
      case 26:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 208)) {
          _impl_.experimental_disable_compressed_tensor_optimization_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // bool experimental_disable_folding_quantization_emulation = 27;
      case 27:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 216)) {
          _impl_.experimental_disable_folding_quantization_emulation_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28;
      case 28:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 224)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_use_plugin_optimizers(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle auto_mixed_precision_cpu = 29;
      case 29:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 232)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_auto_mixed_precision_cpu(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle experimental_conditional_code_motion = 30;
      case 30:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 240)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_experimental_conditional_code_motion(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.Toggle auto_mixed_precision_onednn_bfloat16 = 31;
      case 31:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 248)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_auto_mixed_precision_onednn_bfloat16(static_cast<::tensorflow::RewriterConfig_Toggle>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.RewriterConfig.CpuLayout cpu_layout_conversion = 50;
      case 50:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 144)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_cpu_layout_conversion(static_cast<::tensorflow::RewriterConfig_CpuLayout>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated string optimizers = 100;
      case 100:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
          ptr -= 2;
          do {
            ptr += 2;
            auto str = _internal_add_optimizers();
            ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
            CHK_(ptr);
            CHK_(::_pbi::VerifyUTF8(str, nullptr));
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<802>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;
      case 200:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 66)) {
          ptr -= 2;
          do {
            ptr += 2;
            ptr = ctx->ParseMessage(_internal_add_custom_optimizers(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<1602>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.VerifierConfig inter_optimizer_verifier_config = 300;
      case 300:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 98)) {
          ptr = ctx->ParseMessage(_internal_mutable_inter_optimizer_verifier_config(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.VerifierConfig post_optimization_verifier_config = 301;
      case 301:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 106)) {
          ptr = ctx->ParseMessage(_internal_mutable_post_optimization_verifier_config(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

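// _InternalSerialize emits the set (non-default) fields in ascending
// field-number order. EnsureSpace guarantees room for the small fixed-size
// writes, while submessages go through InternalWriteMessage using the byte
// size cached by a prior ByteSizeLong pass, so sizes are not recomputed
// during serialization.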
::uint8_t* RewriterConfig::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RewriterConfig)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.RewriterConfig.Toggle layout_optimizer = 1;
  if (this->_internal_layout_optimizer() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      1, this->_internal_layout_optimizer(), target);
  }

  // bool disable_model_pruning = 2;
  if (this->_internal_disable_model_pruning() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(2, this->_internal_disable_model_pruning(), target);
  }

  // .tensorflow.RewriterConfig.Toggle constant_folding = 3;
  if (this->_internal_constant_folding() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      3, this->_internal_constant_folding(), target);
  }

  // .tensorflow.RewriterConfig.MemOptType memory_optimization = 4;
  if (this->_internal_memory_optimization() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      4, this->_internal_memory_optimization(), target);
  }

  // .tensorflow.AutoParallelOptions auto_parallel = 5;
  if (this->_internal_has_auto_parallel()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(5, _Internal::auto_parallel(this),
        _Internal::auto_parallel(this).GetCachedSize(), target, stream);
  }

  // string memory_optimizer_target_node_name_scope = 6;
  if (!this->_internal_memory_optimizer_target_node_name_scope().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_memory_optimizer_target_node_name_scope().data(), static_cast<int>(this->_internal_memory_optimizer_target_node_name_scope().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.RewriterConfig.memory_optimizer_target_node_name_scope");
    target = stream->WriteStringMaybeAliased(
        6, this->_internal_memory_optimizer_target_node_name_scope(), target);
  }

  // .tensorflow.RewriterConfig.Toggle arithmetic_optimization = 7;
  if (this->_internal_arithmetic_optimization() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      7, this->_internal_arithmetic_optimization(), target);
  }

  // .tensorflow.RewriterConfig.Toggle dependency_optimization = 8;
  if (this->_internal_dependency_optimization() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      8, this->_internal_dependency_optimization(), target);
  }

  // .tensorflow.RewriterConfig.Toggle loop_optimization = 9;
  if (this->_internal_loop_optimization() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      9, this->_internal_loop_optimization(), target);
  }

  // .tensorflow.RewriterConfig.Toggle function_optimization = 10;
  if (this->_internal_function_optimization() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      10, this->_internal_function_optimization(), target);
  }

  // .tensorflow.RewriterConfig.Toggle debug_stripper = 11;
  if (this->_internal_debug_stripper() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      11, this->_internal_debug_stripper(), target);
  }

  // .tensorflow.RewriterConfig.NumIterationsType meta_optimizer_iterations = 12;
  if (this->_internal_meta_optimizer_iterations() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      12, this->_internal_meta_optimizer_iterations(), target);
  }

  // .tensorflow.RewriterConfig.Toggle shape_optimization = 13;
  if (this->_internal_shape_optimization() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      13, this->_internal_shape_optimization(), target);
  }

  // .tensorflow.RewriterConfig.Toggle remapping = 14;
  if (this->_internal_remapping() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      14, this->_internal_remapping(), target);
  }

  // .tensorflow.RewriterConfig.Toggle scoped_allocator_optimization = 15;
  if (this->_internal_scoped_allocator_optimization() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      15, this->_internal_scoped_allocator_optimization(), target);
  }

  // .tensorflow.ScopedAllocatorOptions scoped_allocator_opts = 16;
  if (this->_internal_has_scoped_allocator_opts()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(16, _Internal::scoped_allocator_opts(this),
        _Internal::scoped_allocator_opts(this).GetCachedSize(), target, stream);
  }

  // int32 min_graph_nodes = 17;
  if (this->_internal_min_graph_nodes() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(17, this->_internal_min_graph_nodes(), target);
  }

  // .tensorflow.RewriterConfig.Toggle pin_to_host_optimization = 18;
  if (this->_internal_pin_to_host_optimization() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      18, this->_internal_pin_to_host_optimization(), target);
  }

  // bool disable_meta_optimizer = 19;
  if (this->_internal_disable_meta_optimizer() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(19, this->_internal_disable_meta_optimizer(), target);
  }

  // int64 meta_optimizer_timeout_ms = 20;
  if (this->_internal_meta_optimizer_timeout_ms() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(20, this->_internal_meta_optimizer_timeout_ms(), target);
  }

  // bool fail_on_optimizer_errors = 21;
  if (this->_internal_fail_on_optimizer_errors() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(21, this->_internal_fail_on_optimizer_errors(), target);
  }

  // .tensorflow.RewriterConfig.Toggle implementation_selector = 22;
  if (this->_internal_implementation_selector() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      22, this->_internal_implementation_selector(), target);
  }

  // .tensorflow.RewriterConfig.Toggle auto_mixed_precision = 23;
  if (this->_internal_auto_mixed_precision() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      23, this->_internal_auto_mixed_precision(), target);
  }

  // .tensorflow.RewriterConfig.Toggle common_subgraph_elimination = 24;
  if (this->_internal_common_subgraph_elimination() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      24, this->_internal_common_subgraph_elimination(), target);
  }

  // .tensorflow.RewriterConfig.Toggle auto_mixed_precision_mkl = 25;
  if (this->_internal_auto_mixed_precision_mkl() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      25, this->_internal_auto_mixed_precision_mkl(), target);
  }

  // bool experimental_disable_compressed_tensor_optimization = 26;
  if (this->_internal_experimental_disable_compressed_tensor_optimization() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(26, this->_internal_experimental_disable_compressed_tensor_optimization(), target);
  }

  // bool experimental_disable_folding_quantization_emulation = 27;
  if (this->_internal_experimental_disable_folding_quantization_emulation() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(27, this->_internal_experimental_disable_folding_quantization_emulation(), target);
  }

  // .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28;
  if (this->_internal_use_plugin_optimizers() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      28, this->_internal_use_plugin_optimizers(), target);
  }

  // .tensorflow.RewriterConfig.Toggle auto_mixed_precision_cpu = 29;
  if (this->_internal_auto_mixed_precision_cpu() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      29, this->_internal_auto_mixed_precision_cpu(), target);
  }

  // .tensorflow.RewriterConfig.Toggle experimental_conditional_code_motion = 30;
  if (this->_internal_experimental_conditional_code_motion() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      30, this->_internal_experimental_conditional_code_motion(), target);
  }

  // .tensorflow.RewriterConfig.Toggle auto_mixed_precision_onednn_bfloat16 = 31;
  if (this->_internal_auto_mixed_precision_onednn_bfloat16() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      31, this->_internal_auto_mixed_precision_onednn_bfloat16(), target);
  }

  // .tensorflow.RewriterConfig.CpuLayout cpu_layout_conversion = 50;
  if (this->_internal_cpu_layout_conversion() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
      50, this->_internal_cpu_layout_conversion(), target);
  }

  // repeated string optimizers = 100;
  for (int i = 0, n = this->_internal_optimizers_size(); i < n; i++) {
    const auto& s = this->_internal_optimizers(i);
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      s.data(), static_cast<int>(s.length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.RewriterConfig.optimizers");
    target = stream->WriteString(100, s, target);
  }

  // repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_custom_optimizers_size()); i < n; i++) {
    const auto& repfield = this->_internal_custom_optimizers(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(200, repfield, repfield.GetCachedSize(), target, stream);
  }

  // .tensorflow.VerifierConfig inter_optimizer_verifier_config = 300;
  if (this->_internal_has_inter_optimizer_verifier_config()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(300, _Internal::inter_optimizer_verifier_config(this),
        _Internal::inter_optimizer_verifier_config(this).GetCachedSize(), target, stream);
  }

  // .tensorflow.VerifierConfig post_optimization_verifier_config = 301;
  if (this->_internal_has_post_optimization_verifier_config()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(301, _Internal::post_optimization_verifier_config(this),
        _Internal::post_optimization_verifier_config(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RewriterConfig)
  return target;
}

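// ByteSizeLong sums each present field's encoded size. The per-field tag
// overhead is 1 byte for field numbers 1-15 and 2 bytes for 16-2047, which
// is why bool field 2 below costs 1 + 1 bytes while bool field 19 costs
// 2 + 1. As a worked example, one entry "x" in `optimizers` (field 100)
// costs 2 (tag) + 1 (length) + 1 (payload) = 4 bytes. The result is cached
// via SetCachedSize for reuse by _InternalSerialize.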
size_t RewriterConfig::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.RewriterConfig)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated string optimizers = 100;
  total_size += 2 *
      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(_impl_.optimizers_.size());
  for (int i = 0, n = _impl_.optimizers_.size(); i < n; i++) {
    total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
      _impl_.optimizers_.Get(i));
  }

  // repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;
  total_size += 2UL * this->_internal_custom_optimizers_size();
  for (const auto& msg : this->_impl_.custom_optimizers_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // string memory_optimizer_target_node_name_scope = 6;
  if (!this->_internal_memory_optimizer_target_node_name_scope().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_memory_optimizer_target_node_name_scope());
  }

  // .tensorflow.AutoParallelOptions auto_parallel = 5;
  if (this->_internal_has_auto_parallel()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.auto_parallel_);
  }

  // .tensorflow.ScopedAllocatorOptions scoped_allocator_opts = 16;
  if (this->_internal_has_scoped_allocator_opts()) {
    total_size += 2 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.scoped_allocator_opts_);
  }

  // .tensorflow.VerifierConfig inter_optimizer_verifier_config = 300;
  if (this->_internal_has_inter_optimizer_verifier_config()) {
    total_size += 2 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.inter_optimizer_verifier_config_);
  }

  // .tensorflow.VerifierConfig post_optimization_verifier_config = 301;
  if (this->_internal_has_post_optimization_verifier_config()) {
    total_size += 2 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.post_optimization_verifier_config_);
  }

  // .tensorflow.RewriterConfig.Toggle layout_optimizer = 1;
  if (this->_internal_layout_optimizer() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_layout_optimizer());
  }

  // .tensorflow.RewriterConfig.Toggle constant_folding = 3;
  if (this->_internal_constant_folding() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_constant_folding());
  }

  // .tensorflow.RewriterConfig.MemOptType memory_optimization = 4;
  if (this->_internal_memory_optimization() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_memory_optimization());
  }

  // .tensorflow.RewriterConfig.Toggle arithmetic_optimization = 7;
  if (this->_internal_arithmetic_optimization() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_arithmetic_optimization());
  }

  // .tensorflow.RewriterConfig.Toggle dependency_optimization = 8;
  if (this->_internal_dependency_optimization() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_dependency_optimization());
  }

  // .tensorflow.RewriterConfig.Toggle loop_optimization = 9;
  if (this->_internal_loop_optimization() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_loop_optimization());
  }

  // .tensorflow.RewriterConfig.Toggle function_optimization = 10;
  if (this->_internal_function_optimization() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_function_optimization());
  }

  // .tensorflow.RewriterConfig.Toggle debug_stripper = 11;
  if (this->_internal_debug_stripper() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_debug_stripper());
  }

  // .tensorflow.RewriterConfig.NumIterationsType meta_optimizer_iterations = 12;
  if (this->_internal_meta_optimizer_iterations() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_meta_optimizer_iterations());
  }

  // .tensorflow.RewriterConfig.Toggle shape_optimization = 13;
  if (this->_internal_shape_optimization() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_shape_optimization());
  }

  // .tensorflow.RewriterConfig.Toggle remapping = 14;
  if (this->_internal_remapping() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_remapping());
  }

  // .tensorflow.RewriterConfig.Toggle scoped_allocator_optimization = 15;
  if (this->_internal_scoped_allocator_optimization() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_scoped_allocator_optimization());
  }

  // int32 min_graph_nodes = 17;
  if (this->_internal_min_graph_nodes() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::Int32Size(
        this->_internal_min_graph_nodes());
  }

  // .tensorflow.RewriterConfig.Toggle pin_to_host_optimization = 18;
  if (this->_internal_pin_to_host_optimization() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_pin_to_host_optimization());
  }

  // bool disable_model_pruning = 2;
  if (this->_internal_disable_model_pruning() != 0) {
    total_size += 1 + 1;
  }

  // bool disable_meta_optimizer = 19;
  if (this->_internal_disable_meta_optimizer() != 0) {
    total_size += 2 + 1;
  }

  // bool experimental_disable_compressed_tensor_optimization = 26;
  if (this->_internal_experimental_disable_compressed_tensor_optimization() != 0) {
    total_size += 2 + 1;
  }

  // bool experimental_disable_folding_quantization_emulation = 27;
  if (this->_internal_experimental_disable_folding_quantization_emulation() != 0) {
    total_size += 2 + 1;
  }

  // bool fail_on_optimizer_errors = 21;
  if (this->_internal_fail_on_optimizer_errors() != 0) {
    total_size += 2 + 1;
  }

  // int64 meta_optimizer_timeout_ms = 20;
  if (this->_internal_meta_optimizer_timeout_ms() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::Int64Size(
        this->_internal_meta_optimizer_timeout_ms());
  }

  // .tensorflow.RewriterConfig.Toggle implementation_selector = 22;
  if (this->_internal_implementation_selector() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_implementation_selector());
  }

  // .tensorflow.RewriterConfig.Toggle auto_mixed_precision = 23;
  if (this->_internal_auto_mixed_precision() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_auto_mixed_precision());
  }

  // .tensorflow.RewriterConfig.Toggle common_subgraph_elimination = 24;
  if (this->_internal_common_subgraph_elimination() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_common_subgraph_elimination());
  }

  // .tensorflow.RewriterConfig.Toggle auto_mixed_precision_mkl = 25;
  if (this->_internal_auto_mixed_precision_mkl() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_auto_mixed_precision_mkl());
  }

  // .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28;
  if (this->_internal_use_plugin_optimizers() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_use_plugin_optimizers());
  }

  // .tensorflow.RewriterConfig.Toggle auto_mixed_precision_cpu = 29;
  if (this->_internal_auto_mixed_precision_cpu() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_auto_mixed_precision_cpu());
  }

  // .tensorflow.RewriterConfig.Toggle experimental_conditional_code_motion = 30;
  if (this->_internal_experimental_conditional_code_motion() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_experimental_conditional_code_motion());
  }

  // .tensorflow.RewriterConfig.Toggle auto_mixed_precision_onednn_bfloat16 = 31;
  if (this->_internal_auto_mixed_precision_onednn_bfloat16() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_auto_mixed_precision_onednn_bfloat16());
  }

  // .tensorflow.RewriterConfig.CpuLayout cpu_layout_conversion = 50;
  if (this->_internal_cpu_layout_conversion() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_cpu_layout_conversion());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void RewriterConfig::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const RewriterConfig*>(
      &from));
}

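// MergeFrom follows proto3 semantics: repeated fields are appended,
// submessages are merged recursively, and singular scalar/enum fields are
// copied only when non-default in `from`, so default values never overwrite
// data already present in `this`.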
void RewriterConfig::MergeFrom(const RewriterConfig& from) {
  RewriterConfig* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RewriterConfig)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.optimizers_.MergeFrom(from._impl_.optimizers_);
  _this->_impl_.custom_optimizers_.MergeFrom(from._impl_.custom_optimizers_);
  if (!from._internal_memory_optimizer_target_node_name_scope().empty()) {
    _this->_internal_set_memory_optimizer_target_node_name_scope(from._internal_memory_optimizer_target_node_name_scope());
  }
  if (from._internal_has_auto_parallel()) {
    _this->_internal_mutable_auto_parallel()->::tensorflow::AutoParallelOptions::MergeFrom(
        from._internal_auto_parallel());
  }
  if (from._internal_has_scoped_allocator_opts()) {
    _this->_internal_mutable_scoped_allocator_opts()->::tensorflow::ScopedAllocatorOptions::MergeFrom(
        from._internal_scoped_allocator_opts());
  }
  if (from._internal_has_inter_optimizer_verifier_config()) {
    _this->_internal_mutable_inter_optimizer_verifier_config()->::tensorflow::VerifierConfig::MergeFrom(
        from._internal_inter_optimizer_verifier_config());
  }
  if (from._internal_has_post_optimization_verifier_config()) {
    _this->_internal_mutable_post_optimization_verifier_config()->::tensorflow::VerifierConfig::MergeFrom(
        from._internal_post_optimization_verifier_config());
  }
  if (from._internal_layout_optimizer() != 0) {
    _this->_internal_set_layout_optimizer(from._internal_layout_optimizer());
  }
  if (from._internal_constant_folding() != 0) {
    _this->_internal_set_constant_folding(from._internal_constant_folding());
  }
  if (from._internal_memory_optimization() != 0) {
    _this->_internal_set_memory_optimization(from._internal_memory_optimization());
  }
  if (from._internal_arithmetic_optimization() != 0) {
    _this->_internal_set_arithmetic_optimization(from._internal_arithmetic_optimization());
  }
  if (from._internal_dependency_optimization() != 0) {
    _this->_internal_set_dependency_optimization(from._internal_dependency_optimization());
  }
  if (from._internal_loop_optimization() != 0) {
    _this->_internal_set_loop_optimization(from._internal_loop_optimization());
  }
  if (from._internal_function_optimization() != 0) {
    _this->_internal_set_function_optimization(from._internal_function_optimization());
  }
  if (from._internal_debug_stripper() != 0) {
    _this->_internal_set_debug_stripper(from._internal_debug_stripper());
  }
  if (from._internal_meta_optimizer_iterations() != 0) {
    _this->_internal_set_meta_optimizer_iterations(from._internal_meta_optimizer_iterations());
  }
  if (from._internal_shape_optimization() != 0) {
    _this->_internal_set_shape_optimization(from._internal_shape_optimization());
  }
  if (from._internal_remapping() != 0) {
    _this->_internal_set_remapping(from._internal_remapping());
  }
  if (from._internal_scoped_allocator_optimization() != 0) {
    _this->_internal_set_scoped_allocator_optimization(from._internal_scoped_allocator_optimization());
  }
  if (from._internal_min_graph_nodes() != 0) {
    _this->_internal_set_min_graph_nodes(from._internal_min_graph_nodes());
  }
  if (from._internal_pin_to_host_optimization() != 0) {
    _this->_internal_set_pin_to_host_optimization(from._internal_pin_to_host_optimization());
  }
  if (from._internal_disable_model_pruning() != 0) {
    _this->_internal_set_disable_model_pruning(from._internal_disable_model_pruning());
  }
  if (from._internal_disable_meta_optimizer() != 0) {
    _this->_internal_set_disable_meta_optimizer(from._internal_disable_meta_optimizer());
  }
  if (from._internal_experimental_disable_compressed_tensor_optimization() != 0) {
    _this->_internal_set_experimental_disable_compressed_tensor_optimization(from._internal_experimental_disable_compressed_tensor_optimization());
  }
  if (from._internal_experimental_disable_folding_quantization_emulation() != 0) {
    _this->_internal_set_experimental_disable_folding_quantization_emulation(from._internal_experimental_disable_folding_quantization_emulation());
  }
  if (from._internal_fail_on_optimizer_errors() != 0) {
    _this->_internal_set_fail_on_optimizer_errors(from._internal_fail_on_optimizer_errors());
  }
  if (from._internal_meta_optimizer_timeout_ms() != 0) {
    _this->_internal_set_meta_optimizer_timeout_ms(from._internal_meta_optimizer_timeout_ms());
  }
  if (from._internal_implementation_selector() != 0) {
    _this->_internal_set_implementation_selector(from._internal_implementation_selector());
  }
  if (from._internal_auto_mixed_precision() != 0) {
    _this->_internal_set_auto_mixed_precision(from._internal_auto_mixed_precision());
  }
  if (from._internal_common_subgraph_elimination() != 0) {
    _this->_internal_set_common_subgraph_elimination(from._internal_common_subgraph_elimination());
  }
  if (from._internal_auto_mixed_precision_mkl() != 0) {
    _this->_internal_set_auto_mixed_precision_mkl(from._internal_auto_mixed_precision_mkl());
  }
  if (from._internal_use_plugin_optimizers() != 0) {
    _this->_internal_set_use_plugin_optimizers(from._internal_use_plugin_optimizers());
  }
  if (from._internal_auto_mixed_precision_cpu() != 0) {
    _this->_internal_set_auto_mixed_precision_cpu(from._internal_auto_mixed_precision_cpu());
  }
  if (from._internal_experimental_conditional_code_motion() != 0) {
    _this->_internal_set_experimental_conditional_code_motion(from._internal_experimental_conditional_code_motion());
  }
  if (from._internal_auto_mixed_precision_onednn_bfloat16() != 0) {
    _this->_internal_set_auto_mixed_precision_onednn_bfloat16(from._internal_auto_mixed_precision_onednn_bfloat16());
  }
  if (from._internal_cpu_layout_conversion() != 0) {
    _this->_internal_set_cpu_layout_conversion(from._internal_cpu_layout_conversion());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void RewriterConfig::CopyFrom(const RewriterConfig& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RewriterConfig)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool RewriterConfig::IsInitialized() const {
  return true;
}

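// InternalSwap exchanges the repeated fields and metadata individually, then
// swaps everything from auto_parallel_ through cpu_layout_conversion_ as one
// contiguous byte range via memswap, again relying on the Impl_ field layout.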
void RewriterConfig::InternalSwap(RewriterConfig* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.optimizers_.InternalSwap(&other->_impl_.optimizers_);
  _impl_.custom_optimizers_.InternalSwap(&other->_impl_.custom_optimizers_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.memory_optimizer_target_node_name_scope_, lhs_arena,
      &other->_impl_.memory_optimizer_target_node_name_scope_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(RewriterConfig, _impl_.cpu_layout_conversion_)
      + sizeof(RewriterConfig::_impl_.cpu_layout_conversion_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(RewriterConfig, _impl_.auto_parallel_)>(
          reinterpret_cast<char*>(&_impl_.auto_parallel_),
          reinterpret_cast<char*>(&other->_impl_.auto_parallel_));
}

std::string RewriterConfig::GetTypeName() const {
  return "tensorflow.RewriterConfig";
}


// @@protoc_insertion_point(namespace_scope)
}  // namespace tensorflow
PROTOBUF_NAMESPACE_OPEN
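// These explicit specializations route Arena::CreateMaybeMessage for each
// generated type to CreateMessageInternal; they are what the arena-creation
// sketch near the constructors above ultimately dispatches through.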
template<> PROTOBUF_NOINLINE ::tensorflow::AutoParallelOptions*
Arena::CreateMaybeMessage< ::tensorflow::AutoParallelOptions >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::AutoParallelOptions >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::ScopedAllocatorOptions*
Arena::CreateMaybeMessage< ::tensorflow::ScopedAllocatorOptions >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::ScopedAllocatorOptions >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse*
Arena::CreateMaybeMessage< ::tensorflow::RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::RewriterConfig_CustomGraphOptimizer*
Arena::CreateMaybeMessage< ::tensorflow::RewriterConfig_CustomGraphOptimizer >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::RewriterConfig_CustomGraphOptimizer >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::RewriterConfig*
Arena::CreateMaybeMessage< ::tensorflow::RewriterConfig >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::RewriterConfig >(arena);
}
PROTOBUF_NAMESPACE_CLOSE

// @@protoc_insertion_point(global_scope)
#include <google/protobuf/port_undef.inc>