// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/grappler/costs/op_performance_data.proto

#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"

#include <algorithm>
#include <cstdint>

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/wire_format_lite.h>
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>

16 PROTOBUF_PRAGMA_INIT_SEG
17 
18 namespace _pb = ::PROTOBUF_NAMESPACE_ID;
19 namespace _pbi = _pb::internal;
20 
21 namespace tensorflow {
SessionInfo(::_pbi::ConstantInitialized)22 PROTOBUF_CONSTEXPR SessionInfo::SessionInfo(
23     ::_pbi::ConstantInitialized): _impl_{
24     /*decltype(_impl_.intra_op_parallelism_)*/::int64_t{0}
25   , /*decltype(_impl_._cached_size_)*/{}} {}
26 struct SessionInfoDefaultTypeInternal {
SessionInfoDefaultTypeInternaltensorflow::SessionInfoDefaultTypeInternal27   PROTOBUF_CONSTEXPR SessionInfoDefaultTypeInternal()
28       : _instance(::_pbi::ConstantInitialized{}) {}
~SessionInfoDefaultTypeInternaltensorflow::SessionInfoDefaultTypeInternal29   ~SessionInfoDefaultTypeInternal() {}
30   union {  // NOLINT(misc-non-private-member-variables-in-classes)
31     SessionInfo _instance;
32   };
33 };
34 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 SessionInfoDefaultTypeInternal _SessionInfo_default_instance_;
OpInfo_AttrEntry_DoNotUse(::_pbi::ConstantInitialized)35 PROTOBUF_CONSTEXPR OpInfo_AttrEntry_DoNotUse::OpInfo_AttrEntry_DoNotUse(
36     ::_pbi::ConstantInitialized) {}
37 struct OpInfo_AttrEntry_DoNotUseDefaultTypeInternal {
OpInfo_AttrEntry_DoNotUseDefaultTypeInternaltensorflow::OpInfo_AttrEntry_DoNotUseDefaultTypeInternal38   PROTOBUF_CONSTEXPR OpInfo_AttrEntry_DoNotUseDefaultTypeInternal()
39       : _instance(::_pbi::ConstantInitialized{}) {}
~OpInfo_AttrEntry_DoNotUseDefaultTypeInternaltensorflow::OpInfo_AttrEntry_DoNotUseDefaultTypeInternal40   ~OpInfo_AttrEntry_DoNotUseDefaultTypeInternal() {}
41   union {  // NOLINT(misc-non-private-member-variables-in-classes)
42     OpInfo_AttrEntry_DoNotUse _instance;
43   };
44 };
45 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 OpInfo_AttrEntry_DoNotUseDefaultTypeInternal _OpInfo_AttrEntry_DoNotUse_default_instance_;
OpInfo_TensorProperties(::_pbi::ConstantInitialized)46 PROTOBUF_CONSTEXPR OpInfo_TensorProperties::OpInfo_TensorProperties(
47     ::_pbi::ConstantInitialized): _impl_{
48     /*decltype(_impl_.shape_)*/nullptr
49   , /*decltype(_impl_.value_)*/nullptr
50   , /*decltype(_impl_.dtype_)*/0
51   , /*decltype(_impl_._cached_size_)*/{}} {}
52 struct OpInfo_TensorPropertiesDefaultTypeInternal {
OpInfo_TensorPropertiesDefaultTypeInternaltensorflow::OpInfo_TensorPropertiesDefaultTypeInternal53   PROTOBUF_CONSTEXPR OpInfo_TensorPropertiesDefaultTypeInternal()
54       : _instance(::_pbi::ConstantInitialized{}) {}
~OpInfo_TensorPropertiesDefaultTypeInternaltensorflow::OpInfo_TensorPropertiesDefaultTypeInternal55   ~OpInfo_TensorPropertiesDefaultTypeInternal() {}
56   union {  // NOLINT(misc-non-private-member-variables-in-classes)
57     OpInfo_TensorProperties _instance;
58   };
59 };
60 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 OpInfo_TensorPropertiesDefaultTypeInternal _OpInfo_TensorProperties_default_instance_;
OpInfo(::_pbi::ConstantInitialized)61 PROTOBUF_CONSTEXPR OpInfo::OpInfo(
62     ::_pbi::ConstantInitialized): _impl_{
63     /*decltype(_impl_.attr_)*/{}
64   , /*decltype(_impl_.inputs_)*/{}
65   , /*decltype(_impl_.outputs_)*/{}
66   , /*decltype(_impl_.op_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
67   , /*decltype(_impl_.device_)*/nullptr
68   , /*decltype(_impl_.session_info_)*/nullptr
69   , /*decltype(_impl_._cached_size_)*/{}} {}
70 struct OpInfoDefaultTypeInternal {
OpInfoDefaultTypeInternaltensorflow::OpInfoDefaultTypeInternal71   PROTOBUF_CONSTEXPR OpInfoDefaultTypeInternal()
72       : _instance(::_pbi::ConstantInitialized{}) {}
~OpInfoDefaultTypeInternaltensorflow::OpInfoDefaultTypeInternal73   ~OpInfoDefaultTypeInternal() {}
74   union {  // NOLINT(misc-non-private-member-variables-in-classes)
75     OpInfo _instance;
76   };
77 };
78 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 OpInfoDefaultTypeInternal _OpInfo_default_instance_;
NormalDistribution(::_pbi::ConstantInitialized)79 PROTOBUF_CONSTEXPR NormalDistribution::NormalDistribution(
80     ::_pbi::ConstantInitialized): _impl_{
81     /*decltype(_impl_.mu_)*/0
82   , /*decltype(_impl_.sigma_)*/0
83   , /*decltype(_impl_._cached_size_)*/{}} {}
84 struct NormalDistributionDefaultTypeInternal {
NormalDistributionDefaultTypeInternaltensorflow::NormalDistributionDefaultTypeInternal85   PROTOBUF_CONSTEXPR NormalDistributionDefaultTypeInternal()
86       : _instance(::_pbi::ConstantInitialized{}) {}
~NormalDistributionDefaultTypeInternaltensorflow::NormalDistributionDefaultTypeInternal87   ~NormalDistributionDefaultTypeInternal() {}
88   union {  // NOLINT(misc-non-private-member-variables-in-classes)
89     NormalDistribution _instance;
90   };
91 };
92 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 NormalDistributionDefaultTypeInternal _NormalDistribution_default_instance_;
LogNormalDistribution(::_pbi::ConstantInitialized)93 PROTOBUF_CONSTEXPR LogNormalDistribution::LogNormalDistribution(
94     ::_pbi::ConstantInitialized): _impl_{
95     /*decltype(_impl_.mu_)*/0
96   , /*decltype(_impl_.sigma_)*/0
97   , /*decltype(_impl_._cached_size_)*/{}} {}
98 struct LogNormalDistributionDefaultTypeInternal {
LogNormalDistributionDefaultTypeInternaltensorflow::LogNormalDistributionDefaultTypeInternal99   PROTOBUF_CONSTEXPR LogNormalDistributionDefaultTypeInternal()
100       : _instance(::_pbi::ConstantInitialized{}) {}
~LogNormalDistributionDefaultTypeInternaltensorflow::LogNormalDistributionDefaultTypeInternal101   ~LogNormalDistributionDefaultTypeInternal() {}
102   union {  // NOLINT(misc-non-private-member-variables-in-classes)
103     LogNormalDistribution _instance;
104   };
105 };
106 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 LogNormalDistributionDefaultTypeInternal _LogNormalDistribution_default_instance_;
OpPerformance_OpMemory(::_pbi::ConstantInitialized)107 PROTOBUF_CONSTEXPR OpPerformance_OpMemory::OpPerformance_OpMemory(
108     ::_pbi::ConstantInitialized): _impl_{
109     /*decltype(_impl_.output_memory_)*/{}
110   , /*decltype(_impl_._output_memory_cached_byte_size_)*/{0}
111   , /*decltype(_impl_.temp_memory_)*/::int64_t{0}
112   , /*decltype(_impl_.device_temp_memory_)*/::int64_t{0}
113   , /*decltype(_impl_.persistent_memory_)*/::int64_t{0}
114   , /*decltype(_impl_.device_persistent_memory_)*/::int64_t{0}
115   , /*decltype(_impl_._cached_size_)*/{}} {}
116 struct OpPerformance_OpMemoryDefaultTypeInternal {
OpPerformance_OpMemoryDefaultTypeInternaltensorflow::OpPerformance_OpMemoryDefaultTypeInternal117   PROTOBUF_CONSTEXPR OpPerformance_OpMemoryDefaultTypeInternal()
118       : _instance(::_pbi::ConstantInitialized{}) {}
~OpPerformance_OpMemoryDefaultTypeInternaltensorflow::OpPerformance_OpMemoryDefaultTypeInternal119   ~OpPerformance_OpMemoryDefaultTypeInternal() {}
120   union {  // NOLINT(misc-non-private-member-variables-in-classes)
121     OpPerformance_OpMemory _instance;
122   };
123 };
124 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 OpPerformance_OpMemoryDefaultTypeInternal _OpPerformance_OpMemory_default_instance_;
OpPerformance(::_pbi::ConstantInitialized)125 PROTOBUF_CONSTEXPR OpPerformance::OpPerformance(
126     ::_pbi::ConstantInitialized): _impl_{
127     /*decltype(_impl_.node_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
128   , /*decltype(_impl_.op_)*/nullptr
129   , /*decltype(_impl_.op_memory_)*/nullptr
130   , /*decltype(_impl_.session_info_)*/nullptr
131   , /*decltype(_impl_.temporary_memory_size_)*/::int64_t{0}
132   , /*decltype(_impl_.compute_cost_)*/::int64_t{0}
133   , /*decltype(_impl_.compute_efficiency_)*/0
134   , /*decltype(_impl_.compute_time_)*/::int64_t{0}
135   , /*decltype(_impl_.memory_time_)*/::int64_t{0}
136   , /*decltype(_impl_.memory_efficiency_)*/0
137   , /*decltype(_impl_.execution_time_)*/{}
138   , /*decltype(_impl_._cached_size_)*/{}
139   , /*decltype(_impl_._oneof_case_)*/{}} {}
140 struct OpPerformanceDefaultTypeInternal {
OpPerformanceDefaultTypeInternaltensorflow::OpPerformanceDefaultTypeInternal141   PROTOBUF_CONSTEXPR OpPerformanceDefaultTypeInternal()
142       : _instance(::_pbi::ConstantInitialized{}) {}
~OpPerformanceDefaultTypeInternaltensorflow::OpPerformanceDefaultTypeInternal143   ~OpPerformanceDefaultTypeInternal() {}
144   union {  // NOLINT(misc-non-private-member-variables-in-classes)
145     OpPerformance _instance;
146   };
147 };
148 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 OpPerformanceDefaultTypeInternal _OpPerformance_default_instance_;
OpPerformanceList(::_pbi::ConstantInitialized)149 PROTOBUF_CONSTEXPR OpPerformanceList::OpPerformanceList(
150     ::_pbi::ConstantInitialized): _impl_{
151     /*decltype(_impl_.op_performance_)*/{}
152   , /*decltype(_impl_._cached_size_)*/{}} {}
153 struct OpPerformanceListDefaultTypeInternal {
OpPerformanceListDefaultTypeInternaltensorflow::OpPerformanceListDefaultTypeInternal154   PROTOBUF_CONSTEXPR OpPerformanceListDefaultTypeInternal()
155       : _instance(::_pbi::ConstantInitialized{}) {}
~OpPerformanceListDefaultTypeInternaltensorflow::OpPerformanceListDefaultTypeInternal156   ~OpPerformanceListDefaultTypeInternal() {}
157   union {  // NOLINT(misc-non-private-member-variables-in-classes)
158     OpPerformanceList _instance;
159   };
160 };
161 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 OpPerformanceListDefaultTypeInternal _OpPerformanceList_default_instance_;
162 }  // namespace tensorflow
163 namespace tensorflow {
164 
165 // ===================================================================
166 
167 class SessionInfo::_Internal {
168  public:
169 };
170 
SessionInfo(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)171 SessionInfo::SessionInfo(::PROTOBUF_NAMESPACE_ID::Arena* arena,
172                          bool is_message_owned)
173   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
174   SharedCtor(arena, is_message_owned);
175   // @@protoc_insertion_point(arena_constructor:tensorflow.SessionInfo)
176 }
SessionInfo(const SessionInfo & from)177 SessionInfo::SessionInfo(const SessionInfo& from)
178   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
179   SessionInfo* const _this = this; (void)_this;
180   new (&_impl_) Impl_{
181       decltype(_impl_.intra_op_parallelism_){}
182     , /*decltype(_impl_._cached_size_)*/{}};
183 
184   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
185   _this->_impl_.intra_op_parallelism_ = from._impl_.intra_op_parallelism_;
186   // @@protoc_insertion_point(copy_constructor:tensorflow.SessionInfo)
187 }
188 
SharedCtor(::_pb::Arena * arena,bool is_message_owned)189 inline void SessionInfo::SharedCtor(
190     ::_pb::Arena* arena, bool is_message_owned) {
191   (void)arena;
192   (void)is_message_owned;
193   new (&_impl_) Impl_{
194       decltype(_impl_.intra_op_parallelism_){::int64_t{0}}
195     , /*decltype(_impl_._cached_size_)*/{}
196   };
197 }
198 
~SessionInfo()199 SessionInfo::~SessionInfo() {
200   // @@protoc_insertion_point(destructor:tensorflow.SessionInfo)
201   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
202   (void)arena;
203     return;
204   }
205   SharedDtor();
206 }
207 
SharedDtor()208 inline void SessionInfo::SharedDtor() {
209   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
210 }
211 
SetCachedSize(int size) const212 void SessionInfo::SetCachedSize(int size) const {
213   _impl_._cached_size_.Set(size);
214 }
215 
Clear()216 void SessionInfo::Clear() {
217 // @@protoc_insertion_point(message_clear_start:tensorflow.SessionInfo)
218   ::uint32_t cached_has_bits = 0;
219   // Prevent compiler warnings about cached_has_bits being unused
220   (void) cached_has_bits;
221 
222   _impl_.intra_op_parallelism_ = ::int64_t{0};
223   _internal_metadata_.Clear<std::string>();
224 }
225 
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)226 const char* SessionInfo::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
227 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
228   while (!ctx->Done(&ptr)) {
229     ::uint32_t tag;
230     ptr = ::_pbi::ReadTag(ptr, &tag);
231     switch (tag >> 3) {
232       // int64 intra_op_parallelism = 1;
233       case 1:
234         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
235           _impl_.intra_op_parallelism_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
236           CHK_(ptr);
237         } else {
238           goto handle_unusual;
239         }
240         continue;
241       default:
242         goto handle_unusual;
243     }  // switch
244   handle_unusual:
245     if ((tag == 0) || ((tag & 7) == 4)) {
246       CHK_(ptr);
247       ctx->SetLastTag(tag);
248       goto message_done;
249     }
250     ptr = UnknownFieldParse(
251         tag,
252         _internal_metadata_.mutable_unknown_fields<std::string>(),
253         ptr, ctx);
254     CHK_(ptr != nullptr);
255   }  // while
256 message_done:
257   return ptr;
258 failure:
259   ptr = nullptr;
260   goto message_done;
261 #undef CHK_
262 }
263 
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const264 ::uint8_t* SessionInfo::_InternalSerialize(
265     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
266   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.SessionInfo)
267   ::uint32_t cached_has_bits = 0;
268   (void) cached_has_bits;
269 
270   // int64 intra_op_parallelism = 1;
271   if (this->_internal_intra_op_parallelism() != 0) {
272     target = stream->EnsureSpace(target);
273     target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_intra_op_parallelism(), target);
274   }
275 
276   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
277     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
278         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
279   }
280   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.SessionInfo)
281   return target;
282 }
283 
ByteSizeLong() const284 size_t SessionInfo::ByteSizeLong() const {
285 // @@protoc_insertion_point(message_byte_size_start:tensorflow.SessionInfo)
286   size_t total_size = 0;
287 
288   ::uint32_t cached_has_bits = 0;
289   // Prevent compiler warnings about cached_has_bits being unused
290   (void) cached_has_bits;
291 
292   // int64 intra_op_parallelism = 1;
293   if (this->_internal_intra_op_parallelism() != 0) {
294     total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_intra_op_parallelism());
295   }
296 
297   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
298     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
299   }
300   int cached_size = ::_pbi::ToCachedSize(total_size);
301   SetCachedSize(cached_size);
302   return total_size;
303 }
304 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)305 void SessionInfo::CheckTypeAndMergeFrom(
306     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
307   MergeFrom(*::_pbi::DownCast<const SessionInfo*>(
308       &from));
309 }
310 
MergeFrom(const SessionInfo & from)311 void SessionInfo::MergeFrom(const SessionInfo& from) {
312   SessionInfo* const _this = this;
313   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.SessionInfo)
314   GOOGLE_DCHECK_NE(&from, _this);
315   ::uint32_t cached_has_bits = 0;
316   (void) cached_has_bits;
317 
318   if (from._internal_intra_op_parallelism() != 0) {
319     _this->_internal_set_intra_op_parallelism(from._internal_intra_op_parallelism());
320   }
321   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
322 }
323 
CopyFrom(const SessionInfo & from)324 void SessionInfo::CopyFrom(const SessionInfo& from) {
325 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.SessionInfo)
326   if (&from == this) return;
327   Clear();
328   MergeFrom(from);
329 }
330 
IsInitialized() const331 bool SessionInfo::IsInitialized() const {
332   return true;
333 }
334 
InternalSwap(SessionInfo * other)335 void SessionInfo::InternalSwap(SessionInfo* other) {
336   using std::swap;
337   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
338   swap(_impl_.intra_op_parallelism_, other->_impl_.intra_op_parallelism_);
339 }
340 
GetTypeName() const341 std::string SessionInfo::GetTypeName() const {
342   return "tensorflow.SessionInfo";
343 }
344 
345 
346 // ===================================================================
347 
OpInfo_AttrEntry_DoNotUse()348 OpInfo_AttrEntry_DoNotUse::OpInfo_AttrEntry_DoNotUse() {}
OpInfo_AttrEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena * arena)349 OpInfo_AttrEntry_DoNotUse::OpInfo_AttrEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
350     : SuperType(arena) {}
MergeFrom(const OpInfo_AttrEntry_DoNotUse & other)351 void OpInfo_AttrEntry_DoNotUse::MergeFrom(const OpInfo_AttrEntry_DoNotUse& other) {
352   MergeFromInternal(other);
353 }
354 
355 // ===================================================================
356 
357 class OpInfo_TensorProperties::_Internal {
358  public:
359   static const ::tensorflow::TensorShapeProto& shape(const OpInfo_TensorProperties* msg);
360   static const ::tensorflow::TensorProto& value(const OpInfo_TensorProperties* msg);
361 };
362 
363 const ::tensorflow::TensorShapeProto&
shape(const OpInfo_TensorProperties * msg)364 OpInfo_TensorProperties::_Internal::shape(const OpInfo_TensorProperties* msg) {
365   return *msg->_impl_.shape_;
366 }
367 const ::tensorflow::TensorProto&
value(const OpInfo_TensorProperties * msg)368 OpInfo_TensorProperties::_Internal::value(const OpInfo_TensorProperties* msg) {
369   return *msg->_impl_.value_;
370 }
clear_shape()371 void OpInfo_TensorProperties::clear_shape() {
372   if (GetArenaForAllocation() == nullptr && _impl_.shape_ != nullptr) {
373     delete _impl_.shape_;
374   }
375   _impl_.shape_ = nullptr;
376 }
clear_value()377 void OpInfo_TensorProperties::clear_value() {
378   if (GetArenaForAllocation() == nullptr && _impl_.value_ != nullptr) {
379     delete _impl_.value_;
380   }
381   _impl_.value_ = nullptr;
382 }
OpInfo_TensorProperties(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)383 OpInfo_TensorProperties::OpInfo_TensorProperties(::PROTOBUF_NAMESPACE_ID::Arena* arena,
384                          bool is_message_owned)
385   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
386   SharedCtor(arena, is_message_owned);
387   // @@protoc_insertion_point(arena_constructor:tensorflow.OpInfo.TensorProperties)
388 }
OpInfo_TensorProperties(const OpInfo_TensorProperties & from)389 OpInfo_TensorProperties::OpInfo_TensorProperties(const OpInfo_TensorProperties& from)
390   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
391   OpInfo_TensorProperties* const _this = this; (void)_this;
392   new (&_impl_) Impl_{
393       decltype(_impl_.shape_){nullptr}
394     , decltype(_impl_.value_){nullptr}
395     , decltype(_impl_.dtype_){}
396     , /*decltype(_impl_._cached_size_)*/{}};
397 
398   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
399   if (from._internal_has_shape()) {
400     _this->_impl_.shape_ = new ::tensorflow::TensorShapeProto(*from._impl_.shape_);
401   }
402   if (from._internal_has_value()) {
403     _this->_impl_.value_ = new ::tensorflow::TensorProto(*from._impl_.value_);
404   }
405   _this->_impl_.dtype_ = from._impl_.dtype_;
406   // @@protoc_insertion_point(copy_constructor:tensorflow.OpInfo.TensorProperties)
407 }
408 
SharedCtor(::_pb::Arena * arena,bool is_message_owned)409 inline void OpInfo_TensorProperties::SharedCtor(
410     ::_pb::Arena* arena, bool is_message_owned) {
411   (void)arena;
412   (void)is_message_owned;
413   new (&_impl_) Impl_{
414       decltype(_impl_.shape_){nullptr}
415     , decltype(_impl_.value_){nullptr}
416     , decltype(_impl_.dtype_){0}
417     , /*decltype(_impl_._cached_size_)*/{}
418   };
419 }
420 
~OpInfo_TensorProperties()421 OpInfo_TensorProperties::~OpInfo_TensorProperties() {
422   // @@protoc_insertion_point(destructor:tensorflow.OpInfo.TensorProperties)
423   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
424   (void)arena;
425     return;
426   }
427   SharedDtor();
428 }
429 
SharedDtor()430 inline void OpInfo_TensorProperties::SharedDtor() {
431   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
432   if (this != internal_default_instance()) delete _impl_.shape_;
433   if (this != internal_default_instance()) delete _impl_.value_;
434 }
435 
SetCachedSize(int size) const436 void OpInfo_TensorProperties::SetCachedSize(int size) const {
437   _impl_._cached_size_.Set(size);
438 }
439 
Clear()440 void OpInfo_TensorProperties::Clear() {
441 // @@protoc_insertion_point(message_clear_start:tensorflow.OpInfo.TensorProperties)
442   ::uint32_t cached_has_bits = 0;
443   // Prevent compiler warnings about cached_has_bits being unused
444   (void) cached_has_bits;
445 
446   if (GetArenaForAllocation() == nullptr && _impl_.shape_ != nullptr) {
447     delete _impl_.shape_;
448   }
449   _impl_.shape_ = nullptr;
450   if (GetArenaForAllocation() == nullptr && _impl_.value_ != nullptr) {
451     delete _impl_.value_;
452   }
453   _impl_.value_ = nullptr;
454   _impl_.dtype_ = 0;
455   _internal_metadata_.Clear<std::string>();
456 }
457 
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)458 const char* OpInfo_TensorProperties::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
459 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
460   while (!ctx->Done(&ptr)) {
461     ::uint32_t tag;
462     ptr = ::_pbi::ReadTag(ptr, &tag);
463     switch (tag >> 3) {
464       // .tensorflow.DataType dtype = 1;
465       case 1:
466         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
467           ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
468           CHK_(ptr);
469           _internal_set_dtype(static_cast<::tensorflow::DataType>(val));
470         } else {
471           goto handle_unusual;
472         }
473         continue;
474       // .tensorflow.TensorShapeProto shape = 2;
475       case 2:
476         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
477           ptr = ctx->ParseMessage(_internal_mutable_shape(), ptr);
478           CHK_(ptr);
479         } else {
480           goto handle_unusual;
481         }
482         continue;
483       // .tensorflow.TensorProto value = 3;
484       case 3:
485         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
486           ptr = ctx->ParseMessage(_internal_mutable_value(), ptr);
487           CHK_(ptr);
488         } else {
489           goto handle_unusual;
490         }
491         continue;
492       default:
493         goto handle_unusual;
494     }  // switch
495   handle_unusual:
496     if ((tag == 0) || ((tag & 7) == 4)) {
497       CHK_(ptr);
498       ctx->SetLastTag(tag);
499       goto message_done;
500     }
501     ptr = UnknownFieldParse(
502         tag,
503         _internal_metadata_.mutable_unknown_fields<std::string>(),
504         ptr, ctx);
505     CHK_(ptr != nullptr);
506   }  // while
507 message_done:
508   return ptr;
509 failure:
510   ptr = nullptr;
511   goto message_done;
512 #undef CHK_
513 }
514 
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const515 ::uint8_t* OpInfo_TensorProperties::_InternalSerialize(
516     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
517   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.OpInfo.TensorProperties)
518   ::uint32_t cached_has_bits = 0;
519   (void) cached_has_bits;
520 
521   // .tensorflow.DataType dtype = 1;
522   if (this->_internal_dtype() != 0) {
523     target = stream->EnsureSpace(target);
524     target = ::_pbi::WireFormatLite::WriteEnumToArray(
525       1, this->_internal_dtype(), target);
526   }
527 
528   // .tensorflow.TensorShapeProto shape = 2;
529   if (this->_internal_has_shape()) {
530     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
531       InternalWriteMessage(2, _Internal::shape(this),
532         _Internal::shape(this).GetCachedSize(), target, stream);
533   }
534 
535   // .tensorflow.TensorProto value = 3;
536   if (this->_internal_has_value()) {
537     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
538       InternalWriteMessage(3, _Internal::value(this),
539         _Internal::value(this).GetCachedSize(), target, stream);
540   }
541 
542   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
543     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
544         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
545   }
546   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.OpInfo.TensorProperties)
547   return target;
548 }
549 
ByteSizeLong() const550 size_t OpInfo_TensorProperties::ByteSizeLong() const {
551 // @@protoc_insertion_point(message_byte_size_start:tensorflow.OpInfo.TensorProperties)
552   size_t total_size = 0;
553 
554   ::uint32_t cached_has_bits = 0;
555   // Prevent compiler warnings about cached_has_bits being unused
556   (void) cached_has_bits;
557 
558   // .tensorflow.TensorShapeProto shape = 2;
559   if (this->_internal_has_shape()) {
560     total_size += 1 +
561       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
562         *_impl_.shape_);
563   }
564 
565   // .tensorflow.TensorProto value = 3;
566   if (this->_internal_has_value()) {
567     total_size += 1 +
568       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
569         *_impl_.value_);
570   }
571 
572   // .tensorflow.DataType dtype = 1;
573   if (this->_internal_dtype() != 0) {
574     total_size += 1 +
575       ::_pbi::WireFormatLite::EnumSize(this->_internal_dtype());
576   }
577 
578   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
579     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
580   }
581   int cached_size = ::_pbi::ToCachedSize(total_size);
582   SetCachedSize(cached_size);
583   return total_size;
584 }
585 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)586 void OpInfo_TensorProperties::CheckTypeAndMergeFrom(
587     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
588   MergeFrom(*::_pbi::DownCast<const OpInfo_TensorProperties*>(
589       &from));
590 }
591 
MergeFrom(const OpInfo_TensorProperties & from)592 void OpInfo_TensorProperties::MergeFrom(const OpInfo_TensorProperties& from) {
593   OpInfo_TensorProperties* const _this = this;
594   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.OpInfo.TensorProperties)
595   GOOGLE_DCHECK_NE(&from, _this);
596   ::uint32_t cached_has_bits = 0;
597   (void) cached_has_bits;
598 
599   if (from._internal_has_shape()) {
600     _this->_internal_mutable_shape()->::tensorflow::TensorShapeProto::MergeFrom(
601         from._internal_shape());
602   }
603   if (from._internal_has_value()) {
604     _this->_internal_mutable_value()->::tensorflow::TensorProto::MergeFrom(
605         from._internal_value());
606   }
607   if (from._internal_dtype() != 0) {
608     _this->_internal_set_dtype(from._internal_dtype());
609   }
610   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
611 }
612 
CopyFrom(const OpInfo_TensorProperties & from)613 void OpInfo_TensorProperties::CopyFrom(const OpInfo_TensorProperties& from) {
614 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.OpInfo.TensorProperties)
615   if (&from == this) return;
616   Clear();
617   MergeFrom(from);
618 }
619 
IsInitialized() const620 bool OpInfo_TensorProperties::IsInitialized() const {
621   return true;
622 }
623 
InternalSwap(OpInfo_TensorProperties * other)624 void OpInfo_TensorProperties::InternalSwap(OpInfo_TensorProperties* other) {
625   using std::swap;
626   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
627   ::PROTOBUF_NAMESPACE_ID::internal::memswap<
628       PROTOBUF_FIELD_OFFSET(OpInfo_TensorProperties, _impl_.dtype_)
629       + sizeof(OpInfo_TensorProperties::_impl_.dtype_)  // NOLINT
630       - PROTOBUF_FIELD_OFFSET(OpInfo_TensorProperties, _impl_.shape_)>(
631           reinterpret_cast<char*>(&_impl_.shape_),
632           reinterpret_cast<char*>(&other->_impl_.shape_));
633 }
634 
GetTypeName() const635 std::string OpInfo_TensorProperties::GetTypeName() const {
636   return "tensorflow.OpInfo.TensorProperties";
637 }
638 
639 
640 // ===================================================================
641 
642 class OpInfo::_Internal {
643  public:
644   static const ::tensorflow::DeviceProperties& device(const OpInfo* msg);
645   static const ::tensorflow::SessionInfo& session_info(const OpInfo* msg);
646 };
647 
648 const ::tensorflow::DeviceProperties&
device(const OpInfo * msg)649 OpInfo::_Internal::device(const OpInfo* msg) {
650   return *msg->_impl_.device_;
651 }
652 const ::tensorflow::SessionInfo&
session_info(const OpInfo * msg)653 OpInfo::_Internal::session_info(const OpInfo* msg) {
654   return *msg->_impl_.session_info_;
655 }
clear_attr()656 void OpInfo::clear_attr() {
657   _impl_.attr_.Clear();
658 }
clear_device()659 void OpInfo::clear_device() {
660   if (GetArenaForAllocation() == nullptr && _impl_.device_ != nullptr) {
661     delete _impl_.device_;
662   }
663   _impl_.device_ = nullptr;
664 }
OpInfo(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)665 OpInfo::OpInfo(::PROTOBUF_NAMESPACE_ID::Arena* arena,
666                          bool is_message_owned)
667   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
668   SharedCtor(arena, is_message_owned);
669   // @@protoc_insertion_point(arena_constructor:tensorflow.OpInfo)
670 }
// Copy constructor. The copy is never arena-allocated (MessageLite() base),
// so submessages are deep-copied with plain `new`.
OpInfo::OpInfo(const OpInfo& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  OpInfo* const _this = this; (void)_this;
  // Placement-new the Impl_ aggregate; repeated fields copy in the
  // initializer, while attr_/op_/submessages are filled in below.
  new (&_impl_) Impl_{
      /*decltype(_impl_.attr_)*/{}
    , decltype(_impl_.inputs_){from._impl_.inputs_}
    , decltype(_impl_.outputs_){from._impl_.outputs_}
    , decltype(_impl_.op_){}
    , decltype(_impl_.device_){nullptr}
    , decltype(_impl_.session_info_){nullptr}
    , /*decltype(_impl_._cached_size_)*/{}};

  // Copies unknown fields (lite runtime keeps them in a std::string).
  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _this->_impl_.attr_.MergeFrom(from._impl_.attr_);
  _impl_.op_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.op_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // Empty string keeps the shared default instead of allocating.
  if (!from._internal_op().empty()) {
    _this->_impl_.op_.Set(from._internal_op(),
      _this->GetArenaForAllocation());
  }
  if (from._internal_has_device()) {
    _this->_impl_.device_ = new ::tensorflow::DeviceProperties(*from._impl_.device_);
  }
  if (from._internal_has_session_info()) {
    _this->_impl_.session_info_ = new ::tensorflow::SessionInfo(*from._impl_.session_info_);
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.OpInfo)
}
701 
// Shared initialization for all constructors: placement-news Impl_ with
// default (empty/null) field values; map and repeated fields are bound to
// the arena, submessage pointers start null (lazy allocation).
inline void OpInfo::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      /*decltype(_impl_.attr_)*/{::_pbi::ArenaInitialized(), arena}
    , decltype(_impl_.inputs_){arena}
    , decltype(_impl_.outputs_){arena}
    , decltype(_impl_.op_){}
    , decltype(_impl_.device_){nullptr}
    , decltype(_impl_.session_info_){nullptr}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.op_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.op_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}
720 
// Destructor. If the message lives on an arena, the arena owns all storage:
// DeleteReturnArena returns non-null and per-field teardown is skipped.
OpInfo::~OpInfo() {
  // @@protoc_insertion_point(destructor:tensorflow.OpInfo)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
729 
// Frees heap-owned field storage. Only ever called off-arena (DCHECK below).
inline void OpInfo::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.attr_.Destruct();
  _impl_.attr_.~MapFieldLite();
  _impl_.inputs_.~RepeatedPtrField();
  _impl_.outputs_.~RepeatedPtrField();
  _impl_.op_.Destroy();
  // The global default instance must never delete its (null) submessage
  // pointers, hence the self-check against internal_default_instance().
  if (this != internal_default_instance()) delete _impl_.device_;
  if (this != internal_default_instance()) delete _impl_.session_info_;
}
740 
// Stores the byte size computed by ByteSizeLong() for later serialization.
void OpInfo::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
744 
// Resets every field to its default value and drops unknown fields.
void OpInfo::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.OpInfo)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.attr_.Clear();
  _impl_.inputs_.Clear();
  _impl_.outputs_.Clear();
  _impl_.op_.ClearToEmpty();
  // Submessages are deleted only off-arena; on an arena the arena owns them
  // and only the pointer is reset.
  if (GetArenaForAllocation() == nullptr && _impl_.device_ != nullptr) {
    delete _impl_.device_;
  }
  _impl_.device_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.session_info_ != nullptr) {
    delete _impl_.session_info_;
  }
  _impl_.session_info_ = nullptr;
  _internal_metadata_.Clear<std::string>();
}
765 
// Wire-format parser. Dispatches on the field number (tag >> 3), verifies
// the wire type via the full low tag byte, and returns the position after
// the parsed region — or nullptr on malformed input (`failure` path).
const char* OpInfo::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // string op = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          auto str = _internal_mutable_op();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          // proto3 strings must be valid UTF-8; reject the message otherwise.
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // map<string, .tensorflow.AttrValue> attr = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          // The -1/+1 dance skips the already-validated tag byte on each
          // pass, letting the loop greedily consume consecutive entries
          // without re-entering the switch.
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(&_impl_.attr_, ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.OpInfo.TensorProperties inputs = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_inputs(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<26>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.DeviceProperties device = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
          ptr = ctx->ParseMessage(_internal_mutable_device(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.OpInfo.TensorProperties outputs = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_outputs(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<42>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.SessionInfo session_info = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
          ptr = ctx->ParseMessage(_internal_mutable_session_info(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Zero tag or end-group tag terminates this message; anything else is
    // retained as an unknown field (stored as a string in the lite runtime).
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
865 
// Serializes all set fields in field-number order into `target`. Relies on
// the byte sizes cached by a preceding ByteSizeLong() call.
::uint8_t* OpInfo::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.OpInfo)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // string op = 1;
  if (!this->_internal_op().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_op().data(), static_cast<int>(this->_internal_op().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.OpInfo.op");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_op(), target);
  }

  // map<string, .tensorflow.AttrValue> attr = 2;
  if (!this->_internal_attr().empty()) {
    using MapType = ::_pb::Map<std::string, ::tensorflow::AttrValue>;
    using WireHelper = OpInfo_AttrEntry_DoNotUse::Funcs;
    const auto& map_field = this->_internal_attr();
    // Key UTF-8 check is run after each entry is written.
    auto check_utf8 = [](const MapType::value_type& entry) {
      (void)entry;
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
        entry.first.data(), static_cast<int>(entry.first.length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
        "tensorflow.OpInfo.AttrEntry.key");
    };

    // Deterministic mode sorts entries by key (via MapSorterPtr) so equal
    // maps always produce identical bytes; otherwise map order is used.
    if (stream->IsSerializationDeterministic() && map_field.size() > 1) {
      for (const auto& entry : ::_pbi::MapSorterPtr<MapType>(map_field)) {
        target = WireHelper::InternalSerialize(2, entry.first, entry.second, target, stream);
        check_utf8(entry);
      }
    } else {
      for (const auto& entry : map_field) {
        target = WireHelper::InternalSerialize(2, entry.first, entry.second, target, stream);
        check_utf8(entry);
      }
    }
  }

  // repeated .tensorflow.OpInfo.TensorProperties inputs = 3;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_inputs_size()); i < n; i++) {
    const auto& repfield = this->_internal_inputs(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(3, repfield, repfield.GetCachedSize(), target, stream);
  }

  // .tensorflow.DeviceProperties device = 4;
  if (this->_internal_has_device()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(4, _Internal::device(this),
        _Internal::device(this).GetCachedSize(), target, stream);
  }

  // repeated .tensorflow.OpInfo.TensorProperties outputs = 5;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_outputs_size()); i < n; i++) {
    const auto& repfield = this->_internal_outputs(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(5, repfield, repfield.GetCachedSize(), target, stream);
  }

  // .tensorflow.SessionInfo session_info = 6;
  if (this->_internal_has_session_info()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(6, _Internal::session_info(this),
        _Internal::session_info(this).GetCachedSize(), target, stream);
  }

  // Unknown fields were captured verbatim at parse time; re-emit them.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.OpInfo)
  return target;
}
945 
// Computes the serialized size in bytes and caches it for
// _InternalSerialize. The leading `1` in each sum is the one-byte tag for
// these low-numbered fields.
size_t OpInfo::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.OpInfo)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // map<string, .tensorflow.AttrValue> attr = 2;
  total_size += 1 *
      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_attr_size());
  for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::tensorflow::AttrValue >::const_iterator
      it = this->_internal_attr().begin();
      it != this->_internal_attr().end(); ++it) {
    total_size += OpInfo_AttrEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
  }

  // repeated .tensorflow.OpInfo.TensorProperties inputs = 3;
  total_size += 1UL * this->_internal_inputs_size();
  for (const auto& msg : this->_impl_.inputs_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // repeated .tensorflow.OpInfo.TensorProperties outputs = 5;
  total_size += 1UL * this->_internal_outputs_size();
  for (const auto& msg : this->_impl_.outputs_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // string op = 1;
  if (!this->_internal_op().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_op());
  }

  // .tensorflow.DeviceProperties device = 4;
  if (this->_internal_has_device()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.device_);
  }

  // .tensorflow.SessionInfo session_info = 6;
  if (this->_internal_has_session_info()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.session_info_);
  }

  // Unknown fields are stored as raw bytes; count them verbatim.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
1005 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)1006 void OpInfo::CheckTypeAndMergeFrom(
1007     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
1008   MergeFrom(*::_pbi::DownCast<const OpInfo*>(
1009       &from));
1010 }
1011 
// Field-wise merge from `from` into this message. Repeated/map fields are
// appended, the scalar string is overwritten only if non-empty in `from`,
// and submessages are merged recursively (allocated on demand).
void OpInfo::MergeFrom(const OpInfo& from) {
  OpInfo* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.OpInfo)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.attr_.MergeFrom(from._impl_.attr_);
  _this->_impl_.inputs_.MergeFrom(from._impl_.inputs_);
  _this->_impl_.outputs_.MergeFrom(from._impl_.outputs_);
  if (!from._internal_op().empty()) {
    _this->_internal_set_op(from._internal_op());
  }
  if (from._internal_has_device()) {
    _this->_internal_mutable_device()->::tensorflow::DeviceProperties::MergeFrom(
        from._internal_device());
  }
  if (from._internal_has_session_info()) {
    _this->_internal_mutable_session_info()->::tensorflow::SessionInfo::MergeFrom(
        from._internal_session_info());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
1035 
CopyFrom(const OpInfo & from)1036 void OpInfo::CopyFrom(const OpInfo& from) {
1037 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.OpInfo)
1038   if (&from == this) return;
1039   Clear();
1040   MergeFrom(from);
1041 }
1042 
// No fields require an initialization check, so this is always true.
bool OpInfo::IsInitialized() const {
  return true;
}
1046 
// O(1) swap of internal state with `other`. Assumes both messages may live
// on different arenas (the string swap is arena-aware).
void OpInfo::InternalSwap(OpInfo* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.attr_.InternalSwap(&other->_impl_.attr_);
  _impl_.inputs_.InternalSwap(&other->_impl_.inputs_);
  _impl_.outputs_.InternalSwap(&other->_impl_.outputs_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.op_, lhs_arena,
      &other->_impl_.op_, rhs_arena
  );
  // Swap the device_..session_info_ pointer pair as one contiguous byte
  // range (the template argument is the span between their field offsets).
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(OpInfo, _impl_.session_info_)
      + sizeof(OpInfo::_impl_.session_info_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(OpInfo, _impl_.device_)>(
          reinterpret_cast<char*>(&_impl_.device_),
          reinterpret_cast<char*>(&other->_impl_.device_));
}
1066 
// Fully-qualified message name; hard-coded because the lite runtime has no
// descriptor to derive it from.
std::string OpInfo::GetTypeName() const {
  return "tensorflow.OpInfo";
}
1070 
1071 
1072 // ===================================================================
1073 
// Private accessor shim for NormalDistribution. Empty: this message's
// fields (two doubles, per the parser below) need no internal accessors.
class NormalDistribution::_Internal {
 public:
};
1077 
// Arena constructor: field initialization is delegated to SharedCtor.
NormalDistribution::NormalDistribution(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.NormalDistribution)
}
// Copy constructor. Both fields are plain doubles laid out contiguously
// (mu_ through sigma_), so they are copied with a single memcpy.
NormalDistribution::NormalDistribution(const NormalDistribution& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  NormalDistribution* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.mu_){}
    , decltype(_impl_.sigma_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  ::memcpy(&_impl_.mu_, &from._impl_.mu_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.sigma_) -
    reinterpret_cast<char*>(&_impl_.mu_)) + sizeof(_impl_.sigma_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.NormalDistribution)
}
1098 
// Shared initialization: zero-initializes both double fields in place.
inline void NormalDistribution::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.mu_){0}
    , decltype(_impl_.sigma_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
1109 
// Destructor. Arena-owned instances skip per-field teardown: the arena
// reclaims the memory when DeleteReturnArena reports one.
NormalDistribution::~NormalDistribution() {
  // @@protoc_insertion_point(destructor:tensorflow.NormalDistribution)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
1118 
// Nothing to free: all fields are scalar. Only the off-arena DCHECK remains.
inline void NormalDistribution::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}
1122 
// Stores the byte size computed by ByteSizeLong() for later serialization.
void NormalDistribution::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
1126 
// Resets both double fields to zero (single memset over the contiguous
// mu_..sigma_ span) and drops unknown fields.
void NormalDistribution::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.NormalDistribution)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  ::memset(&_impl_.mu_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.sigma_) -
      reinterpret_cast<char*>(&_impl_.mu_)) + sizeof(_impl_.sigma_));
  _internal_metadata_.Clear<std::string>();
}
1138 
// Wire-format parser. Both fields are fixed64 doubles (low tag bytes 9 and
// 17), loaded unaligned directly into the field slot. Returns nullptr on
// malformed input.
const char* NormalDistribution::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // double mu = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 9)) {
          _impl_.mu_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      // double sigma = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 17)) {
          _impl_.sigma_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Zero tag or end-group tag terminates the message; any other tag is
    // kept as an unknown field.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
1185 
// Serializes set fields. proto3 "set" for a double means a non-zero BIT
// pattern: the raw bits are compared (via memcpy) so -0.0 and NaN are still
// written even though they compare equal-to/unordered-with 0.0.
::uint8_t* NormalDistribution::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.NormalDistribution)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // double mu = 1;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_mu = this->_internal_mu();
  ::uint64_t raw_mu;
  memcpy(&raw_mu, &tmp_mu, sizeof(tmp_mu));
  if (raw_mu != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(1, this->_internal_mu(), target);
  }

  // double sigma = 2;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_sigma = this->_internal_sigma();
  ::uint64_t raw_sigma;
  memcpy(&raw_sigma, &tmp_sigma, sizeof(tmp_sigma));
  if (raw_sigma != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(2, this->_internal_sigma(), target);
  }

  // Re-emit unknown fields captured at parse time.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.NormalDistribution)
  return target;
}
1219 
// Computes the serialized size and caches it. Each set double costs
// 1 (tag byte) + 8 (fixed64 payload); "set" is judged on the raw bit
// pattern, mirroring _InternalSerialize.
size_t NormalDistribution::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.NormalDistribution)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // double mu = 1;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_mu = this->_internal_mu();
  ::uint64_t raw_mu;
  memcpy(&raw_mu, &tmp_mu, sizeof(tmp_mu));
  if (raw_mu != 0) {
    total_size += 1 + 8;
  }

  // double sigma = 2;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_sigma = this->_internal_sigma();
  ::uint64_t raw_sigma;
  memcpy(&raw_sigma, &tmp_sigma, sizeof(tmp_sigma));
  if (raw_sigma != 0) {
    total_size += 1 + 8;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
1253 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)1254 void NormalDistribution::CheckTypeAndMergeFrom(
1255     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
1256   MergeFrom(*::_pbi::DownCast<const NormalDistribution*>(
1257       &from));
1258 }
1259 
// Field-wise merge: a double from `from` overwrites ours only when its raw
// bit pattern is non-zero (so -0.0 and NaN are merged, matching the
// serializer's presence rule).
void NormalDistribution::MergeFrom(const NormalDistribution& from) {
  NormalDistribution* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.NormalDistribution)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_mu = from._internal_mu();
  ::uint64_t raw_mu;
  memcpy(&raw_mu, &tmp_mu, sizeof(tmp_mu));
  if (raw_mu != 0) {
    _this->_internal_set_mu(from._internal_mu());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_sigma = from._internal_sigma();
  ::uint64_t raw_sigma;
  memcpy(&raw_sigma, &tmp_sigma, sizeof(tmp_sigma));
  if (raw_sigma != 0) {
    _this->_internal_set_sigma(from._internal_sigma());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
1283 
CopyFrom(const NormalDistribution & from)1284 void NormalDistribution::CopyFrom(const NormalDistribution& from) {
1285 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.NormalDistribution)
1286   if (&from == this) return;
1287   Clear();
1288   MergeFrom(from);
1289 }
1290 
// No fields require an initialization check, so this is always true.
bool NormalDistribution::IsInitialized() const {
  return true;
}
1294 
// O(1) swap with `other`: the contiguous mu_..sigma_ byte range is swapped
// in one memswap sized by the fields' offsets.
void NormalDistribution::InternalSwap(NormalDistribution* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(NormalDistribution, _impl_.sigma_)
      + sizeof(NormalDistribution::_impl_.sigma_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(NormalDistribution, _impl_.mu_)>(
          reinterpret_cast<char*>(&_impl_.mu_),
          reinterpret_cast<char*>(&other->_impl_.mu_));
}
1305 
// Fully-qualified message name; hard-coded because the lite runtime has no
// descriptor to derive it from.
std::string NormalDistribution::GetTypeName() const {
  return "tensorflow.NormalDistribution";
}
1309 
1310 
1311 // ===================================================================
1312 
// Private accessor shim for LogNormalDistribution. Empty: this message's
// fields (two doubles, per the parser below) need no internal accessors.
class LogNormalDistribution::_Internal {
 public:
};
1316 
// Arena constructor: field initialization is delegated to SharedCtor.
LogNormalDistribution::LogNormalDistribution(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.LogNormalDistribution)
}
// Copy constructor. Both fields are plain doubles laid out contiguously
// (mu_ through sigma_), so they are copied with a single memcpy.
LogNormalDistribution::LogNormalDistribution(const LogNormalDistribution& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  LogNormalDistribution* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.mu_){}
    , decltype(_impl_.sigma_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  ::memcpy(&_impl_.mu_, &from._impl_.mu_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.sigma_) -
    reinterpret_cast<char*>(&_impl_.mu_)) + sizeof(_impl_.sigma_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.LogNormalDistribution)
}
1337 
// Shared initialization: zero-initializes both double fields in place.
inline void LogNormalDistribution::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.mu_){0}
    , decltype(_impl_.sigma_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
1348 
// Destructor. Arena-owned instances skip per-field teardown: the arena
// reclaims the memory when DeleteReturnArena reports one.
LogNormalDistribution::~LogNormalDistribution() {
  // @@protoc_insertion_point(destructor:tensorflow.LogNormalDistribution)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
1357 
// Nothing to free: all fields are scalar. Only the off-arena DCHECK remains.
inline void LogNormalDistribution::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}
1361 
// Stores the byte size computed by ByteSizeLong() for later serialization.
void LogNormalDistribution::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
1365 
// Resets both double fields to zero (single memset over the contiguous
// mu_..sigma_ span) and drops unknown fields.
void LogNormalDistribution::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.LogNormalDistribution)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  ::memset(&_impl_.mu_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.sigma_) -
      reinterpret_cast<char*>(&_impl_.mu_)) + sizeof(_impl_.sigma_));
  _internal_metadata_.Clear<std::string>();
}
1377 
// Wire-format parser. Both fields are fixed64 doubles (low tag bytes 9 and
// 17), loaded unaligned directly into the field slot. Returns nullptr on
// malformed input.
const char* LogNormalDistribution::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // double mu = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 9)) {
          _impl_.mu_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      // double sigma = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 17)) {
          _impl_.sigma_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
          ptr += sizeof(double);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Zero tag or end-group tag terminates the message; any other tag is
    // kept as an unknown field.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
1424 
// Serializes set fields. proto3 "set" for a double means a non-zero BIT
// pattern: the raw bits are compared (via memcpy) so -0.0 and NaN are still
// written even though they compare equal-to/unordered-with 0.0.
::uint8_t* LogNormalDistribution::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.LogNormalDistribution)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // double mu = 1;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_mu = this->_internal_mu();
  ::uint64_t raw_mu;
  memcpy(&raw_mu, &tmp_mu, sizeof(tmp_mu));
  if (raw_mu != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(1, this->_internal_mu(), target);
  }

  // double sigma = 2;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_sigma = this->_internal_sigma();
  ::uint64_t raw_sigma;
  memcpy(&raw_sigma, &tmp_sigma, sizeof(tmp_sigma));
  if (raw_sigma != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(2, this->_internal_sigma(), target);
  }

  // Re-emit unknown fields captured at parse time.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.LogNormalDistribution)
  return target;
}
1458 
// Computes the serialized size in bytes (must mirror the field-emission logic
// of _InternalSerialize exactly) and caches it for later serialization.
size_t LogNormalDistribution::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.LogNormalDistribution)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // double mu = 1;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  // Same raw-bit default check as the serializer: field is counted unless its
  // bit pattern is exactly +0.0.
  double tmp_mu = this->_internal_mu();
  ::uint64_t raw_mu;
  memcpy(&raw_mu, &tmp_mu, sizeof(tmp_mu));
  if (raw_mu != 0) {
    total_size += 1 + 8;  // 1 tag byte + 8-byte fixed64 payload.
  }

  // double sigma = 2;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_sigma = this->_internal_sigma();
  ::uint64_t raw_sigma;
  memcpy(&raw_sigma, &tmp_sigma, sizeof(tmp_sigma));
  if (raw_sigma != 0) {
    total_size += 1 + 8;  // 1 tag byte + 8-byte fixed64 payload.
  }

  // Preserved unknown fields are appended verbatim, so add their raw size.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
1492 
// MessageLite virtual entry point: downcasts `from` to this concrete type and
// forwards to the typed MergeFrom. The lite runtime's DownCast is unchecked in
// release builds; the caller guarantees the dynamic type matches.
void LogNormalDistribution::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const LogNormalDistribution*>(
      &from));
}
1498 
// Merges fields from `from` into this message. Per proto3 semantics a scalar
// is copied only when `from`'s value is non-default (checked on the raw bit
// pattern, so -0.0/NaN sources do overwrite). Unknown fields are concatenated.
void LogNormalDistribution::MergeFrom(const LogNormalDistribution& from) {
  LogNormalDistribution* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.LogNormalDistribution)
  GOOGLE_DCHECK_NE(&from, _this);  // Self-merge is a caller bug.
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_mu = from._internal_mu();
  ::uint64_t raw_mu;
  memcpy(&raw_mu, &tmp_mu, sizeof(tmp_mu));
  if (raw_mu != 0) {
    _this->_internal_set_mu(from._internal_mu());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_sigma = from._internal_sigma();
  ::uint64_t raw_sigma;
  memcpy(&raw_sigma, &tmp_sigma, sizeof(tmp_sigma));
  if (raw_sigma != 0) {
    _this->_internal_set_sigma(from._internal_sigma());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
1522 
// Replaces this message's contents with a copy of `from`.
// Implemented as Clear()+MergeFrom(); self-assignment is a no-op.
void LogNormalDistribution::CopyFrom(const LogNormalDistribution& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.LogNormalDistribution)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
1529 
// Always true: proto3 messages have no required fields to validate.
bool LogNormalDistribution::IsInitialized() const {
  return true;
}
1533 
// O(1) swap of all state with `other`. The two double fields (mu_ .. sigma_)
// occupy a contiguous trivially-copyable range in _impl_, so they are swapped
// as one raw byte region via memswap; metadata is swapped separately.
void LogNormalDistribution::InternalSwap(LogNormalDistribution* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(LogNormalDistribution, _impl_.sigma_)
      + sizeof(LogNormalDistribution::_impl_.sigma_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(LogNormalDistribution, _impl_.mu_)>(
          reinterpret_cast<char*>(&_impl_.mu_),
          reinterpret_cast<char*>(&other->_impl_.mu_));
}
1544 
// Fully-qualified proto type name (lite runtime's substitute for descriptors).
std::string LogNormalDistribution::GetTypeName() const {
  return "tensorflow.LogNormalDistribution";
}
1548 
1549 
1550 // ===================================================================
1551 
// Generated private-access helper class. OpMemory's fields (packed int64s and
// plain int64 scalars) need no internal accessors, so the class is empty.
class OpPerformance_OpMemory::_Internal {
 public:
};
1555 
// Arena constructor: base-class setup then field initialization in SharedCtor.
// `is_message_owned` indicates arena-owned-but-individually-deletable mode.
OpPerformance_OpMemory::OpPerformance_OpMemory(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.OpPerformance.OpMemory)
}
// Copy constructor (never on an arena). The repeated field is copy-constructed;
// the four int64 scalars are then bulk-copied with one memcpy because they form
// a contiguous trivially-copyable range [temp_memory_ .. device_persistent_memory_]
// in Impl_ — the placement-new initializers for them are placeholders only.
OpPerformance_OpMemory::OpPerformance_OpMemory(const OpPerformance_OpMemory& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  OpPerformance_OpMemory* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.output_memory_){from._impl_.output_memory_}
    , /*decltype(_impl_._output_memory_cached_byte_size_)*/{0}
    , decltype(_impl_.temp_memory_){}
    , decltype(_impl_.device_temp_memory_){}
    , decltype(_impl_.persistent_memory_){}
    , decltype(_impl_.device_persistent_memory_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  ::memcpy(&_impl_.temp_memory_, &from._impl_.temp_memory_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.device_persistent_memory_) -
    reinterpret_cast<char*>(&_impl_.temp_memory_)) + sizeof(_impl_.device_persistent_memory_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.OpPerformance.OpMemory)
}
1580 
// Common field initialization for all constructors: placement-new constructs
// Impl_ with the arena threaded into the repeated field and all scalars zeroed.
inline void OpPerformance_OpMemory::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.output_memory_){arena}
    , /*decltype(_impl_._output_memory_cached_byte_size_)*/{0}
    , decltype(_impl_.temp_memory_){::int64_t{0}}
    , decltype(_impl_.device_temp_memory_){::int64_t{0}}
    , decltype(_impl_.persistent_memory_){::int64_t{0}}
    , decltype(_impl_.device_persistent_memory_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
1595 
// Destructor: arena-allocated messages skip field destruction entirely (the
// arena reclaims their memory in bulk); heap messages run SharedDtor.
OpPerformance_OpMemory::~OpPerformance_OpMemory() {
  // @@protoc_insertion_point(destructor:tensorflow.OpPerformance.OpMemory)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
1604 
// Heap-only teardown: destroy the repeated field (the int64 scalars are
// trivially destructible). Must never run for arena-owned instances.
inline void OpPerformance_OpMemory::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.output_memory_.~RepeatedField();
}
1609 
// Stores the byte size computed by ByteSizeLong (atomic cache, hence const).
void OpPerformance_OpMemory::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
1613 
// Resets all fields to their defaults: clears the repeated field, zeroes the
// contiguous int64 scalar range with one memset, and drops unknown fields.
void OpPerformance_OpMemory::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.OpPerformance.OpMemory)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.output_memory_.Clear();
  ::memset(&_impl_.temp_memory_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.device_persistent_memory_) -
      reinterpret_cast<char*>(&_impl_.temp_memory_)) + sizeof(_impl_.device_persistent_memory_));
  _internal_metadata_.Clear<std::string>();
}
1626 
// Table-free wire-format parser. Loops over tags, dispatching on the field
// number (tag >> 3) and verifying the expected wire type via the low tag byte.
// Unexpected wire types, unknown field numbers, and group/stream terminators
// all funnel through handle_unusual. Returns the advanced pointer on success,
// nullptr on malformed input (via the `failure` label).
const char* OpPerformance_OpMemory::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // repeated int64 output_memory = 1;
      case 1:
        // Accept both encodings: tag 10 = packed (length-delimited), the
        // expected proto3 form; tag 8 = a single unpacked varint element.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt64Parser(_internal_mutable_output_memory(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<::uint8_t>(tag) == 8) {
          _internal_add_output_memory(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 temp_memory = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.temp_memory_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 device_temp_memory = 3 [deprecated = true];
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.device_temp_memory_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 persistent_memory = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          _impl_.persistent_memory_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 device_persistent_memory = 5 [deprecated = true];
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          _impl_.device_persistent_memory_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // tag 0 (end of stream) or wire type 4 (end-group) terminates this
    // message; anything else is preserved verbatim as an unknown field.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
1703 
// Fast-path wire serialization. Scalars equal to 0 are omitted (proto3
// default); the packed repeated field reuses the byte size cached by
// ByteSizeLong, which callers run before serialization.
::uint8_t* OpPerformance_OpMemory::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.OpPerformance.OpMemory)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // repeated int64 output_memory = 1;
  {
    // Cached by the preceding ByteSizeLong() call; 0 means "nothing to write".
    int byte_size = _impl_._output_memory_cached_byte_size_.load(std::memory_order_relaxed);
    if (byte_size > 0) {
      target = stream->WriteInt64Packed(
          1, _internal_output_memory(), byte_size, target);
    }
  }

  // int64 temp_memory = 2;
  if (this->_internal_temp_memory() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_temp_memory(), target);
  }

  // int64 device_temp_memory = 3 [deprecated = true];
  if (this->_internal_device_temp_memory() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_device_temp_memory(), target);
  }

  // int64 persistent_memory = 4;
  if (this->_internal_persistent_memory() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_persistent_memory(), target);
  }

  // int64 device_persistent_memory = 5 [deprecated = true];
  if (this->_internal_device_persistent_memory() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(5, this->_internal_device_persistent_memory(), target);
  }

  // Re-emit any preserved unknown fields verbatim.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.OpPerformance.OpMemory)
  return target;
}
1750 
// Computes the serialized size and caches both the packed repeated field's
// payload size (consumed by _InternalSerialize) and the total message size.
size_t OpPerformance_OpMemory::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.OpPerformance.OpMemory)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated int64 output_memory = 1;
  {
    size_t data_size = ::_pbi::WireFormatLite::
      Int64Size(this->_impl_.output_memory_);
    if (data_size > 0) {
      // Tag byte plus the varint length prefix of the packed payload.
      total_size += 1 +
        ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
    }
    // Cache the payload size so serialization does not recompute it.
    int cached_size = ::_pbi::ToCachedSize(data_size);
    _impl_._output_memory_cached_byte_size_.store(cached_size,
                                    std::memory_order_relaxed);
    total_size += data_size;
  }

  // int64 temp_memory = 2;
  if (this->_internal_temp_memory() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_temp_memory());
  }

  // int64 device_temp_memory = 3 [deprecated = true];
  if (this->_internal_device_temp_memory() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_device_temp_memory());
  }

  // int64 persistent_memory = 4;
  if (this->_internal_persistent_memory() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_persistent_memory());
  }

  // int64 device_persistent_memory = 5 [deprecated = true];
  if (this->_internal_device_persistent_memory() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_device_persistent_memory());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
1800 
// MessageLite virtual entry point: unchecked downcast to the concrete type,
// then the typed MergeFrom. Caller guarantees the dynamic type matches.
void OpPerformance_OpMemory::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const OpPerformance_OpMemory*>(
      &from));
}
1806 
// Merges `from` into this message: repeated elements are appended, scalars are
// copied only when non-zero in `from` (proto3), unknown fields concatenated.
void OpPerformance_OpMemory::MergeFrom(const OpPerformance_OpMemory& from) {
  OpPerformance_OpMemory* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.OpPerformance.OpMemory)
  GOOGLE_DCHECK_NE(&from, _this);  // Self-merge is a caller bug.
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.output_memory_.MergeFrom(from._impl_.output_memory_);
  if (from._internal_temp_memory() != 0) {
    _this->_internal_set_temp_memory(from._internal_temp_memory());
  }
  if (from._internal_device_temp_memory() != 0) {
    _this->_internal_set_device_temp_memory(from._internal_device_temp_memory());
  }
  if (from._internal_persistent_memory() != 0) {
    _this->_internal_set_persistent_memory(from._internal_persistent_memory());
  }
  if (from._internal_device_persistent_memory() != 0) {
    _this->_internal_set_device_persistent_memory(from._internal_device_persistent_memory());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
1829 
// Replaces contents with a copy of `from` (Clear + MergeFrom); self-copy no-op.
void OpPerformance_OpMemory::CopyFrom(const OpPerformance_OpMemory& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.OpPerformance.OpMemory)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
1836 
// Always true: proto3 messages have no required fields to validate.
bool OpPerformance_OpMemory::IsInitialized() const {
  return true;
}
1840 
// O(1) swap: metadata and repeated field swap their internals; the four int64
// scalars form one contiguous trivially-copyable range in _impl_ and are
// swapped as raw bytes via memswap.
void OpPerformance_OpMemory::InternalSwap(OpPerformance_OpMemory* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.output_memory_.InternalSwap(&other->_impl_.output_memory_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(OpPerformance_OpMemory, _impl_.device_persistent_memory_)
      + sizeof(OpPerformance_OpMemory::_impl_.device_persistent_memory_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(OpPerformance_OpMemory, _impl_.temp_memory_)>(
          reinterpret_cast<char*>(&_impl_.temp_memory_),
          reinterpret_cast<char*>(&other->_impl_.temp_memory_));
}
1852 
// Fully-qualified proto type name (lite runtime's substitute for descriptors).
std::string OpPerformance_OpMemory::GetTypeName() const {
  return "tensorflow.OpPerformance.OpMemory";
}
1856 
1857 
1858 // ===================================================================
1859 
// Generated private-access helper: unchecked reference accessors for the
// submessage fields (including both members of the execution_time oneof).
class OpPerformance::_Internal {
 public:
  static const ::tensorflow::OpInfo& op(const OpPerformance* msg);
  static const ::tensorflow::SessionInfo& session_info(const OpPerformance* msg);
  static const ::tensorflow::NormalDistribution& execution_time_normal(const OpPerformance* msg);
  static const ::tensorflow::LogNormalDistribution& execution_time_log_normal(const OpPerformance* msg);
  static const ::tensorflow::OpPerformance_OpMemory& op_memory(const OpPerformance* msg);
};
1868 
// Unchecked accessor: dereferences op_ directly, so the caller must ensure the
// field is set (pointer non-null) first.
const ::tensorflow::OpInfo&
OpPerformance::_Internal::op(const OpPerformance* msg) {
  return *msg->_impl_.op_;
}
// Unchecked accessor: caller must ensure session_info_ is non-null.
const ::tensorflow::SessionInfo&
OpPerformance::_Internal::session_info(const OpPerformance* msg) {
  return *msg->_impl_.session_info_;
}
// Unchecked oneof accessor: valid only while execution_time_normal is the
// active member of the execution_time oneof union.
const ::tensorflow::NormalDistribution&
OpPerformance::_Internal::execution_time_normal(const OpPerformance* msg) {
  return *msg->_impl_.execution_time_.execution_time_normal_;
}
// Unchecked oneof accessor: valid only while execution_time_log_normal is the
// active member of the execution_time oneof union.
const ::tensorflow::LogNormalDistribution&
OpPerformance::_Internal::execution_time_log_normal(const OpPerformance* msg) {
  return *msg->_impl_.execution_time_.execution_time_log_normal_;
}
// Unchecked accessor: caller must ensure op_memory_ is non-null.
const ::tensorflow::OpPerformance_OpMemory&
OpPerformance::_Internal::op_memory(const OpPerformance* msg) {
  return *msg->_impl_.op_memory_;
}
// Takes ownership of `execution_time_normal` and installs it as the active
// member of the execution_time oneof (clearing any previous member first).
// If the submessage lives on a different arena than this message,
// GetOwnedMessage produces an equivalent object owned by this message's arena
// (or heap). Passing nullptr just clears the oneof.
void OpPerformance::set_allocated_execution_time_normal(::tensorflow::NormalDistribution* execution_time_normal) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  clear_execution_time();
  if (execution_time_normal) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
      ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(execution_time_normal);
    if (message_arena != submessage_arena) {
      execution_time_normal = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, execution_time_normal, submessage_arena);
    }
    set_has_execution_time_normal();
    _impl_.execution_time_.execution_time_normal_ = execution_time_normal;
  }
  // @@protoc_insertion_point(field_set_allocated:tensorflow.OpPerformance.execution_time_normal)
}
// Takes ownership of `execution_time_log_normal` and installs it as the active
// member of the execution_time oneof (clearing any previous member first).
// Cross-arena submessages are reconciled via GetOwnedMessage; nullptr clears.
void OpPerformance::set_allocated_execution_time_log_normal(::tensorflow::LogNormalDistribution* execution_time_log_normal) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  clear_execution_time();
  if (execution_time_log_normal) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
      ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(execution_time_log_normal);
    if (message_arena != submessage_arena) {
      execution_time_log_normal = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, execution_time_log_normal, submessage_arena);
    }
    set_has_execution_time_log_normal();
    _impl_.execution_time_.execution_time_log_normal_ = execution_time_log_normal;
  }
  // @@protoc_insertion_point(field_set_allocated:tensorflow.OpPerformance.execution_time_log_normal)
}
// Arena constructor: base-class setup then field initialization in SharedCtor.
OpPerformance::OpPerformance(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.OpPerformance)
}
// Copy constructor (never on an arena). Deep-copies set submessages, bulk-
// copies the contiguous POD scalar range [temporary_memory_size_ ..
// memory_efficiency_] with one memcpy, and clones the active execution_time
// oneof member by constructing a fresh submessage and MergeFrom-ing into it.
OpPerformance::OpPerformance(const OpPerformance& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  OpPerformance* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.node_){}
    , decltype(_impl_.op_){nullptr}
    , decltype(_impl_.op_memory_){nullptr}
    , decltype(_impl_.session_info_){nullptr}
    , decltype(_impl_.temporary_memory_size_){}
    , decltype(_impl_.compute_cost_){}
    , decltype(_impl_.compute_efficiency_){}
    , decltype(_impl_.compute_time_){}
    , decltype(_impl_.memory_time_){}
    , decltype(_impl_.memory_efficiency_){}
    , decltype(_impl_.execution_time_){}
    , /*decltype(_impl_._cached_size_)*/{}
    , /*decltype(_impl_._oneof_case_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.node_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.node_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_node().empty()) {
    _this->_impl_.node_.Set(from._internal_node(),
      _this->GetArenaForAllocation());
  }
  // Submessage pointers were initialized to nullptr above; only deep-copy the
  // ones actually set in `from`.
  if (from._internal_has_op()) {
    _this->_impl_.op_ = new ::tensorflow::OpInfo(*from._impl_.op_);
  }
  if (from._internal_has_op_memory()) {
    _this->_impl_.op_memory_ = new ::tensorflow::OpPerformance_OpMemory(*from._impl_.op_memory_);
  }
  if (from._internal_has_session_info()) {
    _this->_impl_.session_info_ = new ::tensorflow::SessionInfo(*from._impl_.session_info_);
  }
  ::memcpy(&_impl_.temporary_memory_size_, &from._impl_.temporary_memory_size_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.memory_efficiency_) -
    reinterpret_cast<char*>(&_impl_.temporary_memory_size_)) + sizeof(_impl_.memory_efficiency_));
  clear_has_execution_time();
  switch (from.execution_time_case()) {
    case kExecutionTimeNormal: {
      _this->_internal_mutable_execution_time_normal()->::tensorflow::NormalDistribution::MergeFrom(
          from._internal_execution_time_normal());
      break;
    }
    case kExecutionTimeLogNormal: {
      _this->_internal_mutable_execution_time_log_normal()->::tensorflow::LogNormalDistribution::MergeFrom(
          from._internal_execution_time_log_normal());
      break;
    }
    case EXECUTION_TIME_NOT_SET: {
      break;
    }
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.OpPerformance)
}
1982 
// Common field initialization for all constructors: zero scalars, null
// submessage pointers, default-init the node string, and mark the
// execution_time oneof as not set.
inline void OpPerformance::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.node_){}
    , decltype(_impl_.op_){nullptr}
    , decltype(_impl_.op_memory_){nullptr}
    , decltype(_impl_.session_info_){nullptr}
    , decltype(_impl_.temporary_memory_size_){::int64_t{0}}
    , decltype(_impl_.compute_cost_){::int64_t{0}}
    , decltype(_impl_.compute_efficiency_){0}
    , decltype(_impl_.compute_time_){::int64_t{0}}
    , decltype(_impl_.memory_time_){::int64_t{0}}
    , decltype(_impl_.memory_efficiency_){0}
    , decltype(_impl_.execution_time_){}
    , /*decltype(_impl_._cached_size_)*/{}
    , /*decltype(_impl_._oneof_case_)*/{}
  };
  _impl_.node_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.node_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  clear_has_execution_time();
}
2008 
// Destructor: arena-allocated messages skip field destruction (the arena
// reclaims memory in bulk); heap messages run SharedDtor.
OpPerformance::~OpPerformance() {
  // @@protoc_insertion_point(destructor:tensorflow.OpPerformance)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
2017 
// Heap-only teardown: destroys the node string, frees owned submessages
// (guarded so the default instance never deletes its members), and releases
// the active execution_time oneof member. Must not run for arena instances.
inline void OpPerformance::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.node_.Destroy();
  if (this != internal_default_instance()) delete _impl_.op_;
  if (this != internal_default_instance()) delete _impl_.op_memory_;
  if (this != internal_default_instance()) delete _impl_.session_info_;
  if (has_execution_time()) {
    clear_execution_time();
  }
}
2028 
// Stores the byte size computed by ByteSizeLong (atomic cache, hence const).
void OpPerformance::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
2032 
// Releases the active member of the execution_time oneof (deleting it only
// when heap-owned; arena-owned submessages are freed with the arena) and
// resets the oneof case to "not set".
void OpPerformance::clear_execution_time() {
// @@protoc_insertion_point(one_of_clear_start:tensorflow.OpPerformance)
  switch (execution_time_case()) {
    case kExecutionTimeNormal: {
      if (GetArenaForAllocation() == nullptr) {
        delete _impl_.execution_time_.execution_time_normal_;
      }
      break;
    }
    case kExecutionTimeLogNormal: {
      if (GetArenaForAllocation() == nullptr) {
        delete _impl_.execution_time_.execution_time_log_normal_;
      }
      break;
    }
    case EXECUTION_TIME_NOT_SET: {
      break;
    }
  }
  _impl_._oneof_case_[0] = EXECUTION_TIME_NOT_SET;
}
2054 
2055 
// Resets every field to its default: empties the node string, frees heap-owned
// submessages and nulls their pointers, zeroes the contiguous POD scalar range
// with one memset, clears the oneof, and drops unknown fields.
void OpPerformance::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.OpPerformance)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.node_.ClearToEmpty();
  if (GetArenaForAllocation() == nullptr && _impl_.op_ != nullptr) {
    delete _impl_.op_;
  }
  _impl_.op_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.op_memory_ != nullptr) {
    delete _impl_.op_memory_;
  }
  _impl_.op_memory_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.session_info_ != nullptr) {
    delete _impl_.session_info_;
  }
  _impl_.session_info_ = nullptr;
  ::memset(&_impl_.temporary_memory_size_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.memory_efficiency_) -
      reinterpret_cast<char*>(&_impl_.temporary_memory_size_)) + sizeof(_impl_.memory_efficiency_));
  clear_execution_time();
  _internal_metadata_.Clear<std::string>();
}
2081 
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)2082 const char* OpPerformance::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
2083 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
2084   while (!ctx->Done(&ptr)) {
2085     ::uint32_t tag;
2086     ptr = ::_pbi::ReadTag(ptr, &tag);
2087     switch (tag >> 3) {
2088       // .tensorflow.OpInfo op = 1;
2089       case 1:
2090         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
2091           ptr = ctx->ParseMessage(_internal_mutable_op(), ptr);
2092           CHK_(ptr);
2093         } else {
2094           goto handle_unusual;
2095         }
2096         continue;
2097       // int64 temporary_memory_size = 2;
2098       case 2:
2099         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
2100           _impl_.temporary_memory_size_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
2101           CHK_(ptr);
2102         } else {
2103           goto handle_unusual;
2104         }
2105         continue;
2106       // int64 compute_cost = 3;
2107       case 3:
2108         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
2109           _impl_.compute_cost_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
2110           CHK_(ptr);
2111         } else {
2112           goto handle_unusual;
2113         }
2114         continue;
2115       // double compute_efficiency = 4;
2116       case 4:
2117         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 33)) {
2118           _impl_.compute_efficiency_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
2119           ptr += sizeof(double);
2120         } else {
2121           goto handle_unusual;
2122         }
2123         continue;
2124       // string node = 5;
2125       case 5:
2126         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
2127           auto str = _internal_mutable_node();
2128           ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
2129           CHK_(ptr);
2130           CHK_(::_pbi::VerifyUTF8(str, nullptr));
2131         } else {
2132           goto handle_unusual;
2133         }
2134         continue;
2135       // int64 compute_time = 6;
2136       case 6:
2137         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 48)) {
2138           _impl_.compute_time_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
2139           CHK_(ptr);
2140         } else {
2141           goto handle_unusual;
2142         }
2143         continue;
2144       // int64 memory_time = 7;
2145       case 7:
2146         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) {
2147           _impl_.memory_time_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
2148           CHK_(ptr);
2149         } else {
2150           goto handle_unusual;
2151         }
2152         continue;
2153       // double memory_efficiency = 8;
2154       case 8:
2155         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 65)) {
2156           _impl_.memory_efficiency_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
2157           ptr += sizeof(double);
2158         } else {
2159           goto handle_unusual;
2160         }
2161         continue;
2162       // .tensorflow.OpPerformance.OpMemory op_memory = 9;
2163       case 9:
2164         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 74)) {
2165           ptr = ctx->ParseMessage(_internal_mutable_op_memory(), ptr);
2166           CHK_(ptr);
2167         } else {
2168           goto handle_unusual;
2169         }
2170         continue;
2171       // .tensorflow.NormalDistribution execution_time_normal = 10;
2172       case 10:
2173         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 82)) {
2174           ptr = ctx->ParseMessage(_internal_mutable_execution_time_normal(), ptr);
2175           CHK_(ptr);
2176         } else {
2177           goto handle_unusual;
2178         }
2179         continue;
2180       // .tensorflow.LogNormalDistribution execution_time_log_normal = 11;
2181       case 11:
2182         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 90)) {
2183           ptr = ctx->ParseMessage(_internal_mutable_execution_time_log_normal(), ptr);
2184           CHK_(ptr);
2185         } else {
2186           goto handle_unusual;
2187         }
2188         continue;
2189       // .tensorflow.SessionInfo session_info = 12 [deprecated = true];
2190       case 12:
2191         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 98)) {
2192           ptr = ctx->ParseMessage(_internal_mutable_session_info(), ptr);
2193           CHK_(ptr);
2194         } else {
2195           goto handle_unusual;
2196         }
2197         continue;
2198       default:
2199         goto handle_unusual;
2200     }  // switch
2201   handle_unusual:
2202     if ((tag == 0) || ((tag & 7) == 4)) {
2203       CHK_(ptr);
2204       ctx->SetLastTag(tag);
2205       goto message_done;
2206     }
2207     ptr = UnknownFieldParse(
2208         tag,
2209         _internal_metadata_.mutable_unknown_fields<std::string>(),
2210         ptr, ctx);
2211     CHK_(ptr != nullptr);
2212   }  // while
2213 message_done:
2214   return ptr;
2215 failure:
2216   ptr = nullptr;
2217   goto message_done;
2218 #undef CHK_
2219 }
2220 
// Serializes every set field of OpPerformance into `target` using the fast
// EpsCopyOutputStream path and returns the advanced write pointer.
// Submessage sizes reuse the values cached by a preceding ByteSizeLong() call.
::uint8_t* OpPerformance::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.OpPerformance)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.OpInfo op = 1;
  if (this->_internal_has_op()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(1, _Internal::op(this),
        _Internal::op(this).GetCachedSize(), target, stream);
  }

  // int64 temporary_memory_size = 2;
  if (this->_internal_temporary_memory_size() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_temporary_memory_size(), target);
  }

  // int64 compute_cost = 3;
  if (this->_internal_compute_cost() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_compute_cost(), target);
  }

  // double compute_efficiency = 4;
  // Proto3 implicit presence for doubles compares the raw bit pattern:
  // only an exact +0.0 is skipped; -0.0 and NaN payloads are still written.
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_compute_efficiency = this->_internal_compute_efficiency();
  ::uint64_t raw_compute_efficiency;
  memcpy(&raw_compute_efficiency, &tmp_compute_efficiency, sizeof(tmp_compute_efficiency));
  if (raw_compute_efficiency != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(4, this->_internal_compute_efficiency(), target);
  }

  // string node = 5;
  if (!this->_internal_node().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_node().data(), static_cast<int>(this->_internal_node().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.OpPerformance.node");
    target = stream->WriteStringMaybeAliased(
        5, this->_internal_node(), target);
  }

  // int64 compute_time = 6;
  if (this->_internal_compute_time() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(6, this->_internal_compute_time(), target);
  }

  // int64 memory_time = 7;
  if (this->_internal_memory_time() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(7, this->_internal_memory_time(), target);
  }

  // double memory_efficiency = 8;
  // Same bit-pattern presence test as compute_efficiency above.
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_memory_efficiency = this->_internal_memory_efficiency();
  ::uint64_t raw_memory_efficiency;
  memcpy(&raw_memory_efficiency, &tmp_memory_efficiency, sizeof(tmp_memory_efficiency));
  if (raw_memory_efficiency != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteDoubleToArray(8, this->_internal_memory_efficiency(), target);
  }

  // .tensorflow.OpPerformance.OpMemory op_memory = 9;
  if (this->_internal_has_op_memory()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(9, _Internal::op_memory(this),
        _Internal::op_memory(this).GetCachedSize(), target, stream);
  }

  // At most one member of the execution_time oneof is set, so at most one
  // of the next two branches fires.
  // .tensorflow.NormalDistribution execution_time_normal = 10;
  if (_internal_has_execution_time_normal()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(10, _Internal::execution_time_normal(this),
        _Internal::execution_time_normal(this).GetCachedSize(), target, stream);
  }

  // .tensorflow.LogNormalDistribution execution_time_log_normal = 11;
  if (_internal_has_execution_time_log_normal()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(11, _Internal::execution_time_log_normal(this),
        _Internal::execution_time_log_normal(this).GetCachedSize(), target, stream);
  }

  // .tensorflow.SessionInfo session_info = 12 [deprecated = true];
  if (this->_internal_has_session_info()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(12, _Internal::session_info(this),
        _Internal::session_info(this).GetCachedSize(), target, stream);
  }

  // Lite runtime keeps unknown fields as a raw byte string; emit verbatim.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.OpPerformance)
  return target;
}
2323 
// Computes the total serialized size in bytes of this message and caches it
// (SetCachedSize) so the subsequent _InternalSerialize pass can reuse it.
// Presence tests mirror _InternalSerialize exactly; the two must agree or the
// cached size would be wrong.
size_t OpPerformance::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.OpPerformance)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string node = 5;
  if (!this->_internal_node().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_node());
  }

  // .tensorflow.OpInfo op = 1;
  if (this->_internal_has_op()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.op_);
  }

  // .tensorflow.OpPerformance.OpMemory op_memory = 9;
  if (this->_internal_has_op_memory()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.op_memory_);
  }

  // .tensorflow.SessionInfo session_info = 12 [deprecated = true];
  if (this->_internal_has_session_info()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.session_info_);
  }

  // int64 temporary_memory_size = 2;
  if (this->_internal_temporary_memory_size() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_temporary_memory_size());
  }

  // int64 compute_cost = 3;
  if (this->_internal_compute_cost() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_compute_cost());
  }

  // double compute_efficiency = 4;
  // Bit-pattern presence test: only an exact +0.0 counts as unset, matching
  // the serializer. A set double always costs 1 tag byte + 8 payload bytes.
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_compute_efficiency = this->_internal_compute_efficiency();
  ::uint64_t raw_compute_efficiency;
  memcpy(&raw_compute_efficiency, &tmp_compute_efficiency, sizeof(tmp_compute_efficiency));
  if (raw_compute_efficiency != 0) {
    total_size += 1 + 8;
  }

  // int64 compute_time = 6;
  if (this->_internal_compute_time() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_compute_time());
  }

  // int64 memory_time = 7;
  if (this->_internal_memory_time() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_memory_time());
  }

  // double memory_efficiency = 8;
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_memory_efficiency = this->_internal_memory_efficiency();
  ::uint64_t raw_memory_efficiency;
  memcpy(&raw_memory_efficiency, &tmp_memory_efficiency, sizeof(tmp_memory_efficiency));
  if (raw_memory_efficiency != 0) {
    total_size += 1 + 8;
  }

  // Only the active member of the execution_time oneof contributes.
  switch (execution_time_case()) {
    // .tensorflow.NormalDistribution execution_time_normal = 10;
    case kExecutionTimeNormal: {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          *_impl_.execution_time_.execution_time_normal_);
      break;
    }
    // .tensorflow.LogNormalDistribution execution_time_log_normal = 11;
    case kExecutionTimeLogNormal: {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          *_impl_.execution_time_.execution_time_log_normal_);
      break;
    }
    case EXECUTION_TIME_NOT_SET: {
      break;
    }
  }
  // Unknown fields are stored as raw bytes in the lite runtime.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
2424 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)2425 void OpPerformance::CheckTypeAndMergeFrom(
2426     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
2427   MergeFrom(*::_pbi::DownCast<const OpPerformance*>(
2428       &from));
2429 }
2430 
// Field-wise merge of `from` into this message. Scalars overwrite only when
// non-zero in `from` (proto3 implicit presence), submessages merge
// recursively, and the execution_time oneof merges whichever member `from`
// has set. Unknown fields are concatenated. `from` must not alias `this`.
void OpPerformance::MergeFrom(const OpPerformance& from) {
  OpPerformance* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.OpPerformance)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_node().empty()) {
    _this->_internal_set_node(from._internal_node());
  }
  if (from._internal_has_op()) {
    _this->_internal_mutable_op()->::tensorflow::OpInfo::MergeFrom(
        from._internal_op());
  }
  if (from._internal_has_op_memory()) {
    _this->_internal_mutable_op_memory()->::tensorflow::OpPerformance_OpMemory::MergeFrom(
        from._internal_op_memory());
  }
  if (from._internal_has_session_info()) {
    _this->_internal_mutable_session_info()->::tensorflow::SessionInfo::MergeFrom(
        from._internal_session_info());
  }
  if (from._internal_temporary_memory_size() != 0) {
    _this->_internal_set_temporary_memory_size(from._internal_temporary_memory_size());
  }
  if (from._internal_compute_cost() != 0) {
    _this->_internal_set_compute_cost(from._internal_compute_cost());
  }
  // Bitwise non-zero test so a -0.0 or NaN in `from` still overwrites.
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_compute_efficiency = from._internal_compute_efficiency();
  ::uint64_t raw_compute_efficiency;
  memcpy(&raw_compute_efficiency, &tmp_compute_efficiency, sizeof(tmp_compute_efficiency));
  if (raw_compute_efficiency != 0) {
    _this->_internal_set_compute_efficiency(from._internal_compute_efficiency());
  }
  if (from._internal_compute_time() != 0) {
    _this->_internal_set_compute_time(from._internal_compute_time());
  }
  if (from._internal_memory_time() != 0) {
    _this->_internal_set_memory_time(from._internal_memory_time());
  }
  static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size.");
  double tmp_memory_efficiency = from._internal_memory_efficiency();
  ::uint64_t raw_memory_efficiency;
  memcpy(&raw_memory_efficiency, &tmp_memory_efficiency, sizeof(tmp_memory_efficiency));
  if (raw_memory_efficiency != 0) {
    _this->_internal_set_memory_efficiency(from._internal_memory_efficiency());
  }
  // If `from` has no oneof member set, this message's oneof is untouched.
  switch (from.execution_time_case()) {
    case kExecutionTimeNormal: {
      _this->_internal_mutable_execution_time_normal()->::tensorflow::NormalDistribution::MergeFrom(
          from._internal_execution_time_normal());
      break;
    }
    case kExecutionTimeLogNormal: {
      _this->_internal_mutable_execution_time_log_normal()->::tensorflow::LogNormalDistribution::MergeFrom(
          from._internal_execution_time_log_normal());
      break;
    }
    case EXECUTION_TIME_NOT_SET: {
      break;
    }
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
2496 
CopyFrom(const OpPerformance & from)2497 void OpPerformance::CopyFrom(const OpPerformance& from) {
2498 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.OpPerformance)
2499   if (&from == this) return;
2500   Clear();
2501   MergeFrom(from);
2502 }
2503 
// Proto3 messages have no required fields, so every instance is initialized.
bool OpPerformance::IsInitialized() const {
  return true;
}
2507 
// Swaps all contents with `other` without copying element data. The plain
// members laid out contiguously between `op_` and `memory_efficiency_` are
// exchanged with one byte-wise memswap over that field range; the string
// field uses the arena-aware ArenaStringPtr swap.
void OpPerformance::InternalSwap(OpPerformance* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.node_, lhs_arena,
      &other->_impl_.node_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(OpPerformance, _impl_.memory_efficiency_)
      + sizeof(OpPerformance::_impl_.memory_efficiency_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(OpPerformance, _impl_.op_)>(
          reinterpret_cast<char*>(&_impl_.op_),
          reinterpret_cast<char*>(&other->_impl_.op_));
  // The oneof payload and its case discriminator travel together.
  swap(_impl_.execution_time_, other->_impl_.execution_time_);
  swap(_impl_._oneof_case_[0], other->_impl_._oneof_case_[0]);
}
2526 
GetTypeName() const2527 std::string OpPerformance::GetTypeName() const {
2528   return "tensorflow.OpPerformance";
2529 }
2530 
2531 
2532 // ===================================================================
2533 
// Helper granting generated free functions access to private members of
// OpPerformanceList; empty because the message needs no privileged accessors.
class OpPerformanceList::_Internal {
 public:
};
2537 
// Arena-enabled constructor; `is_message_owned` marks instances whose
// lifetime is managed directly by the arena.
OpPerformanceList::OpPerformanceList(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.OpPerformanceList)
}
// Copy constructor: deep-copies the repeated op_performance field and merges
// any preserved unknown fields; the copy lives on the heap (no arena).
OpPerformanceList::OpPerformanceList(const OpPerformanceList& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  OpPerformanceList* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.op_performance_){from._impl_.op_performance_}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  // @@protoc_insertion_point(copy_constructor:tensorflow.OpPerformanceList)
}
2554 
// Placement-constructs the Impl_ aggregate; the repeated field is bound to
// `arena` so its elements follow the message's allocation strategy.
inline void OpPerformanceList::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.op_performance_){arena}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
2564 
// Destructor: arena-owned instances return early — the arena reclaims their
// storage in bulk, so SharedDtor must not run for them.
OpPerformanceList::~OpPerformanceList() {
  // @@protoc_insertion_point(destructor:tensorflow.OpPerformanceList)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}
2573 
// Destroys heap-owned field storage; only valid for non-arena messages
// (enforced by the DCHECK).
inline void OpPerformanceList::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.op_performance_.~RepeatedPtrField();
}
2578 
// Stores the size computed by ByteSizeLong() for reuse during serialization.
void OpPerformanceList::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
2582 
// Resets the message to its default state: drops all op_performance elements
// and any preserved unknown fields.
void OpPerformanceList::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.OpPerformanceList)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.op_performance_.Clear();
  _internal_metadata_.Clear<std::string>();
}
2592 
// Wire-format parser for OpPerformanceList. Returns the position just past
// the parsed region, or nullptr on a malformed payload (via the CHK_ macro's
// jump to `failure`).
const char* OpPerformanceList::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // repeated .tensorflow.OpPerformance op_performance = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr -= 1;
          // Fast path: consume back-to-back elements of the same repeated
          // field without re-reading the tag through the outer switch.
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_op_performance(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or an end-group tag terminates this message; any other
    // unrecognized tag is preserved as an unknown field.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
2635 
// Serializes every op_performance element (field 1) followed by any preserved
// unknown-field bytes; returns the advanced write pointer. Element sizes come
// from the cache filled by a preceding ByteSizeLong() pass.
::uint8_t* OpPerformanceList::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.OpPerformanceList)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // repeated .tensorflow.OpPerformance op_performance = 1;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_op_performance_size()); i < n; i++) {
    const auto& repfield = this->_internal_op_performance(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(1, repfield, repfield.GetCachedSize(), target, stream);
  }

  // Unknown fields are stored (and re-emitted) as raw bytes in the lite
  // runtime.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.OpPerformanceList)
  return target;
}
2657 
// Computes the total serialized size: one tag byte plus the message size for
// each op_performance element, plus raw unknown-field bytes. The result is
// cached for the subsequent serialization pass.
size_t OpPerformanceList::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.OpPerformanceList)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.OpPerformance op_performance = 1;
  total_size += 1UL * this->_internal_op_performance_size();
  for (const auto& msg : this->_impl_.op_performance_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
2680 
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)2681 void OpPerformanceList::CheckTypeAndMergeFrom(
2682     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
2683   MergeFrom(*::_pbi::DownCast<const OpPerformanceList*>(
2684       &from));
2685 }
2686 
// Appends (does not replace) all of `from`'s op_performance elements to this
// message and merges unknown fields. `from` must not alias `this`.
void OpPerformanceList::MergeFrom(const OpPerformanceList& from) {
  OpPerformanceList* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.OpPerformanceList)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.op_performance_.MergeFrom(from._impl_.op_performance_);
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
2697 
CopyFrom(const OpPerformanceList & from)2698 void OpPerformanceList::CopyFrom(const OpPerformanceList& from) {
2699 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.OpPerformanceList)
2700   if (&from == this) return;
2701   Clear();
2702   MergeFrom(from);
2703 }
2704 
// Proto3 messages have no required fields, so every instance is initialized.
bool OpPerformanceList::IsInitialized() const {
  return true;
}
2708 
// Constant-time swap: exchanges the repeated field's internal pointers and
// the unknown-field metadata; no elements are copied.
void OpPerformanceList::InternalSwap(OpPerformanceList* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.op_performance_.InternalSwap(&other->_impl_.op_performance_);
}
2714 
GetTypeName() const2715 std::string OpPerformanceList::GetTypeName() const {
2716   return "tensorflow.OpPerformanceList";
2717 }
2718 
2719 
2720 // @@protoc_insertion_point(namespace_scope)
2721 }  // namespace tensorflow
PROTOBUF_NAMESPACE_OPEN
// Arena::CreateMaybeMessage<T> specializations for every message generated
// from this .proto: each routes arena-aware construction through
// Arena::CreateMessageInternal so instances land on `arena` when one is
// supplied (or on the heap when `arena` is null).
template<> PROTOBUF_NOINLINE ::tensorflow::SessionInfo*
Arena::CreateMaybeMessage< ::tensorflow::SessionInfo >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::SessionInfo >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::OpInfo_AttrEntry_DoNotUse*
Arena::CreateMaybeMessage< ::tensorflow::OpInfo_AttrEntry_DoNotUse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::OpInfo_AttrEntry_DoNotUse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::OpInfo_TensorProperties*
Arena::CreateMaybeMessage< ::tensorflow::OpInfo_TensorProperties >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::OpInfo_TensorProperties >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::OpInfo*
Arena::CreateMaybeMessage< ::tensorflow::OpInfo >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::OpInfo >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::NormalDistribution*
Arena::CreateMaybeMessage< ::tensorflow::NormalDistribution >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::NormalDistribution >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::LogNormalDistribution*
Arena::CreateMaybeMessage< ::tensorflow::LogNormalDistribution >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::LogNormalDistribution >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::OpPerformance_OpMemory*
Arena::CreateMaybeMessage< ::tensorflow::OpPerformance_OpMemory >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::OpPerformance_OpMemory >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::OpPerformance*
Arena::CreateMaybeMessage< ::tensorflow::OpPerformance >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::OpPerformance >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::OpPerformanceList*
Arena::CreateMaybeMessage< ::tensorflow::OpPerformanceList >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::OpPerformanceList >(arena);
}
PROTOBUF_NAMESPACE_CLOSE
2760 
2761 // @@protoc_insertion_point(global_scope)
2762 #include <google/protobuf/port_undef.inc>
2763