// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/framework/step_stats.proto

#include "tensorflow/core/framework/step_stats.pb.h"

#include <algorithm>
#include <cstdint>

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/wire_format_lite.h>
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>

PROTOBUF_PRAGMA_INIT_SEG

namespace _pb = ::PROTOBUF_NAMESPACE_ID;
namespace _pbi = _pb::internal;

namespace tensorflow {
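// Note: the *DefaultTypeInternal wrappers below hold each default instance in
// a union, so the member's destructor is never implicitly run, and the
// variables are marked PROTOBUF_ATTRIBUTE_NO_DESTROY and constant-initialized.
// This is the generated-code pattern for sidestepping static initialization
// and teardown ordering problems.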
PROTOBUF_CONSTEXPR AllocationRecord::AllocationRecord(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.alloc_micros_)*/::int64_t{0}
  , /*decltype(_impl_.alloc_bytes_)*/::int64_t{0}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct AllocationRecordDefaultTypeInternal {
  PROTOBUF_CONSTEXPR AllocationRecordDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~AllocationRecordDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    AllocationRecord _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 AllocationRecordDefaultTypeInternal _AllocationRecord_default_instance_;
PROTOBUF_CONSTEXPR AllocatorMemoryUsed::AllocatorMemoryUsed(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.allocation_records_)*/{}
  , /*decltype(_impl_.allocator_name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.total_bytes_)*/::int64_t{0}
  , /*decltype(_impl_.peak_bytes_)*/::int64_t{0}
  , /*decltype(_impl_.live_bytes_)*/::int64_t{0}
  , /*decltype(_impl_.allocator_bytes_in_use_)*/::int64_t{0}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct AllocatorMemoryUsedDefaultTypeInternal {
  PROTOBUF_CONSTEXPR AllocatorMemoryUsedDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~AllocatorMemoryUsedDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    AllocatorMemoryUsed _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 AllocatorMemoryUsedDefaultTypeInternal _AllocatorMemoryUsed_default_instance_;
PROTOBUF_CONSTEXPR NodeOutput::NodeOutput(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.tensor_description_)*/nullptr
  , /*decltype(_impl_.slot_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct NodeOutputDefaultTypeInternal {
  PROTOBUF_CONSTEXPR NodeOutputDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~NodeOutputDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    NodeOutput _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 NodeOutputDefaultTypeInternal _NodeOutput_default_instance_;
PROTOBUF_CONSTEXPR MemoryStats::MemoryStats(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.persistent_tensor_alloc_ids_)*/{}
  , /*decltype(_impl_._persistent_tensor_alloc_ids_cached_byte_size_)*/{0}
  , /*decltype(_impl_.device_persistent_tensor_alloc_ids_)*/{}
  , /*decltype(_impl_._device_persistent_tensor_alloc_ids_cached_byte_size_)*/{0}
  , /*decltype(_impl_.temp_memory_size_)*/::int64_t{0}
  , /*decltype(_impl_.device_temp_memory_size_)*/::int64_t{0}
  , /*decltype(_impl_.persistent_memory_size_)*/::int64_t{0}
  , /*decltype(_impl_.device_persistent_memory_size_)*/::int64_t{0}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct MemoryStatsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR MemoryStatsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~MemoryStatsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    MemoryStats _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MemoryStatsDefaultTypeInternal _MemoryStats_default_instance_;
PROTOBUF_CONSTEXPR NodeExecStats::NodeExecStats(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.memory_)*/{}
  , /*decltype(_impl_.output_)*/{}
  , /*decltype(_impl_.referenced_tensor_)*/{}
  , /*decltype(_impl_.node_name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.timeline_label_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.memory_stats_)*/nullptr
  , /*decltype(_impl_.all_start_micros_)*/::int64_t{0}
  , /*decltype(_impl_.op_start_rel_micros_)*/::int64_t{0}
  , /*decltype(_impl_.op_end_rel_micros_)*/::int64_t{0}
  , /*decltype(_impl_.all_end_rel_micros_)*/::int64_t{0}
  , /*decltype(_impl_.scheduled_micros_)*/::int64_t{0}
  , /*decltype(_impl_.all_start_nanos_)*/::int64_t{0}
  , /*decltype(_impl_.op_start_rel_nanos_)*/::int64_t{0}
  , /*decltype(_impl_.op_end_rel_nanos_)*/::int64_t{0}
  , /*decltype(_impl_.all_end_rel_nanos_)*/::int64_t{0}
  , /*decltype(_impl_.scheduled_nanos_)*/::int64_t{0}
  , /*decltype(_impl_.thread_id_)*/0u
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct NodeExecStatsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR NodeExecStatsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~NodeExecStatsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    NodeExecStats _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 NodeExecStatsDefaultTypeInternal _NodeExecStats_default_instance_;
PROTOBUF_CONSTEXPR DeviceStepStats_ThreadNamesEntry_DoNotUse::DeviceStepStats_ThreadNamesEntry_DoNotUse(
    ::_pbi::ConstantInitialized) {}
struct DeviceStepStats_ThreadNamesEntry_DoNotUseDefaultTypeInternal {
  PROTOBUF_CONSTEXPR DeviceStepStats_ThreadNamesEntry_DoNotUseDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~DeviceStepStats_ThreadNamesEntry_DoNotUseDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    DeviceStepStats_ThreadNamesEntry_DoNotUse _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 DeviceStepStats_ThreadNamesEntry_DoNotUseDefaultTypeInternal _DeviceStepStats_ThreadNamesEntry_DoNotUse_default_instance_;
PROTOBUF_CONSTEXPR DeviceStepStats::DeviceStepStats(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.node_stats_)*/{}
  , /*decltype(_impl_.thread_names_)*/{}
  , /*decltype(_impl_.device_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct DeviceStepStatsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR DeviceStepStatsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~DeviceStepStatsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    DeviceStepStats _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 DeviceStepStatsDefaultTypeInternal _DeviceStepStats_default_instance_;
PROTOBUF_CONSTEXPR StepStats::StepStats(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.dev_stats_)*/{}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct StepStatsDefaultTypeInternal {
  PROTOBUF_CONSTEXPR StepStatsDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~StepStatsDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    StepStats _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 StepStatsDefaultTypeInternal _StepStats_default_instance_;
}  // namespace tensorflow
namespace tensorflow {

// ===================================================================

class AllocationRecord::_Internal {
 public:
};

AllocationRecord::AllocationRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.AllocationRecord)
}
AllocationRecord::AllocationRecord(const AllocationRecord& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  AllocationRecord* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.alloc_micros_){}
    , decltype(_impl_.alloc_bytes_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  ::memcpy(&_impl_.alloc_micros_, &from._impl_.alloc_micros_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.alloc_bytes_) -
    reinterpret_cast<char*>(&_impl_.alloc_micros_)) + sizeof(_impl_.alloc_bytes_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.AllocationRecord)
}

inline void AllocationRecord::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.alloc_micros_){::int64_t{0}}
    , decltype(_impl_.alloc_bytes_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

AllocationRecord::~AllocationRecord() {
  // @@protoc_insertion_point(destructor:tensorflow.AllocationRecord)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void AllocationRecord::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}

void AllocationRecord::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void AllocationRecord::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.AllocationRecord)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  ::memset(&_impl_.alloc_micros_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.alloc_bytes_) -
      reinterpret_cast<char*>(&_impl_.alloc_micros_)) + sizeof(_impl_.alloc_bytes_));
  _internal_metadata_.Clear<std::string>();
}

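// Parsing note: a wire-format tag encodes (field_number << 3) | wire_type,
// so the switch below dispatches on `tag >> 3` (the field number) while the
// byte comparisons check the full tag: 8 = field 1 varint, 16 = field 2
// varint.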
const char* AllocationRecord::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int64 alloc_micros = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.alloc_micros_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 alloc_bytes = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.alloc_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

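// Serialization note: this is a proto3 message with implicit field presence,
// so every scalar equal to its default (0) is simply skipped on the wire and
// reconstructed from the defaults by the parser.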
::uint8_t* AllocationRecord::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.AllocationRecord)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 alloc_micros = 1;
  if (this->_internal_alloc_micros() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_alloc_micros(), target);
  }

  // int64 alloc_bytes = 2;
  if (this->_internal_alloc_bytes() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_alloc_bytes(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.AllocationRecord)
  return target;
}

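// ByteSizeLong() doubles as the size-cache refresh: SetCachedSize() stores
// the computed size so a subsequent serialization pass can emit length
// prefixes via GetCachedSize() without recomputing them.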
size_t AllocationRecord::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.AllocationRecord)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // int64 alloc_micros = 1;
  if (this->_internal_alloc_micros() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_alloc_micros());
  }

  // int64 alloc_bytes = 2;
  if (this->_internal_alloc_bytes() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_alloc_bytes());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void AllocationRecord::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const AllocationRecord*>(
      &from));
}

void AllocationRecord::MergeFrom(const AllocationRecord& from) {
  AllocationRecord* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.AllocationRecord)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_alloc_micros() != 0) {
    _this->_internal_set_alloc_micros(from._internal_alloc_micros());
  }
  if (from._internal_alloc_bytes() != 0) {
    _this->_internal_set_alloc_bytes(from._internal_alloc_bytes());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void AllocationRecord::CopyFrom(const AllocationRecord& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.AllocationRecord)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool AllocationRecord::IsInitialized() const {
  return true;
}

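// Swap note: the scalar members of _impl_ are laid out contiguously, so
// InternalSwap byte-swaps the whole [alloc_micros_, alloc_bytes_] range in a
// single memswap<> call whose size comes from PROTOBUF_FIELD_OFFSET
// arithmetic.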
void AllocationRecord::InternalSwap(AllocationRecord* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(AllocationRecord, _impl_.alloc_bytes_)
      + sizeof(AllocationRecord::_impl_.alloc_bytes_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(AllocationRecord, _impl_.alloc_micros_)>(
          reinterpret_cast<char*>(&_impl_.alloc_micros_),
          reinterpret_cast<char*>(&other->_impl_.alloc_micros_));
}

std::string AllocationRecord::GetTypeName() const {
  return "tensorflow.AllocationRecord";
}
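
// Usage sketch (illustrative only, not emitted by protoc): these classes are
// MessageLite subclasses, so serialization goes through the lite API:
//
//   tensorflow::AllocationRecord rec;
//   rec.set_alloc_micros(12);
//   rec.set_alloc_bytes(4096);
//   std::string wire;
//   rec.SerializeToString(&wire);   // uses ByteSizeLong + _InternalSerialize
//   tensorflow::AllocationRecord back;
//   back.ParseFromString(wire);     // drives _InternalParse above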

// ===================================================================

class AllocatorMemoryUsed::_Internal {
 public:
};

AllocatorMemoryUsed::AllocatorMemoryUsed(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.AllocatorMemoryUsed)
}
AllocatorMemoryUsed::AllocatorMemoryUsed(const AllocatorMemoryUsed& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  AllocatorMemoryUsed* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.allocation_records_){from._impl_.allocation_records_}
    , decltype(_impl_.allocator_name_){}
    , decltype(_impl_.total_bytes_){}
    , decltype(_impl_.peak_bytes_){}
    , decltype(_impl_.live_bytes_){}
    , decltype(_impl_.allocator_bytes_in_use_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.allocator_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.allocator_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_allocator_name().empty()) {
    _this->_impl_.allocator_name_.Set(from._internal_allocator_name(),
      _this->GetArenaForAllocation());
  }
  ::memcpy(&_impl_.total_bytes_, &from._impl_.total_bytes_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.allocator_bytes_in_use_) -
    reinterpret_cast<char*>(&_impl_.total_bytes_)) + sizeof(_impl_.allocator_bytes_in_use_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.AllocatorMemoryUsed)
}

inline void AllocatorMemoryUsed::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.allocation_records_){arena}
    , decltype(_impl_.allocator_name_){}
    , decltype(_impl_.total_bytes_){::int64_t{0}}
    , decltype(_impl_.peak_bytes_){::int64_t{0}}
    , decltype(_impl_.live_bytes_){::int64_t{0}}
    , decltype(_impl_.allocator_bytes_in_use_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.allocator_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.allocator_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

AllocatorMemoryUsed::~AllocatorMemoryUsed() {
  // @@protoc_insertion_point(destructor:tensorflow.AllocatorMemoryUsed)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void AllocatorMemoryUsed::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.allocation_records_.~RepeatedPtrField();
  _impl_.allocator_name_.Destroy();
}

void AllocatorMemoryUsed::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void AllocatorMemoryUsed::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.AllocatorMemoryUsed)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.allocation_records_.Clear();
  _impl_.allocator_name_.ClearToEmpty();
  ::memset(&_impl_.total_bytes_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.allocator_bytes_in_use_) -
      reinterpret_cast<char*>(&_impl_.total_bytes_)) + sizeof(_impl_.allocator_bytes_in_use_));
  _internal_metadata_.Clear<std::string>();
}

const char* AllocatorMemoryUsed::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // string allocator_name = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          auto str = _internal_mutable_allocator_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 total_bytes = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.total_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 peak_bytes = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.peak_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 live_bytes = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          _impl_.live_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 allocator_bytes_in_use = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          _impl_.allocator_bytes_in_use_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
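      // Field 6 is a repeated message; after each element the loop peeks
      // with ExpectTag<50> so consecutive allocation_records entries are
      // consumed without re-entering the dispatch switch.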
      // repeated .tensorflow.AllocationRecord allocation_records = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_allocation_records(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<50>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* AllocatorMemoryUsed::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.AllocatorMemoryUsed)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // string allocator_name = 1;
  if (!this->_internal_allocator_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_allocator_name().data(), static_cast<int>(this->_internal_allocator_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.AllocatorMemoryUsed.allocator_name");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_allocator_name(), target);
  }

  // int64 total_bytes = 2;
  if (this->_internal_total_bytes() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_total_bytes(), target);
  }

  // int64 peak_bytes = 3;
  if (this->_internal_peak_bytes() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_peak_bytes(), target);
  }

  // int64 live_bytes = 4;
  if (this->_internal_live_bytes() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_live_bytes(), target);
  }

  // int64 allocator_bytes_in_use = 5;
  if (this->_internal_allocator_bytes_in_use() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(5, this->_internal_allocator_bytes_in_use(), target);
  }

  // repeated .tensorflow.AllocationRecord allocation_records = 6;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_allocation_records_size()); i < n; i++) {
    const auto& repfield = this->_internal_allocation_records(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(6, repfield, repfield.GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.AllocatorMemoryUsed)
  return target;
}

size_t AllocatorMemoryUsed::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.AllocatorMemoryUsed)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.AllocationRecord allocation_records = 6;
  total_size += 1UL * this->_internal_allocation_records_size();
  for (const auto& msg : this->_impl_.allocation_records_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // string allocator_name = 1;
  if (!this->_internal_allocator_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_allocator_name());
  }

  // int64 total_bytes = 2;
  if (this->_internal_total_bytes() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_total_bytes());
  }

  // int64 peak_bytes = 3;
  if (this->_internal_peak_bytes() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_peak_bytes());
  }

  // int64 live_bytes = 4;
  if (this->_internal_live_bytes() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_live_bytes());
  }

  // int64 allocator_bytes_in_use = 5;
  if (this->_internal_allocator_bytes_in_use() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_allocator_bytes_in_use());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void AllocatorMemoryUsed::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const AllocatorMemoryUsed*>(
      &from));
}

void AllocatorMemoryUsed::MergeFrom(const AllocatorMemoryUsed& from) {
  AllocatorMemoryUsed* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.AllocatorMemoryUsed)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.allocation_records_.MergeFrom(from._impl_.allocation_records_);
  if (!from._internal_allocator_name().empty()) {
    _this->_internal_set_allocator_name(from._internal_allocator_name());
  }
  if (from._internal_total_bytes() != 0) {
    _this->_internal_set_total_bytes(from._internal_total_bytes());
  }
  if (from._internal_peak_bytes() != 0) {
    _this->_internal_set_peak_bytes(from._internal_peak_bytes());
  }
  if (from._internal_live_bytes() != 0) {
    _this->_internal_set_live_bytes(from._internal_live_bytes());
  }
  if (from._internal_allocator_bytes_in_use() != 0) {
    _this->_internal_set_allocator_bytes_in_use(from._internal_allocator_bytes_in_use());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void AllocatorMemoryUsed::CopyFrom(const AllocatorMemoryUsed& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.AllocatorMemoryUsed)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool AllocatorMemoryUsed::IsInitialized() const {
  return true;
}

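// Swap note: InternalSwap is only used when both messages share an owning
// arena (the generated Swap() copies instead when arenas differ); the
// lhs/rhs arena arguments let ArenaStringPtr::InternalSwap choose between
// aliasing the string payloads and copying them (e.g. under
// PROTOBUF_FORCE_COPY_IN_SWAP builds).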
void AllocatorMemoryUsed::InternalSwap(AllocatorMemoryUsed* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.allocation_records_.InternalSwap(&other->_impl_.allocation_records_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.allocator_name_, lhs_arena,
      &other->_impl_.allocator_name_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(AllocatorMemoryUsed, _impl_.allocator_bytes_in_use_)
      + sizeof(AllocatorMemoryUsed::_impl_.allocator_bytes_in_use_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(AllocatorMemoryUsed, _impl_.total_bytes_)>(
          reinterpret_cast<char*>(&_impl_.total_bytes_),
          reinterpret_cast<char*>(&other->_impl_.total_bytes_));
}

std::string AllocatorMemoryUsed::GetTypeName() const {
  return "tensorflow.AllocatorMemoryUsed";
}


// ===================================================================

class NodeOutput::_Internal {
 public:
  static const ::tensorflow::TensorDescription& tensor_description(const NodeOutput* msg);
};

const ::tensorflow::TensorDescription&
NodeOutput::_Internal::tensor_description(const NodeOutput* msg) {
  return *msg->_impl_.tensor_description_;
}
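// Ownership note: the tensor_description_ submessage is heap-allocated only
// when the parent message has no arena, so clearing deletes the pointer only
// in the arena-less case and otherwise just drops the reference (the arena
// reclaims it later).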
void NodeOutput::clear_tensor_description() {
  if (GetArenaForAllocation() == nullptr && _impl_.tensor_description_ != nullptr) {
    delete _impl_.tensor_description_;
  }
  _impl_.tensor_description_ = nullptr;
}
NodeOutput::NodeOutput(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.NodeOutput)
}
NodeOutput::NodeOutput(const NodeOutput& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  NodeOutput* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.tensor_description_){nullptr}
    , decltype(_impl_.slot_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  if (from._internal_has_tensor_description()) {
    _this->_impl_.tensor_description_ = new ::tensorflow::TensorDescription(*from._impl_.tensor_description_);
  }
  _this->_impl_.slot_ = from._impl_.slot_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.NodeOutput)
}

inline void NodeOutput::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.tensor_description_){nullptr}
    , decltype(_impl_.slot_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

NodeOutput::~NodeOutput() {
  // @@protoc_insertion_point(destructor:tensorflow.NodeOutput)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void NodeOutput::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  if (this != internal_default_instance()) delete _impl_.tensor_description_;
}

void NodeOutput::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void NodeOutput::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.NodeOutput)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  if (GetArenaForAllocation() == nullptr && _impl_.tensor_description_ != nullptr) {
    delete _impl_.tensor_description_;
  }
  _impl_.tensor_description_ = nullptr;
  _impl_.slot_ = 0;
  _internal_metadata_.Clear<std::string>();
}

const char* NodeOutput::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int32 slot = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.slot_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.TensorDescription tensor_description = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          ptr = ctx->ParseMessage(_internal_mutable_tensor_description(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* NodeOutput::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.NodeOutput)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int32 slot = 1;
  if (this->_internal_slot() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_slot(), target);
  }

  // .tensorflow.TensorDescription tensor_description = 3;
  if (this->_internal_has_tensor_description()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(3, _Internal::tensor_description(this),
        _Internal::tensor_description(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.NodeOutput)
  return target;
}

size_t NodeOutput::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.NodeOutput)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // .tensorflow.TensorDescription tensor_description = 3;
  if (this->_internal_has_tensor_description()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.tensor_description_);
  }

  // int32 slot = 1;
  if (this->_internal_slot() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_slot());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void NodeOutput::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const NodeOutput*>(
      &from));
}

void NodeOutput::MergeFrom(const NodeOutput& from) {
  NodeOutput* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.NodeOutput)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_has_tensor_description()) {
    _this->_internal_mutable_tensor_description()->::tensorflow::TensorDescription::MergeFrom(
        from._internal_tensor_description());
  }
  if (from._internal_slot() != 0) {
    _this->_internal_set_slot(from._internal_slot());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void NodeOutput::CopyFrom(const NodeOutput& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.NodeOutput)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool NodeOutput::IsInitialized() const {
  return true;
}

void NodeOutput::InternalSwap(NodeOutput* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(NodeOutput, _impl_.slot_)
      + sizeof(NodeOutput::_impl_.slot_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(NodeOutput, _impl_.tensor_description_)>(
          reinterpret_cast<char*>(&_impl_.tensor_description_),
          reinterpret_cast<char*>(&other->_impl_.tensor_description_));
}

std::string NodeOutput::GetTypeName() const {
  return "tensorflow.NodeOutput";
}


// ===================================================================

class MemoryStats::_Internal {
 public:
};

MemoryStats::MemoryStats(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryStats)
}
MemoryStats::MemoryStats(const MemoryStats& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  MemoryStats* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.persistent_tensor_alloc_ids_){from._impl_.persistent_tensor_alloc_ids_}
    , /*decltype(_impl_._persistent_tensor_alloc_ids_cached_byte_size_)*/{0}
    , decltype(_impl_.device_persistent_tensor_alloc_ids_){from._impl_.device_persistent_tensor_alloc_ids_}
    , /*decltype(_impl_._device_persistent_tensor_alloc_ids_cached_byte_size_)*/{0}
    , decltype(_impl_.temp_memory_size_){}
    , decltype(_impl_.device_temp_memory_size_){}
    , decltype(_impl_.persistent_memory_size_){}
    , decltype(_impl_.device_persistent_memory_size_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  ::memcpy(&_impl_.temp_memory_size_, &from._impl_.temp_memory_size_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.device_persistent_memory_size_) -
    reinterpret_cast<char*>(&_impl_.temp_memory_size_)) + sizeof(_impl_.device_persistent_memory_size_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryStats)
}

inline void MemoryStats::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.persistent_tensor_alloc_ids_){arena}
    , /*decltype(_impl_._persistent_tensor_alloc_ids_cached_byte_size_)*/{0}
    , decltype(_impl_.device_persistent_tensor_alloc_ids_){arena}
    , /*decltype(_impl_._device_persistent_tensor_alloc_ids_cached_byte_size_)*/{0}
    , decltype(_impl_.temp_memory_size_){::int64_t{0}}
    , decltype(_impl_.device_temp_memory_size_){::int64_t{0}}
    , decltype(_impl_.persistent_memory_size_){::int64_t{0}}
    , decltype(_impl_.device_persistent_memory_size_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

MemoryStats::~MemoryStats() {
  // @@protoc_insertion_point(destructor:tensorflow.MemoryStats)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void MemoryStats::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.persistent_tensor_alloc_ids_.~RepeatedField();
  _impl_.device_persistent_tensor_alloc_ids_.~RepeatedField();
}

void MemoryStats::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void MemoryStats::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryStats)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.persistent_tensor_alloc_ids_.Clear();
  _impl_.device_persistent_tensor_alloc_ids_.Clear();
  ::memset(&_impl_.temp_memory_size_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.device_persistent_memory_size_) -
      reinterpret_cast<char*>(&_impl_.temp_memory_size_)) + sizeof(_impl_.device_persistent_memory_size_));
  _internal_metadata_.Clear<std::string>();
}

const char* MemoryStats::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int64 temp_memory_size = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.temp_memory_size_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 device_temp_memory_size = 2 [deprecated = true];
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.device_temp_memory_size_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 persistent_memory_size = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.persistent_memory_size_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 device_persistent_memory_size = 4 [deprecated = true];
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          _impl_.device_persistent_memory_size_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
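      // Fields 5 and 6 are `repeated int64`. Wire type 2 (tags 42/50) is the
      // packed form, decoded in one pass; wire type 0 (tags 40/48) accepts
      // the unpacked form one varint at a time, as wire-format compatibility
      // requires.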
      // repeated int64 persistent_tensor_alloc_ids = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt64Parser(_internal_mutable_persistent_tensor_alloc_ids(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<::uint8_t>(tag) == 40) {
          _internal_add_persistent_tensor_alloc_ids(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true];
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt64Parser(_internal_mutable_device_persistent_tensor_alloc_ids(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<::uint8_t>(tag) == 48) {
          _internal_add_device_persistent_tensor_alloc_ids(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* MemoryStats::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryStats)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 temp_memory_size = 1;
  if (this->_internal_temp_memory_size() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_temp_memory_size(), target);
  }

  // int64 device_temp_memory_size = 2 [deprecated = true];
  if (this->_internal_device_temp_memory_size() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_device_temp_memory_size(), target);
  }

  // int64 persistent_memory_size = 3;
  if (this->_internal_persistent_memory_size() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_persistent_memory_size(), target);
  }

  // int64 device_persistent_memory_size = 4 [deprecated = true];
  if (this->_internal_device_persistent_memory_size() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_device_persistent_memory_size(), target);
  }

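  // The packed blocks below reuse the byte counts cached by ByteSizeLong(),
  // which the serializer runs before writing; a zero cached size means the
  // field is empty and nothing is emitted.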
1163 // repeated int64 persistent_tensor_alloc_ids = 5;
1164 {
1165 int byte_size = _impl_._persistent_tensor_alloc_ids_cached_byte_size_.load(std::memory_order_relaxed);
1166 if (byte_size > 0) {
1167 target = stream->WriteInt64Packed(
1168 5, _internal_persistent_tensor_alloc_ids(), byte_size, target);
1169 }
1170 }
1171
1172 // repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true];
1173 {
1174 int byte_size = _impl_._device_persistent_tensor_alloc_ids_cached_byte_size_.load(std::memory_order_relaxed);
1175 if (byte_size > 0) {
1176 target = stream->WriteInt64Packed(
1177 6, _internal_device_persistent_tensor_alloc_ids(), byte_size, target);
1178 }
1179 }
1180
1181 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1182 target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
1183 static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
1184 }
1185 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryStats)
1186 return target;
1187 }
1188
ByteSizeLong() const1189 size_t MemoryStats::ByteSizeLong() const {
1190 // @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryStats)
1191 size_t total_size = 0;
1192
1193 ::uint32_t cached_has_bits = 0;
1194 // Prevent compiler warnings about cached_has_bits being unused
1195 (void) cached_has_bits;
1196
1197 // repeated int64 persistent_tensor_alloc_ids = 5;
1198 {
1199 size_t data_size = ::_pbi::WireFormatLite::
1200 Int64Size(this->_impl_.persistent_tensor_alloc_ids_);
1201 if (data_size > 0) {
1202 total_size += 1 +
1203 ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
1204 }
1205 int cached_size = ::_pbi::ToCachedSize(data_size);
1206 _impl_._persistent_tensor_alloc_ids_cached_byte_size_.store(cached_size,
1207 std::memory_order_relaxed);
1208 total_size += data_size;
1209 }
1210
1211 // repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true];
1212 {
1213 size_t data_size = ::_pbi::WireFormatLite::
1214 Int64Size(this->_impl_.device_persistent_tensor_alloc_ids_);
1215 if (data_size > 0) {
1216 total_size += 1 +
1217 ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
1218 }
1219 int cached_size = ::_pbi::ToCachedSize(data_size);
1220 _impl_._device_persistent_tensor_alloc_ids_cached_byte_size_.store(cached_size,
1221 std::memory_order_relaxed);
1222 total_size += data_size;
1223 }
1224
1225 // int64 temp_memory_size = 1;
1226 if (this->_internal_temp_memory_size() != 0) {
1227 total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_temp_memory_size());
1228 }
1229
1230 // int64 device_temp_memory_size = 2 [deprecated = true];
1231 if (this->_internal_device_temp_memory_size() != 0) {
1232 total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_device_temp_memory_size());
1233 }
1234
1235 // int64 persistent_memory_size = 3;
1236 if (this->_internal_persistent_memory_size() != 0) {
1237 total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_persistent_memory_size());
1238 }
1239
1240 // int64 device_persistent_memory_size = 4 [deprecated = true];
1241 if (this->_internal_device_persistent_memory_size() != 0) {
1242 total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_device_persistent_memory_size());
1243 }
1244
1245 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1246 total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
1247 }
1248 int cached_size = ::_pbi::ToCachedSize(total_size);
1249 SetCachedSize(cached_size);
1250 return total_size;
1251 }
1252
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)1253 void MemoryStats::CheckTypeAndMergeFrom(
1254 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
1255 MergeFrom(*::_pbi::DownCast<const MemoryStats*>(
1256 &from));
1257 }
1258
MergeFrom(const MemoryStats & from)1259 void MemoryStats::MergeFrom(const MemoryStats& from) {
1260 MemoryStats* const _this = this;
1261 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryStats)
1262 GOOGLE_DCHECK_NE(&from, _this);
1263 ::uint32_t cached_has_bits = 0;
1264 (void) cached_has_bits;
1265
1266 _this->_impl_.persistent_tensor_alloc_ids_.MergeFrom(from._impl_.persistent_tensor_alloc_ids_);
1267 _this->_impl_.device_persistent_tensor_alloc_ids_.MergeFrom(from._impl_.device_persistent_tensor_alloc_ids_);
1268 if (from._internal_temp_memory_size() != 0) {
1269 _this->_internal_set_temp_memory_size(from._internal_temp_memory_size());
1270 }
1271 if (from._internal_device_temp_memory_size() != 0) {
1272 _this->_internal_set_device_temp_memory_size(from._internal_device_temp_memory_size());
1273 }
1274 if (from._internal_persistent_memory_size() != 0) {
1275 _this->_internal_set_persistent_memory_size(from._internal_persistent_memory_size());
1276 }
1277 if (from._internal_device_persistent_memory_size() != 0) {
1278 _this->_internal_set_device_persistent_memory_size(from._internal_device_persistent_memory_size());
1279 }
1280 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1281 }
1282
CopyFrom(const MemoryStats & from)1283 void MemoryStats::CopyFrom(const MemoryStats& from) {
1284 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryStats)
1285 if (&from == this) return;
1286 Clear();
1287 MergeFrom(from);
1288 }
1289
IsInitialized() const1290 bool MemoryStats::IsInitialized() const {
1291 return true;
1292 }
1293
InternalSwap(MemoryStats * other)1294 void MemoryStats::InternalSwap(MemoryStats* other) {
1295 using std::swap;
1296 _internal_metadata_.InternalSwap(&other->_internal_metadata_);
1297 _impl_.persistent_tensor_alloc_ids_.InternalSwap(&other->_impl_.persistent_tensor_alloc_ids_);
1298 _impl_.device_persistent_tensor_alloc_ids_.InternalSwap(&other->_impl_.device_persistent_tensor_alloc_ids_);
1299 ::PROTOBUF_NAMESPACE_ID::internal::memswap<
1300 PROTOBUF_FIELD_OFFSET(MemoryStats, _impl_.device_persistent_memory_size_)
1301 + sizeof(MemoryStats::_impl_.device_persistent_memory_size_) // NOLINT
1302 - PROTOBUF_FIELD_OFFSET(MemoryStats, _impl_.temp_memory_size_)>(
1303 reinterpret_cast<char*>(&_impl_.temp_memory_size_),
1304 reinterpret_cast<char*>(&other->_impl_.temp_memory_size_));
1305 }
1306
GetTypeName() const1307 std::string MemoryStats::GetTypeName() const {
1308 return "tensorflow.MemoryStats";
1309 }
1310
1311
// ===================================================================

class NodeExecStats::_Internal {
 public:
  static const ::tensorflow::MemoryStats& memory_stats(const NodeExecStats* msg);
};

const ::tensorflow::MemoryStats&
NodeExecStats::_Internal::memory_stats(const NodeExecStats* msg) {
  return *msg->_impl_.memory_stats_;
}
void NodeExecStats::clear_referenced_tensor() {
  _impl_.referenced_tensor_.Clear();
}
NodeExecStats::NodeExecStats(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                             bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.NodeExecStats)
}
NodeExecStats::NodeExecStats(const NodeExecStats& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  NodeExecStats* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.memory_){from._impl_.memory_}
    , decltype(_impl_.output_){from._impl_.output_}
    , decltype(_impl_.referenced_tensor_){from._impl_.referenced_tensor_}
    , decltype(_impl_.node_name_){}
    , decltype(_impl_.timeline_label_){}
    , decltype(_impl_.memory_stats_){nullptr}
    , decltype(_impl_.all_start_micros_){}
    , decltype(_impl_.op_start_rel_micros_){}
    , decltype(_impl_.op_end_rel_micros_){}
    , decltype(_impl_.all_end_rel_micros_){}
    , decltype(_impl_.scheduled_micros_){}
    , decltype(_impl_.all_start_nanos_){}
    , decltype(_impl_.op_start_rel_nanos_){}
    , decltype(_impl_.op_end_rel_nanos_){}
    , decltype(_impl_.all_end_rel_nanos_){}
    , decltype(_impl_.scheduled_nanos_){}
    , decltype(_impl_.thread_id_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.node_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.node_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_node_name().empty()) {
    _this->_impl_.node_name_.Set(from._internal_node_name(),
      _this->GetArenaForAllocation());
  }
  _impl_.timeline_label_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.timeline_label_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_timeline_label().empty()) {
    _this->_impl_.timeline_label_.Set(from._internal_timeline_label(),
      _this->GetArenaForAllocation());
  }
  if (from._internal_has_memory_stats()) {
    _this->_impl_.memory_stats_ = new ::tensorflow::MemoryStats(*from._impl_.memory_stats_);
  }
  ::memcpy(&_impl_.all_start_micros_, &from._impl_.all_start_micros_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.thread_id_) -
    reinterpret_cast<char*>(&_impl_.all_start_micros_)) + sizeof(_impl_.thread_id_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.NodeExecStats)
}

inline void NodeExecStats::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.memory_){arena}
    , decltype(_impl_.output_){arena}
    , decltype(_impl_.referenced_tensor_){arena}
    , decltype(_impl_.node_name_){}
    , decltype(_impl_.timeline_label_){}
    , decltype(_impl_.memory_stats_){nullptr}
    , decltype(_impl_.all_start_micros_){::int64_t{0}}
    , decltype(_impl_.op_start_rel_micros_){::int64_t{0}}
    , decltype(_impl_.op_end_rel_micros_){::int64_t{0}}
    , decltype(_impl_.all_end_rel_micros_){::int64_t{0}}
    , decltype(_impl_.scheduled_micros_){::int64_t{0}}
    , decltype(_impl_.all_start_nanos_){::int64_t{0}}
    , decltype(_impl_.op_start_rel_nanos_){::int64_t{0}}
    , decltype(_impl_.op_end_rel_nanos_){::int64_t{0}}
    , decltype(_impl_.all_end_rel_nanos_){::int64_t{0}}
    , decltype(_impl_.scheduled_nanos_){::int64_t{0}}
    , decltype(_impl_.thread_id_){0u}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.node_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.node_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  _impl_.timeline_label_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.timeline_label_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

NodeExecStats::~NodeExecStats() {
  // @@protoc_insertion_point(destructor:tensorflow.NodeExecStats)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void NodeExecStats::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.memory_.~RepeatedPtrField();
  _impl_.output_.~RepeatedPtrField();
  _impl_.referenced_tensor_.~RepeatedPtrField();
  _impl_.node_name_.Destroy();
  _impl_.timeline_label_.Destroy();
  if (this != internal_default_instance()) delete _impl_.memory_stats_;
}

void NodeExecStats::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void NodeExecStats::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.NodeExecStats)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.memory_.Clear();
  _impl_.output_.Clear();
  _impl_.referenced_tensor_.Clear();
  _impl_.node_name_.ClearToEmpty();
  _impl_.timeline_label_.ClearToEmpty();
  if (GetArenaForAllocation() == nullptr && _impl_.memory_stats_ != nullptr) {
    delete _impl_.memory_stats_;
  }
  _impl_.memory_stats_ = nullptr;
  ::memset(&_impl_.all_start_micros_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.thread_id_) -
      reinterpret_cast<char*>(&_impl_.all_start_micros_)) + sizeof(_impl_.thread_id_));
  _internal_metadata_.Clear<std::string>();
}

const char* NodeExecStats::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // string node_name = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          auto str = _internal_mutable_node_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 all_start_micros = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) {
          _impl_.all_start_micros_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 op_start_rel_micros = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.op_start_rel_micros_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 op_end_rel_micros = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          _impl_.op_end_rel_micros_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 all_end_rel_micros = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          _impl_.all_end_rel_micros_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.AllocatorMemoryUsed memory = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_memory(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<50>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.NodeOutput output = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 58)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_output(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<58>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // string timeline_label = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 66)) {
          auto str = _internal_mutable_timeline_label();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 scheduled_micros = 9;
      case 9:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 72)) {
          _impl_.scheduled_micros_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // uint32 thread_id = 10;
      case 10:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 80)) {
          _impl_.thread_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.AllocationDescription referenced_tensor = 11;
      case 11:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 90)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_referenced_tensor(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<90>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.MemoryStats memory_stats = 12;
      case 12:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 98)) {
          ptr = ctx->ParseMessage(_internal_mutable_memory_stats(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 all_start_nanos = 13;
      case 13:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 104)) {
          _impl_.all_start_nanos_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 op_start_rel_nanos = 14;
      case 14:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 112)) {
          _impl_.op_start_rel_nanos_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 op_end_rel_nanos = 15;
      case 15:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 120)) {
          _impl_.op_end_rel_nanos_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 all_end_rel_nanos = 16;
      case 16:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 128)) {
          _impl_.all_end_rel_nanos_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 scheduled_nanos = 17;
      case 17:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 136)) {
          _impl_.scheduled_nanos_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
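
// Editor's note -- illustrative sketch, not protoc output. The constants
// compared against in _InternalParse() above are the low byte of the wire tag,
// (field_number << 3) | wire_type. A hypothetical helper makes the pattern
// explicit:
//
//   constexpr ::uint32_t Tag(::uint32_t field, ::uint32_t wire) {
//     return (field << 3) | wire;
//   }
//   static_assert(Tag(1, 2) == 10, "node_name: length-delimited");
//   static_assert(Tag(2, 0) == 16, "all_start_micros: varint");
//   static_assert(Tag(12, 2) == 98, "memory_stats: length-delimited");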

::uint8_t* NodeExecStats::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.NodeExecStats)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // string node_name = 1;
  if (!this->_internal_node_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_node_name().data(), static_cast<int>(this->_internal_node_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.NodeExecStats.node_name");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_node_name(), target);
  }

  // int64 all_start_micros = 2;
  if (this->_internal_all_start_micros() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_all_start_micros(), target);
  }

  // int64 op_start_rel_micros = 3;
  if (this->_internal_op_start_rel_micros() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_op_start_rel_micros(), target);
  }

  // int64 op_end_rel_micros = 4;
  if (this->_internal_op_end_rel_micros() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_op_end_rel_micros(), target);
  }

  // int64 all_end_rel_micros = 5;
  if (this->_internal_all_end_rel_micros() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(5, this->_internal_all_end_rel_micros(), target);
  }

  // repeated .tensorflow.AllocatorMemoryUsed memory = 6;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_memory_size()); i < n; i++) {
    const auto& repfield = this->_internal_memory(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(6, repfield, repfield.GetCachedSize(), target, stream);
  }

  // repeated .tensorflow.NodeOutput output = 7;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_output_size()); i < n; i++) {
    const auto& repfield = this->_internal_output(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(7, repfield, repfield.GetCachedSize(), target, stream);
  }

  // string timeline_label = 8;
  if (!this->_internal_timeline_label().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_timeline_label().data(), static_cast<int>(this->_internal_timeline_label().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.NodeExecStats.timeline_label");
    target = stream->WriteStringMaybeAliased(
        8, this->_internal_timeline_label(), target);
  }

  // int64 scheduled_micros = 9;
  if (this->_internal_scheduled_micros() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(9, this->_internal_scheduled_micros(), target);
  }

  // uint32 thread_id = 10;
  if (this->_internal_thread_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteUInt32ToArray(10, this->_internal_thread_id(), target);
  }

  // repeated .tensorflow.AllocationDescription referenced_tensor = 11;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_referenced_tensor_size()); i < n; i++) {
    const auto& repfield = this->_internal_referenced_tensor(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(11, repfield, repfield.GetCachedSize(), target, stream);
  }

  // .tensorflow.MemoryStats memory_stats = 12;
  if (this->_internal_has_memory_stats()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(12, _Internal::memory_stats(this),
        _Internal::memory_stats(this).GetCachedSize(), target, stream);
  }

  // int64 all_start_nanos = 13;
  if (this->_internal_all_start_nanos() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(13, this->_internal_all_start_nanos(), target);
  }

  // int64 op_start_rel_nanos = 14;
  if (this->_internal_op_start_rel_nanos() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(14, this->_internal_op_start_rel_nanos(), target);
  }

  // int64 op_end_rel_nanos = 15;
  if (this->_internal_op_end_rel_nanos() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(15, this->_internal_op_end_rel_nanos(), target);
  }

  // int64 all_end_rel_nanos = 16;
  if (this->_internal_all_end_rel_nanos() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(16, this->_internal_all_end_rel_nanos(), target);
  }

  // int64 scheduled_nanos = 17;
  if (this->_internal_scheduled_nanos() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(17, this->_internal_scheduled_nanos(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.NodeExecStats)
  return target;
}

size_t NodeExecStats::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.NodeExecStats)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.AllocatorMemoryUsed memory = 6;
  total_size += 1UL * this->_internal_memory_size();
  for (const auto& msg : this->_impl_.memory_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // repeated .tensorflow.NodeOutput output = 7;
  total_size += 1UL * this->_internal_output_size();
  for (const auto& msg : this->_impl_.output_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // repeated .tensorflow.AllocationDescription referenced_tensor = 11;
  total_size += 1UL * this->_internal_referenced_tensor_size();
  for (const auto& msg : this->_impl_.referenced_tensor_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // string node_name = 1;
  if (!this->_internal_node_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_node_name());
  }

  // string timeline_label = 8;
  if (!this->_internal_timeline_label().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_timeline_label());
  }

  // .tensorflow.MemoryStats memory_stats = 12;
  if (this->_internal_has_memory_stats()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.memory_stats_);
  }

  // int64 all_start_micros = 2;
  if (this->_internal_all_start_micros() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_all_start_micros());
  }

  // int64 op_start_rel_micros = 3;
  if (this->_internal_op_start_rel_micros() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_op_start_rel_micros());
  }

  // int64 op_end_rel_micros = 4;
  if (this->_internal_op_end_rel_micros() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_op_end_rel_micros());
  }

  // int64 all_end_rel_micros = 5;
  if (this->_internal_all_end_rel_micros() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_all_end_rel_micros());
  }

  // int64 scheduled_micros = 9;
  if (this->_internal_scheduled_micros() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_scheduled_micros());
  }

  // int64 all_start_nanos = 13;
  if (this->_internal_all_start_nanos() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_all_start_nanos());
  }

  // int64 op_start_rel_nanos = 14;
  if (this->_internal_op_start_rel_nanos() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_op_start_rel_nanos());
  }

  // int64 op_end_rel_nanos = 15;
  if (this->_internal_op_end_rel_nanos() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_op_end_rel_nanos());
  }

  // int64 all_end_rel_nanos = 16;
  if (this->_internal_all_end_rel_nanos() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::Int64Size(
        this->_internal_all_end_rel_nanos());
  }

  // int64 scheduled_nanos = 17;
  if (this->_internal_scheduled_nanos() != 0) {
    total_size += 2 +
      ::_pbi::WireFormatLite::Int64Size(
        this->_internal_scheduled_nanos());
  }

  // uint32 thread_id = 10;
  if (this->_internal_thread_id() != 0) {
    total_size += ::_pbi::WireFormatLite::UInt32SizePlusOne(this->_internal_thread_id());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
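
// Editor's note -- illustrative, not protoc output. In ByteSizeLong() above,
// fields 1-15 use the *SizePlusOne helpers (one tag byte plus the payload),
// while fields 16 and 17 add 2 instead: their wire tags, (16 << 3) = 128 and
// (17 << 3) = 136, no longer fit in a single varint byte, so the encoded tag
// takes two bytes. For instance:
//
//   static_assert((16u << 3) == 128u,
//                 "first field number whose tag needs two varint bytes");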

void NodeExecStats::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const NodeExecStats*>(
      &from));
}

void NodeExecStats::MergeFrom(const NodeExecStats& from) {
  NodeExecStats* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.NodeExecStats)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.memory_.MergeFrom(from._impl_.memory_);
  _this->_impl_.output_.MergeFrom(from._impl_.output_);
  _this->_impl_.referenced_tensor_.MergeFrom(from._impl_.referenced_tensor_);
  if (!from._internal_node_name().empty()) {
    _this->_internal_set_node_name(from._internal_node_name());
  }
  if (!from._internal_timeline_label().empty()) {
    _this->_internal_set_timeline_label(from._internal_timeline_label());
  }
  if (from._internal_has_memory_stats()) {
    _this->_internal_mutable_memory_stats()->::tensorflow::MemoryStats::MergeFrom(
        from._internal_memory_stats());
  }
  if (from._internal_all_start_micros() != 0) {
    _this->_internal_set_all_start_micros(from._internal_all_start_micros());
  }
  if (from._internal_op_start_rel_micros() != 0) {
    _this->_internal_set_op_start_rel_micros(from._internal_op_start_rel_micros());
  }
  if (from._internal_op_end_rel_micros() != 0) {
    _this->_internal_set_op_end_rel_micros(from._internal_op_end_rel_micros());
  }
  if (from._internal_all_end_rel_micros() != 0) {
    _this->_internal_set_all_end_rel_micros(from._internal_all_end_rel_micros());
  }
  if (from._internal_scheduled_micros() != 0) {
    _this->_internal_set_scheduled_micros(from._internal_scheduled_micros());
  }
  if (from._internal_all_start_nanos() != 0) {
    _this->_internal_set_all_start_nanos(from._internal_all_start_nanos());
  }
  if (from._internal_op_start_rel_nanos() != 0) {
    _this->_internal_set_op_start_rel_nanos(from._internal_op_start_rel_nanos());
  }
  if (from._internal_op_end_rel_nanos() != 0) {
    _this->_internal_set_op_end_rel_nanos(from._internal_op_end_rel_nanos());
  }
  if (from._internal_all_end_rel_nanos() != 0) {
    _this->_internal_set_all_end_rel_nanos(from._internal_all_end_rel_nanos());
  }
  if (from._internal_scheduled_nanos() != 0) {
    _this->_internal_set_scheduled_nanos(from._internal_scheduled_nanos());
  }
  if (from._internal_thread_id() != 0) {
    _this->_internal_set_thread_id(from._internal_thread_id());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void NodeExecStats::CopyFrom(const NodeExecStats& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.NodeExecStats)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool NodeExecStats::IsInitialized() const {
  return true;
}

void NodeExecStats::InternalSwap(NodeExecStats* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.memory_.InternalSwap(&other->_impl_.memory_);
  _impl_.output_.InternalSwap(&other->_impl_.output_);
  _impl_.referenced_tensor_.InternalSwap(&other->_impl_.referenced_tensor_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.node_name_, lhs_arena,
      &other->_impl_.node_name_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.timeline_label_, lhs_arena,
      &other->_impl_.timeline_label_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(NodeExecStats, _impl_.thread_id_)
      + sizeof(NodeExecStats::_impl_.thread_id_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(NodeExecStats, _impl_.memory_stats_)>(
          reinterpret_cast<char*>(&_impl_.memory_stats_),
          reinterpret_cast<char*>(&other->_impl_.memory_stats_));
}

std::string NodeExecStats::GetTypeName() const {
  return "tensorflow.NodeExecStats";
}

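// Editor's note -- illustrative sketch, not protoc output. A NodeExecStats can
// be populated through the generated accessors implemented above; the names
// and values here are examples only:
//
//   #include "tensorflow/core/framework/step_stats.pb.h"
//   void FillNodeExecStats() {
//     tensorflow::NodeExecStats node;
//     node.set_node_name("MatMul");       // string node_name = 1
//     node.set_all_start_micros(1000);    // int64 all_start_micros = 2
//     node.set_all_end_rel_micros(250);   // int64 all_end_rel_micros = 5
//     // repeated .tensorflow.AllocatorMemoryUsed memory = 6
//     node.add_memory()->set_allocator_name("GPU_0_bfc");
//     // .tensorflow.MemoryStats memory_stats = 12
//     node.mutable_memory_stats()->set_temp_memory_size(4096);
//   }
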
// ===================================================================

DeviceStepStats_ThreadNamesEntry_DoNotUse::DeviceStepStats_ThreadNamesEntry_DoNotUse() {}
DeviceStepStats_ThreadNamesEntry_DoNotUse::DeviceStepStats_ThreadNamesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
    : SuperType(arena) {}
void DeviceStepStats_ThreadNamesEntry_DoNotUse::MergeFrom(const DeviceStepStats_ThreadNamesEntry_DoNotUse& other) {
  MergeFromInternal(other);
}

// ===================================================================

class DeviceStepStats::_Internal {
 public:
};

DeviceStepStats::DeviceStepStats(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                 bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.DeviceStepStats)
}
DeviceStepStats::DeviceStepStats(const DeviceStepStats& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  DeviceStepStats* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.node_stats_){from._impl_.node_stats_}
    , /*decltype(_impl_.thread_names_)*/{}
    , decltype(_impl_.device_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _this->_impl_.thread_names_.MergeFrom(from._impl_.thread_names_);
  _impl_.device_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.device_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_device().empty()) {
    _this->_impl_.device_.Set(from._internal_device(),
      _this->GetArenaForAllocation());
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.DeviceStepStats)
}

inline void DeviceStepStats::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.node_stats_){arena}
    , /*decltype(_impl_.thread_names_)*/{::_pbi::ArenaInitialized(), arena}
    , decltype(_impl_.device_){}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.device_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.device_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

DeviceStepStats::~DeviceStepStats() {
  // @@protoc_insertion_point(destructor:tensorflow.DeviceStepStats)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void DeviceStepStats::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.node_stats_.~RepeatedPtrField();
  _impl_.thread_names_.Destruct();
  _impl_.thread_names_.~MapFieldLite();
  _impl_.device_.Destroy();
}

void DeviceStepStats::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void DeviceStepStats::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.DeviceStepStats)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.node_stats_.Clear();
  _impl_.thread_names_.Clear();
  _impl_.device_.ClearToEmpty();
  _internal_metadata_.Clear<std::string>();
}

const char* DeviceStepStats::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // string device = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          auto str = _internal_mutable_device();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.NodeExecStats node_stats = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_node_stats(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // map<uint32, string> thread_names = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(&_impl_.thread_names_, ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<26>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* DeviceStepStats::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.DeviceStepStats)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // string device = 1;
  if (!this->_internal_device().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_device().data(), static_cast<int>(this->_internal_device().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.DeviceStepStats.device");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_device(), target);
  }

  // repeated .tensorflow.NodeExecStats node_stats = 2;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_node_stats_size()); i < n; i++) {
    const auto& repfield = this->_internal_node_stats(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(2, repfield, repfield.GetCachedSize(), target, stream);
  }

  // map<uint32, string> thread_names = 3;
  if (!this->_internal_thread_names().empty()) {
    using MapType = ::_pb::Map<::uint32_t, std::string>;
    using WireHelper = DeviceStepStats_ThreadNamesEntry_DoNotUse::Funcs;
    const auto& map_field = this->_internal_thread_names();
    auto check_utf8 = [](const MapType::value_type& entry) {
      (void)entry;
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
        entry.second.data(), static_cast<int>(entry.second.length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
        "tensorflow.DeviceStepStats.ThreadNamesEntry.value");
    };

    if (stream->IsSerializationDeterministic() && map_field.size() > 1) {
      for (const auto& entry : ::_pbi::MapSorterFlat<MapType>(map_field)) {
        target = WireHelper::InternalSerialize(3, entry.first, entry.second, target, stream);
        check_utf8(entry);
      }
    } else {
      for (const auto& entry : map_field) {
        target = WireHelper::InternalSerialize(3, entry.first, entry.second, target, stream);
        check_utf8(entry);
      }
    }
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.DeviceStepStats)
  return target;
}

size_t DeviceStepStats::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.DeviceStepStats)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.NodeExecStats node_stats = 2;
  total_size += 1UL * this->_internal_node_stats_size();
  for (const auto& msg : this->_impl_.node_stats_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // map<uint32, string> thread_names = 3;
  total_size += 1 *
      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_thread_names_size());
  for (::PROTOBUF_NAMESPACE_ID::Map< ::uint32_t, std::string >::const_iterator
      it = this->_internal_thread_names().begin();
      it != this->_internal_thread_names().end(); ++it) {
    total_size += DeviceStepStats_ThreadNamesEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
  }

  // string device = 1;
  if (!this->_internal_device().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_device());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void DeviceStepStats::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const DeviceStepStats*>(
      &from));
}

void DeviceStepStats::MergeFrom(const DeviceStepStats& from) {
  DeviceStepStats* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.DeviceStepStats)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.node_stats_.MergeFrom(from._impl_.node_stats_);
  _this->_impl_.thread_names_.MergeFrom(from._impl_.thread_names_);
  if (!from._internal_device().empty()) {
    _this->_internal_set_device(from._internal_device());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void DeviceStepStats::CopyFrom(const DeviceStepStats& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.DeviceStepStats)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool DeviceStepStats::IsInitialized() const {
  return true;
}

void DeviceStepStats::InternalSwap(DeviceStepStats* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.node_stats_.InternalSwap(&other->_impl_.node_stats_);
  _impl_.thread_names_.InternalSwap(&other->_impl_.thread_names_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.device_, lhs_arena,
      &other->_impl_.device_, rhs_arena
  );
}

std::string DeviceStepStats::GetTypeName() const {
  return "tensorflow.DeviceStepStats";
}

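// Editor's note -- illustrative sketch, not protoc output. The thread_names
// map uses the generated Map accessors; note in _InternalSerialize() above
// that entries are emitted in sorted order only when the stream requests
// deterministic serialization. Example values are hypothetical:
//
//   #include "tensorflow/core/framework/step_stats.pb.h"
//   void FillDeviceStepStats() {
//     tensorflow::DeviceStepStats dev;
//     dev.set_device("/device:CPU:0");                // string device = 1
//     dev.add_node_stats()->set_node_name("MatMul");  // repeated node_stats = 2
//     (*dev.mutable_thread_names())[7] = "worker";    // map<uint32, string> = 3
//   }
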
// ===================================================================

class StepStats::_Internal {
 public:
};

StepStats::StepStats(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                     bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.StepStats)
}
StepStats::StepStats(const StepStats& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  StepStats* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.dev_stats_){from._impl_.dev_stats_}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  // @@protoc_insertion_point(copy_constructor:tensorflow.StepStats)
}

inline void StepStats::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.dev_stats_){arena}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}

StepStats::~StepStats() {
  // @@protoc_insertion_point(destructor:tensorflow.StepStats)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void StepStats::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.dev_stats_.~RepeatedPtrField();
}

void StepStats::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void StepStats::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.StepStats)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.dev_stats_.Clear();
  _internal_metadata_.Clear<std::string>();
}

const char* StepStats::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // repeated .tensorflow.DeviceStepStats dev_stats = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_dev_stats(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* StepStats::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.StepStats)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // repeated .tensorflow.DeviceStepStats dev_stats = 1;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_dev_stats_size()); i < n; i++) {
    const auto& repfield = this->_internal_dev_stats(i);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(1, repfield, repfield.GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.StepStats)
  return target;
}

size_t StepStats::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.StepStats)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.DeviceStepStats dev_stats = 1;
  total_size += 1UL * this->_internal_dev_stats_size();
  for (const auto& msg : this->_impl_.dev_stats_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void StepStats::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const StepStats*>(
      &from));
}

void StepStats::MergeFrom(const StepStats& from) {
  StepStats* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.StepStats)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  _this->_impl_.dev_stats_.MergeFrom(from._impl_.dev_stats_);
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void StepStats::CopyFrom(const StepStats& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.StepStats)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool StepStats::IsInitialized() const {
  return true;
}

void StepStats::InternalSwap(StepStats* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  _impl_.dev_stats_.InternalSwap(&other->_impl_.dev_stats_);
}

std::string StepStats::GetTypeName() const {
  return "tensorflow.StepStats";
}

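// Editor's note -- illustrative sketch, not protoc output. StepStats is the
// top-level container: one DeviceStepStats per device, each holding the
// per-node records above. A hypothetical assembly:
//
//   #include "tensorflow/core/framework/step_stats.pb.h"
//   #include <string>
//   std::string CollectStep() {
//     tensorflow::StepStats step;
//     tensorflow::DeviceStepStats* dev = step.add_dev_stats();
//     dev->set_device("/device:GPU:0");
//     dev->add_node_stats()->set_node_name("Conv2D");
//     return step.SerializeAsString();  // MessageLite wire format
//   }
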
// @@protoc_insertion_point(namespace_scope)
}  // namespace tensorflow
PROTOBUF_NAMESPACE_OPEN
template<> PROTOBUF_NOINLINE ::tensorflow::AllocationRecord*
Arena::CreateMaybeMessage< ::tensorflow::AllocationRecord >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::AllocationRecord >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::AllocatorMemoryUsed*
Arena::CreateMaybeMessage< ::tensorflow::AllocatorMemoryUsed >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::AllocatorMemoryUsed >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::NodeOutput*
Arena::CreateMaybeMessage< ::tensorflow::NodeOutput >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::NodeOutput >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::MemoryStats*
Arena::CreateMaybeMessage< ::tensorflow::MemoryStats >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::MemoryStats >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::NodeExecStats*
Arena::CreateMaybeMessage< ::tensorflow::NodeExecStats >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::NodeExecStats >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse*
Arena::CreateMaybeMessage< ::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::DeviceStepStats*
Arena::CreateMaybeMessage< ::tensorflow::DeviceStepStats >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::DeviceStepStats >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::StepStats*
Arena::CreateMaybeMessage< ::tensorflow::StepStats >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::StepStats >(arena);
}
PROTOBUF_NAMESPACE_CLOSE

// @@protoc_insertion_point(global_scope)
#include <google/protobuf/port_undef.inc>