// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/log_memory.proto

#include "tensorflow/core/framework/log_memory.pb.h"

#include <algorithm>
#include <cstdint>

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/wire_format_lite.h>
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>

PROTOBUF_PRAGMA_INIT_SEG

namespace _pb = ::PROTOBUF_NAMESPACE_ID;
namespace _pbi = _pb::internal;

namespace tensorflow {
PROTOBUF_CONSTEXPR MemoryLogStep::MemoryLogStep(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.handle_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.step_id_)*/::int64_t{0}
  , /*decltype(_impl_._cached_size_)*/{}} {}
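// NOTE (added commentary; not protoc output): each message below gets a
// constinit default instance wrapped in a struct whose union member is never
// destroyed; together with PROTOBUF_ATTRIBUTE_NO_DESTROY this keeps the
// default instance valid for the entire program, including shutdown.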
struct MemoryLogStepDefaultTypeInternal {
  PROTOBUF_CONSTEXPR MemoryLogStepDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~MemoryLogStepDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    MemoryLogStep _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MemoryLogStepDefaultTypeInternal _MemoryLogStep_default_instance_;
PROTOBUF_CONSTEXPR MemoryLogTensorAllocation::MemoryLogTensorAllocation(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.kernel_name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.tensor_)*/nullptr
  , /*decltype(_impl_.step_id_)*/::int64_t{0}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct MemoryLogTensorAllocationDefaultTypeInternal {
  PROTOBUF_CONSTEXPR MemoryLogTensorAllocationDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~MemoryLogTensorAllocationDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    MemoryLogTensorAllocation _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MemoryLogTensorAllocationDefaultTypeInternal _MemoryLogTensorAllocation_default_instance_;
PROTOBUF_CONSTEXPR MemoryLogTensorDeallocation::MemoryLogTensorDeallocation(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.allocator_name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.allocation_id_)*/::int64_t{0}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct MemoryLogTensorDeallocationDefaultTypeInternal {
  PROTOBUF_CONSTEXPR MemoryLogTensorDeallocationDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~MemoryLogTensorDeallocationDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    MemoryLogTensorDeallocation _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MemoryLogTensorDeallocationDefaultTypeInternal _MemoryLogTensorDeallocation_default_instance_;
PROTOBUF_CONSTEXPR MemoryLogTensorOutput::MemoryLogTensorOutput(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.kernel_name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.tensor_)*/nullptr
  , /*decltype(_impl_.step_id_)*/::int64_t{0}
  , /*decltype(_impl_.index_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct MemoryLogTensorOutputDefaultTypeInternal {
  PROTOBUF_CONSTEXPR MemoryLogTensorOutputDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~MemoryLogTensorOutputDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    MemoryLogTensorOutput _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MemoryLogTensorOutputDefaultTypeInternal _MemoryLogTensorOutput_default_instance_;
PROTOBUF_CONSTEXPR MemoryLogRawAllocation::MemoryLogRawAllocation(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.operation_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.allocator_name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.step_id_)*/::int64_t{0}
  , /*decltype(_impl_.num_bytes_)*/::int64_t{0}
  , /*decltype(_impl_.ptr_)*/::uint64_t{0u}
  , /*decltype(_impl_.allocation_id_)*/::int64_t{0}
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct MemoryLogRawAllocationDefaultTypeInternal {
  PROTOBUF_CONSTEXPR MemoryLogRawAllocationDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~MemoryLogRawAllocationDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    MemoryLogRawAllocation _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MemoryLogRawAllocationDefaultTypeInternal _MemoryLogRawAllocation_default_instance_;
PROTOBUF_CONSTEXPR MemoryLogRawDeallocation::MemoryLogRawDeallocation(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.operation_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.allocator_name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
  , /*decltype(_impl_.step_id_)*/::int64_t{0}
  , /*decltype(_impl_.allocation_id_)*/::int64_t{0}
  , /*decltype(_impl_.deferred_)*/false
  , /*decltype(_impl_._cached_size_)*/{}} {}
struct MemoryLogRawDeallocationDefaultTypeInternal {
  PROTOBUF_CONSTEXPR MemoryLogRawDeallocationDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~MemoryLogRawDeallocationDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    MemoryLogRawDeallocation _instance;
  };
};
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MemoryLogRawDeallocationDefaultTypeInternal _MemoryLogRawDeallocation_default_instance_;
}  // namespace tensorflow
namespace tensorflow {

// ===================================================================

class MemoryLogStep::_Internal {
 public:
};

MemoryLogStep::MemoryLogStep(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                             bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryLogStep)
}
MemoryLogStep::MemoryLogStep(const MemoryLogStep& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  MemoryLogStep* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.handle_){}
    , decltype(_impl_.step_id_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.handle_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.handle_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_handle().empty()) {
    _this->_impl_.handle_.Set(from._internal_handle(),
      _this->GetArenaForAllocation());
  }
  _this->_impl_.step_id_ = from._impl_.step_id_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryLogStep)
}

inline void MemoryLogStep::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.handle_){}
    , decltype(_impl_.step_id_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.handle_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.handle_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

MemoryLogStep::~MemoryLogStep() {
  // @@protoc_insertion_point(destructor:tensorflow.MemoryLogStep)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}
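// NOTE (added commentary): when the message lives on an arena,
// DeleteReturnArena() returns that arena and the destructor above returns
// early; the arena owns all memory, so SharedDtor() only runs for heap-owned
// messages.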

inline void MemoryLogStep::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.handle_.Destroy();
}

void MemoryLogStep::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void MemoryLogStep::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryLogStep)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.handle_.ClearToEmpty();
  _impl_.step_id_ = ::int64_t{0};
  _internal_metadata_.Clear<std::string>();
}

const char* MemoryLogStep::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
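    // NOTE (added commentary): a wire tag is (field_number << 3) | wire_type,
    // so tag == 8 below means field 1 as a varint and tag == 18 means field 2
    // as a length-delimited value; the switch dispatches on the field number.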
    switch (tag >> 3) {
      // int64 step_id = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.step_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string handle = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_handle();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* MemoryLogStep::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryLogStep)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

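  // NOTE (added commentary): proto3 scalar fields have implicit presence, so
  // serialization skips fields that hold their default value (0 or "").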
  // int64 step_id = 1;
  if (this->_internal_step_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_step_id(), target);
  }

  // string handle = 2;
  if (!this->_internal_handle().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_handle().data(), static_cast<int>(this->_internal_handle().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.MemoryLogStep.handle");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_handle(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryLogStep)
  return target;
}

size_t MemoryLogStep::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryLogStep)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string handle = 2;
  if (!this->_internal_handle().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_handle());
  }

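  // NOTE (added commentary): Int64SizePlusOne below is the varint size of the
  // value plus one byte for the field tag, which fits in a single byte for
  // small field numbers like these.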
  // int64 step_id = 1;
  if (this->_internal_step_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_step_id());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void MemoryLogStep::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const MemoryLogStep*>(
      &from));
}

void MemoryLogStep::MergeFrom(const MemoryLogStep& from) {
  MemoryLogStep* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryLogStep)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_handle().empty()) {
    _this->_internal_set_handle(from._internal_handle());
  }
  if (from._internal_step_id() != 0) {
    _this->_internal_set_step_id(from._internal_step_id());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void MemoryLogStep::CopyFrom(const MemoryLogStep& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryLogStep)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool MemoryLogStep::IsInitialized() const {
  return true;
}

void MemoryLogStep::InternalSwap(MemoryLogStep* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.handle_, lhs_arena,
      &other->_impl_.handle_, rhs_arena
  );
  swap(_impl_.step_id_, other->_impl_.step_id_);
}

std::string MemoryLogStep::GetTypeName() const {
  return "tensorflow.MemoryLogStep";
}
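
// Example (added for illustration; not part of the protoc output): the lite
// message above is driven through the normal MessageLite API from code that
// includes the generated header. A minimal round trip through
// _InternalSerialize()/_InternalParse() looks like:
//
//   tensorflow::MemoryLogStep step;
//   step.set_step_id(42);                // field 1, written as a varint
//   step.set_handle("session_run");      // field 2, length-delimited
//   std::string wire;
//   step.SerializeToString(&wire);
//
//   tensorflow::MemoryLogStep parsed;
//   parsed.ParseFromString(wire);        // invokes _InternalParse() above
//   // parsed.step_id() == 42, parsed.handle() == "session_run"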


// ===================================================================

class MemoryLogTensorAllocation::_Internal {
 public:
  static const ::tensorflow::TensorDescription& tensor(const MemoryLogTensorAllocation* msg);
};

const ::tensorflow::TensorDescription&
MemoryLogTensorAllocation::_Internal::tensor(const MemoryLogTensorAllocation* msg) {
  return *msg->_impl_.tensor_;
}
void MemoryLogTensorAllocation::clear_tensor() {
  if (GetArenaForAllocation() == nullptr && _impl_.tensor_ != nullptr) {
    delete _impl_.tensor_;
  }
  _impl_.tensor_ = nullptr;
}
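// NOTE (added commentary): the tensor submessage is lazily heap-allocated;
// clear_tensor() deletes it only when the message is not arena-allocated,
// since an arena reclaims submessage storage itself.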
MemoryLogTensorAllocation::MemoryLogTensorAllocation(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                                     bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryLogTensorAllocation)
}
MemoryLogTensorAllocation::MemoryLogTensorAllocation(const MemoryLogTensorAllocation& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  MemoryLogTensorAllocation* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.kernel_name_){}
    , decltype(_impl_.tensor_){nullptr}
    , decltype(_impl_.step_id_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.kernel_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.kernel_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_kernel_name().empty()) {
    _this->_impl_.kernel_name_.Set(from._internal_kernel_name(),
      _this->GetArenaForAllocation());
  }
  if (from._internal_has_tensor()) {
    _this->_impl_.tensor_ = new ::tensorflow::TensorDescription(*from._impl_.tensor_);
  }
  _this->_impl_.step_id_ = from._impl_.step_id_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryLogTensorAllocation)
}

inline void MemoryLogTensorAllocation::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.kernel_name_){}
    , decltype(_impl_.tensor_){nullptr}
    , decltype(_impl_.step_id_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.kernel_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.kernel_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

MemoryLogTensorAllocation::~MemoryLogTensorAllocation() {
  // @@protoc_insertion_point(destructor:tensorflow.MemoryLogTensorAllocation)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void MemoryLogTensorAllocation::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.kernel_name_.Destroy();
  if (this != internal_default_instance()) delete _impl_.tensor_;
}

void MemoryLogTensorAllocation::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void MemoryLogTensorAllocation::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryLogTensorAllocation)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.kernel_name_.ClearToEmpty();
  if (GetArenaForAllocation() == nullptr && _impl_.tensor_ != nullptr) {
    delete _impl_.tensor_;
  }
  _impl_.tensor_ = nullptr;
  _impl_.step_id_ = ::int64_t{0};
  _internal_metadata_.Clear<std::string>();
}

const char* MemoryLogTensorAllocation::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int64 step_id = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.step_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string kernel_name = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_kernel_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.TensorDescription tensor = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          ptr = ctx->ParseMessage(_internal_mutable_tensor(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* MemoryLogTensorAllocation::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryLogTensorAllocation)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 step_id = 1;
  if (this->_internal_step_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_step_id(), target);
  }

  // string kernel_name = 2;
  if (!this->_internal_kernel_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_kernel_name().data(), static_cast<int>(this->_internal_kernel_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.MemoryLogTensorAllocation.kernel_name");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_kernel_name(), target);
  }

  // .tensorflow.TensorDescription tensor = 3;
  if (this->_internal_has_tensor()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(3, _Internal::tensor(this),
        _Internal::tensor(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryLogTensorAllocation)
  return target;
}

size_t MemoryLogTensorAllocation::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryLogTensorAllocation)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string kernel_name = 2;
  if (!this->_internal_kernel_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_kernel_name());
  }

  // .tensorflow.TensorDescription tensor = 3;
  if (this->_internal_has_tensor()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.tensor_);
  }

  // int64 step_id = 1;
  if (this->_internal_step_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_step_id());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void MemoryLogTensorAllocation::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const MemoryLogTensorAllocation*>(
      &from));
}

void MemoryLogTensorAllocation::MergeFrom(const MemoryLogTensorAllocation& from) {
  MemoryLogTensorAllocation* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryLogTensorAllocation)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_kernel_name().empty()) {
    _this->_internal_set_kernel_name(from._internal_kernel_name());
  }
  if (from._internal_has_tensor()) {
    _this->_internal_mutable_tensor()->::tensorflow::TensorDescription::MergeFrom(
        from._internal_tensor());
  }
  if (from._internal_step_id() != 0) {
    _this->_internal_set_step_id(from._internal_step_id());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void MemoryLogTensorAllocation::CopyFrom(const MemoryLogTensorAllocation& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryLogTensorAllocation)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool MemoryLogTensorAllocation::IsInitialized() const {
  return true;
}

void MemoryLogTensorAllocation::InternalSwap(MemoryLogTensorAllocation* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.kernel_name_, lhs_arena,
      &other->_impl_.kernel_name_, rhs_arena
  );
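  // NOTE (added commentary): memswap byte-swaps the contiguous trivially
  // copyable field range from tensor_ through step_id_ in one operation
  // instead of swapping each field individually.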
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(MemoryLogTensorAllocation, _impl_.step_id_)
      + sizeof(MemoryLogTensorAllocation::_impl_.step_id_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(MemoryLogTensorAllocation, _impl_.tensor_)>(
          reinterpret_cast<char*>(&_impl_.tensor_),
          reinterpret_cast<char*>(&other->_impl_.tensor_));
}

std::string MemoryLogTensorAllocation::GetTypeName() const {
  return "tensorflow.MemoryLogTensorAllocation";
}


// ===================================================================

class MemoryLogTensorDeallocation::_Internal {
 public:
};

MemoryLogTensorDeallocation::MemoryLogTensorDeallocation(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryLogTensorDeallocation)
}
MemoryLogTensorDeallocation::MemoryLogTensorDeallocation(const MemoryLogTensorDeallocation& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  MemoryLogTensorDeallocation* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.allocator_name_){}
    , decltype(_impl_.allocation_id_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.allocator_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.allocator_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_allocator_name().empty()) {
    _this->_impl_.allocator_name_.Set(from._internal_allocator_name(),
      _this->GetArenaForAllocation());
  }
  _this->_impl_.allocation_id_ = from._impl_.allocation_id_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryLogTensorDeallocation)
}

inline void MemoryLogTensorDeallocation::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.allocator_name_){}
    , decltype(_impl_.allocation_id_){::int64_t{0}}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.allocator_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.allocator_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

MemoryLogTensorDeallocation::~MemoryLogTensorDeallocation() {
  // @@protoc_insertion_point(destructor:tensorflow.MemoryLogTensorDeallocation)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void MemoryLogTensorDeallocation::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.allocator_name_.Destroy();
}

void MemoryLogTensorDeallocation::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void MemoryLogTensorDeallocation::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryLogTensorDeallocation)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.allocator_name_.ClearToEmpty();
  _impl_.allocation_id_ = ::int64_t{0};
  _internal_metadata_.Clear<std::string>();
}

const char* MemoryLogTensorDeallocation::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int64 allocation_id = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.allocation_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string allocator_name = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_allocator_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* MemoryLogTensorDeallocation::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryLogTensorDeallocation)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 allocation_id = 1;
  if (this->_internal_allocation_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_allocation_id(), target);
  }

  // string allocator_name = 2;
  if (!this->_internal_allocator_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_allocator_name().data(), static_cast<int>(this->_internal_allocator_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.MemoryLogTensorDeallocation.allocator_name");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_allocator_name(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryLogTensorDeallocation)
  return target;
}

size_t MemoryLogTensorDeallocation::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryLogTensorDeallocation)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string allocator_name = 2;
  if (!this->_internal_allocator_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_allocator_name());
  }

  // int64 allocation_id = 1;
  if (this->_internal_allocation_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_allocation_id());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void MemoryLogTensorDeallocation::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const MemoryLogTensorDeallocation*>(
      &from));
}

void MemoryLogTensorDeallocation::MergeFrom(const MemoryLogTensorDeallocation& from) {
  MemoryLogTensorDeallocation* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryLogTensorDeallocation)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_allocator_name().empty()) {
    _this->_internal_set_allocator_name(from._internal_allocator_name());
  }
  if (from._internal_allocation_id() != 0) {
    _this->_internal_set_allocation_id(from._internal_allocation_id());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void MemoryLogTensorDeallocation::CopyFrom(const MemoryLogTensorDeallocation& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryLogTensorDeallocation)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool MemoryLogTensorDeallocation::IsInitialized() const {
  return true;
}

void MemoryLogTensorDeallocation::InternalSwap(MemoryLogTensorDeallocation* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.allocator_name_, lhs_arena,
      &other->_impl_.allocator_name_, rhs_arena
  );
  swap(_impl_.allocation_id_, other->_impl_.allocation_id_);
}

std::string MemoryLogTensorDeallocation::GetTypeName() const {
  return "tensorflow.MemoryLogTensorDeallocation";
}


// ===================================================================

class MemoryLogTensorOutput::_Internal {
 public:
  static const ::tensorflow::TensorDescription& tensor(const MemoryLogTensorOutput* msg);
};

const ::tensorflow::TensorDescription&
MemoryLogTensorOutput::_Internal::tensor(const MemoryLogTensorOutput* msg) {
  return *msg->_impl_.tensor_;
}
void MemoryLogTensorOutput::clear_tensor() {
  if (GetArenaForAllocation() == nullptr && _impl_.tensor_ != nullptr) {
    delete _impl_.tensor_;
  }
  _impl_.tensor_ = nullptr;
}
MemoryLogTensorOutput::MemoryLogTensorOutput(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                             bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryLogTensorOutput)
}
MemoryLogTensorOutput::MemoryLogTensorOutput(const MemoryLogTensorOutput& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  MemoryLogTensorOutput* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.kernel_name_){}
    , decltype(_impl_.tensor_){nullptr}
    , decltype(_impl_.step_id_){}
    , decltype(_impl_.index_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.kernel_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.kernel_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_kernel_name().empty()) {
    _this->_impl_.kernel_name_.Set(from._internal_kernel_name(),
      _this->GetArenaForAllocation());
  }
  if (from._internal_has_tensor()) {
    _this->_impl_.tensor_ = new ::tensorflow::TensorDescription(*from._impl_.tensor_);
  }
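  // NOTE (added commentary): step_id_ and index_ are laid out contiguously in
  // Impl_, so the copy constructor copies them as one trivially-copyable
  // block below.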
  ::memcpy(&_impl_.step_id_, &from._impl_.step_id_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.index_) -
    reinterpret_cast<char*>(&_impl_.step_id_)) + sizeof(_impl_.index_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryLogTensorOutput)
}

inline void MemoryLogTensorOutput::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.kernel_name_){}
    , decltype(_impl_.tensor_){nullptr}
    , decltype(_impl_.step_id_){::int64_t{0}}
    , decltype(_impl_.index_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.kernel_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.kernel_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

MemoryLogTensorOutput::~MemoryLogTensorOutput() {
  // @@protoc_insertion_point(destructor:tensorflow.MemoryLogTensorOutput)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void MemoryLogTensorOutput::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.kernel_name_.Destroy();
  if (this != internal_default_instance()) delete _impl_.tensor_;
}

void MemoryLogTensorOutput::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void MemoryLogTensorOutput::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryLogTensorOutput)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.kernel_name_.ClearToEmpty();
  if (GetArenaForAllocation() == nullptr && _impl_.tensor_ != nullptr) {
    delete _impl_.tensor_;
  }
  _impl_.tensor_ = nullptr;
  ::memset(&_impl_.step_id_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.index_) -
      reinterpret_cast<char*>(&_impl_.step_id_)) + sizeof(_impl_.index_));
  _internal_metadata_.Clear<std::string>();
}

const char* MemoryLogTensorOutput::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int64 step_id = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.step_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string kernel_name = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_kernel_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
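      // NOTE (added commentary): int32 fields are still varint-encoded on the
      // wire (negative values take ten bytes); ReadVarint32 keeps the low 32
      // bits of the decoded value.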
      // int32 index = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.index_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.TensorDescription tensor = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
          ptr = ctx->ParseMessage(_internal_mutable_tensor(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* MemoryLogTensorOutput::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryLogTensorOutput)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 step_id = 1;
  if (this->_internal_step_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_step_id(), target);
  }

  // string kernel_name = 2;
  if (!this->_internal_kernel_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_kernel_name().data(), static_cast<int>(this->_internal_kernel_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.MemoryLogTensorOutput.kernel_name");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_kernel_name(), target);
  }

  // int32 index = 3;
  if (this->_internal_index() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_index(), target);
  }

  // .tensorflow.TensorDescription tensor = 4;
  if (this->_internal_has_tensor()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(4, _Internal::tensor(this),
        _Internal::tensor(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryLogTensorOutput)
  return target;
}

size_t MemoryLogTensorOutput::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryLogTensorOutput)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string kernel_name = 2;
  if (!this->_internal_kernel_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_kernel_name());
  }

  // .tensorflow.TensorDescription tensor = 4;
  if (this->_internal_has_tensor()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.tensor_);
  }

  // int64 step_id = 1;
  if (this->_internal_step_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_step_id());
  }

  // int32 index = 3;
  if (this->_internal_index() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_index());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void MemoryLogTensorOutput::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const MemoryLogTensorOutput*>(
      &from));
}

void MemoryLogTensorOutput::MergeFrom(const MemoryLogTensorOutput& from) {
  MemoryLogTensorOutput* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryLogTensorOutput)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_kernel_name().empty()) {
    _this->_internal_set_kernel_name(from._internal_kernel_name());
  }
  if (from._internal_has_tensor()) {
    _this->_internal_mutable_tensor()->::tensorflow::TensorDescription::MergeFrom(
        from._internal_tensor());
  }
  if (from._internal_step_id() != 0) {
    _this->_internal_set_step_id(from._internal_step_id());
  }
  if (from._internal_index() != 0) {
    _this->_internal_set_index(from._internal_index());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void MemoryLogTensorOutput::CopyFrom(const MemoryLogTensorOutput& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryLogTensorOutput)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool MemoryLogTensorOutput::IsInitialized() const {
  return true;
}

void MemoryLogTensorOutput::InternalSwap(MemoryLogTensorOutput* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.kernel_name_, lhs_arena,
      &other->_impl_.kernel_name_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(MemoryLogTensorOutput, _impl_.index_)
      + sizeof(MemoryLogTensorOutput::_impl_.index_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(MemoryLogTensorOutput, _impl_.tensor_)>(
          reinterpret_cast<char*>(&_impl_.tensor_),
          reinterpret_cast<char*>(&other->_impl_.tensor_));
}

std::string MemoryLogTensorOutput::GetTypeName() const {
  return "tensorflow.MemoryLogTensorOutput";
}


// ===================================================================
1191
1192 class MemoryLogRawAllocation::_Internal {
1193 public:
1194 };
1195
MemoryLogRawAllocation(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)1196 MemoryLogRawAllocation::MemoryLogRawAllocation(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1197 bool is_message_owned)
1198 : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
1199 SharedCtor(arena, is_message_owned);
1200 // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryLogRawAllocation)
1201 }
MemoryLogRawAllocation(const MemoryLogRawAllocation & from)1202 MemoryLogRawAllocation::MemoryLogRawAllocation(const MemoryLogRawAllocation& from)
1203 : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
1204 MemoryLogRawAllocation* const _this = this; (void)_this;
1205 new (&_impl_) Impl_{
1206 decltype(_impl_.operation_){}
1207 , decltype(_impl_.allocator_name_){}
1208 , decltype(_impl_.step_id_){}
1209 , decltype(_impl_.num_bytes_){}
1210 , decltype(_impl_.ptr_){}
1211 , decltype(_impl_.allocation_id_){}
1212 , /*decltype(_impl_._cached_size_)*/{}};
1213
1214 _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1215 _impl_.operation_.InitDefault();
1216 #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
1217 _impl_.operation_.Set("", GetArenaForAllocation());
1218 #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
1219 if (!from._internal_operation().empty()) {
1220 _this->_impl_.operation_.Set(from._internal_operation(),
1221 _this->GetArenaForAllocation());
1222 }
1223 _impl_.allocator_name_.InitDefault();
1224 #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
1225 _impl_.allocator_name_.Set("", GetArenaForAllocation());
1226 #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
1227 if (!from._internal_allocator_name().empty()) {
1228 _this->_impl_.allocator_name_.Set(from._internal_allocator_name(),
1229 _this->GetArenaForAllocation());
1230 }
1231 ::memcpy(&_impl_.step_id_, &from._impl_.step_id_,
1232 static_cast<size_t>(reinterpret_cast<char*>(&_impl_.allocation_id_) -
1233 reinterpret_cast<char*>(&_impl_.step_id_)) + sizeof(_impl_.allocation_id_));
1234 // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryLogRawAllocation)
1235 }
1236
SharedCtor(::_pb::Arena * arena,bool is_message_owned)1237 inline void MemoryLogRawAllocation::SharedCtor(
1238 ::_pb::Arena* arena, bool is_message_owned) {
1239 (void)arena;
1240 (void)is_message_owned;
1241 new (&_impl_) Impl_{
1242 decltype(_impl_.operation_){}
1243 , decltype(_impl_.allocator_name_){}
1244 , decltype(_impl_.step_id_){::int64_t{0}}
1245 , decltype(_impl_.num_bytes_){::int64_t{0}}
1246 , decltype(_impl_.ptr_){::uint64_t{0u}}
1247 , decltype(_impl_.allocation_id_){::int64_t{0}}
1248 , /*decltype(_impl_._cached_size_)*/{}
1249 };
1250 _impl_.operation_.InitDefault();
1251 #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
1252 _impl_.operation_.Set("", GetArenaForAllocation());
1253 #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
1254 _impl_.allocator_name_.InitDefault();
1255 #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
1256 _impl_.allocator_name_.Set("", GetArenaForAllocation());
1257 #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
1258 }
1259
~MemoryLogRawAllocation()1260 MemoryLogRawAllocation::~MemoryLogRawAllocation() {
1261 // @@protoc_insertion_point(destructor:tensorflow.MemoryLogRawAllocation)
1262 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
1263 (void)arena;
1264 return;
1265 }
1266 SharedDtor();
1267 }
1268
SharedDtor()1269 inline void MemoryLogRawAllocation::SharedDtor() {
1270 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
1271 _impl_.operation_.Destroy();
1272 _impl_.allocator_name_.Destroy();
1273 }
1274
SetCachedSize(int size) const1275 void MemoryLogRawAllocation::SetCachedSize(int size) const {
1276 _impl_._cached_size_.Set(size);
1277 }
1278
Clear()1279 void MemoryLogRawAllocation::Clear() {
1280 // @@protoc_insertion_point(message_clear_start:tensorflow.MemoryLogRawAllocation)
1281 ::uint32_t cached_has_bits = 0;
1282 // Prevent compiler warnings about cached_has_bits being unused
1283 (void) cached_has_bits;
1284
1285 _impl_.operation_.ClearToEmpty();
1286 _impl_.allocator_name_.ClearToEmpty();
1287 ::memset(&_impl_.step_id_, 0, static_cast<size_t>(
1288 reinterpret_cast<char*>(&_impl_.allocation_id_) -
1289 reinterpret_cast<char*>(&_impl_.step_id_)) + sizeof(_impl_.allocation_id_));
1290 _internal_metadata_.Clear<std::string>();
1291 }
1292
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)1293 const char* MemoryLogRawAllocation::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
1294 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1295 while (!ctx->Done(&ptr)) {
1296 ::uint32_t tag;
1297 ptr = ::_pbi::ReadTag(ptr, &tag);
1298 switch (tag >> 3) {
1299 // int64 step_id = 1;
1300 case 1:
1301 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
1302 _impl_.step_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
1303 CHK_(ptr);
1304 } else {
1305 goto handle_unusual;
1306 }
1307 continue;
1308 // string operation = 2;
1309 case 2:
1310 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
1311 auto str = _internal_mutable_operation();
1312 ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
1313 CHK_(ptr);
1314 CHK_(::_pbi::VerifyUTF8(str, nullptr));
1315 } else {
1316 goto handle_unusual;
1317 }
1318 continue;
1319 // int64 num_bytes = 3;
1320 case 3:
1321 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
1322 _impl_.num_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
1323 CHK_(ptr);
1324 } else {
1325 goto handle_unusual;
1326 }
1327 continue;
1328 // uint64 ptr = 4;
1329 case 4:
1330 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
1331 _impl_.ptr_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
1332 CHK_(ptr);
1333 } else {
1334 goto handle_unusual;
1335 }
1336 continue;
1337 // int64 allocation_id = 5;
1338 case 5:
1339 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
1340 _impl_.allocation_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
1341 CHK_(ptr);
1342 } else {
1343 goto handle_unusual;
1344 }
1345 continue;
1346 // string allocator_name = 6;
1347 case 6:
1348 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
1349 auto str = _internal_mutable_allocator_name();
1350 ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
1351 CHK_(ptr);
1352 CHK_(::_pbi::VerifyUTF8(str, nullptr));
1353 } else {
1354 goto handle_unusual;
1355 }
1356 continue;
1357 default:
1358 goto handle_unusual;
1359 } // switch
1360 handle_unusual:
1361 if ((tag == 0) || ((tag & 7) == 4)) {
1362 CHK_(ptr);
1363 ctx->SetLastTag(tag);
1364 goto message_done;
1365 }
1366 ptr = UnknownFieldParse(
1367 tag,
1368 _internal_metadata_.mutable_unknown_fields<std::string>(),
1369 ptr, ctx);
1370 CHK_(ptr != nullptr);
1371 } // while
1372 message_done:
1373 return ptr;
1374 failure:
1375 ptr = nullptr;
1376 goto message_done;
1377 #undef CHK_
1378 }

::uint8_t* MemoryLogRawAllocation::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryLogRawAllocation)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 step_id = 1;
  if (this->_internal_step_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_step_id(), target);
  }

  // string operation = 2;
  if (!this->_internal_operation().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
        this->_internal_operation().data(), static_cast<int>(this->_internal_operation().length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
        "tensorflow.MemoryLogRawAllocation.operation");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_operation(), target);
  }

  // int64 num_bytes = 3;
  if (this->_internal_num_bytes() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_num_bytes(), target);
  }

  // uint64 ptr = 4;
  if (this->_internal_ptr() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteUInt64ToArray(4, this->_internal_ptr(), target);
  }

  // int64 allocation_id = 5;
  if (this->_internal_allocation_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(5, this->_internal_allocation_id(), target);
  }

  // string allocator_name = 6;
  if (!this->_internal_allocator_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
        this->_internal_allocator_name().data(), static_cast<int>(this->_internal_allocator_name().length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
        "tensorflow.MemoryLogRawAllocation.allocator_name");
    target = stream->WriteStringMaybeAliased(
        6, this->_internal_allocator_name(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryLogRawAllocation)
  return target;
}
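
// ---------------------------------------------------------------------------
// Usage sketch (hand-written annotation, not emitted by protoc). These types
// derive from MessageLite, so they round-trip through the standard lite API;
// SerializeAsString() and ParseFromString() are ultimately routed through the
// _InternalSerialize/_InternalParse methods in this file. A minimal sketch,
// assuming the generated header is on the include path:
//
//   #include <string>
//   #include "tensorflow/core/framework/log_memory.pb.h"
//
//   std::string Roundtrip() {
//     tensorflow::MemoryLogRawAllocation alloc;
//     alloc.set_step_id(42);
//     alloc.set_operation("MatMul");
//     alloc.set_num_bytes(4096);
//     alloc.set_allocator_name("GPU_0_bfc");
//
//     std::string wire = alloc.SerializeAsString();
//
//     tensorflow::MemoryLogRawAllocation parsed;
//     parsed.ParseFromString(wire);
//     return parsed.operation();  // "MatMul"
//   }
//
// Fields left at their default (zero or empty) are skipped on the wire, which
// is why every branch in the serializer first tests against 0 or empty().
// ---------------------------------------------------------------------------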

size_t MemoryLogRawAllocation::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryLogRawAllocation)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string operation = 2;
  if (!this->_internal_operation().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_operation());
  }

  // string allocator_name = 6;
  if (!this->_internal_allocator_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_allocator_name());
  }

  // int64 step_id = 1;
  if (this->_internal_step_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_step_id());
  }

  // int64 num_bytes = 3;
  if (this->_internal_num_bytes() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_num_bytes());
  }

  // uint64 ptr = 4;
  if (this->_internal_ptr() != 0) {
    total_size += ::_pbi::WireFormatLite::UInt64SizePlusOne(this->_internal_ptr());
  }

  // int64 allocation_id = 5;
  if (this->_internal_allocation_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_allocation_id());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
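
// ---------------------------------------------------------------------------
// Worked size example (hand-written annotation, not emitted by protoc).
// Int64SizePlusOne(v) is the varint encoding length of v plus one byte for
// the tag; every field number here fits in a single tag byte. For instance,
// with step_id = 300 the varint needs 2 bytes (300 spans 9 bits), so the
// field contributes 1 (tag) + 2 (payload) = 3 bytes. A string contributes
// 1 (tag) + a varint length prefix + the bytes, which StringSize covers.
// The total is stored via SetCachedSize() so the serializer that typically
// runs immediately afterwards does not recompute it.
// ---------------------------------------------------------------------------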

void MemoryLogRawAllocation::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const MemoryLogRawAllocation*>(
      &from));
}

void MemoryLogRawAllocation::MergeFrom(const MemoryLogRawAllocation& from) {
  MemoryLogRawAllocation* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryLogRawAllocation)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_operation().empty()) {
    _this->_internal_set_operation(from._internal_operation());
  }
  if (!from._internal_allocator_name().empty()) {
    _this->_internal_set_allocator_name(from._internal_allocator_name());
  }
  if (from._internal_step_id() != 0) {
    _this->_internal_set_step_id(from._internal_step_id());
  }
  if (from._internal_num_bytes() != 0) {
    _this->_internal_set_num_bytes(from._internal_num_bytes());
  }
  if (from._internal_ptr() != 0) {
    _this->_internal_set_ptr(from._internal_ptr());
  }
  if (from._internal_allocation_id() != 0) {
    _this->_internal_set_allocation_id(from._internal_allocation_id());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void MemoryLogRawAllocation::CopyFrom(const MemoryLogRawAllocation& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryLogRawAllocation)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool MemoryLogRawAllocation::IsInitialized() const {
  return true;
}

void MemoryLogRawAllocation::InternalSwap(MemoryLogRawAllocation* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.operation_, lhs_arena,
      &other->_impl_.operation_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.allocator_name_, lhs_arena,
      &other->_impl_.allocator_name_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(MemoryLogRawAllocation, _impl_.allocation_id_)
      + sizeof(MemoryLogRawAllocation::_impl_.allocation_id_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(MemoryLogRawAllocation, _impl_.step_id_)>(
          reinterpret_cast<char*>(&_impl_.step_id_),
          reinterpret_cast<char*>(&other->_impl_.step_id_));
}
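
// ---------------------------------------------------------------------------
// Annotation (hand-written, not emitted by protoc): InternalSwap exploits the
// layout of Impl_. The trivially-copyable fields step_id_ through
// allocation_id_ are contiguous, so instead of swapping them one by one the
// generated code computes the byte span
//
//   PROTOBUF_FIELD_OFFSET(..., allocation_id_) + sizeof(allocation_id_)
//       - PROTOBUF_FIELD_OFFSET(..., step_id_)
//
// as a compile-time template argument and swaps the whole range with a single
// memswap<N>. The two string fields cannot join that range: ArenaStringPtr
// swaps must account for the source and destination arenas, hence the explicit
// ArenaStringPtr::InternalSwap calls above.
// ---------------------------------------------------------------------------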

std::string MemoryLogRawAllocation::GetTypeName() const {
  return "tensorflow.MemoryLogRawAllocation";
}


// ===================================================================

class MemoryLogRawDeallocation::_Internal {
 public:
};

MemoryLogRawDeallocation::MemoryLogRawDeallocation(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryLogRawDeallocation)
}
MemoryLogRawDeallocation::MemoryLogRawDeallocation(const MemoryLogRawDeallocation& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  MemoryLogRawDeallocation* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.operation_){}
    , decltype(_impl_.allocator_name_){}
    , decltype(_impl_.step_id_){}
    , decltype(_impl_.allocation_id_){}
    , decltype(_impl_.deferred_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.operation_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.operation_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_operation().empty()) {
    _this->_impl_.operation_.Set(from._internal_operation(),
      _this->GetArenaForAllocation());
  }
  _impl_.allocator_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.allocator_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_allocator_name().empty()) {
    _this->_impl_.allocator_name_.Set(from._internal_allocator_name(),
      _this->GetArenaForAllocation());
  }
  ::memcpy(&_impl_.step_id_, &from._impl_.step_id_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.deferred_) -
    reinterpret_cast<char*>(&_impl_.step_id_)) + sizeof(_impl_.deferred_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryLogRawDeallocation)
}

inline void MemoryLogRawDeallocation::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.operation_){}
    , decltype(_impl_.allocator_name_){}
    , decltype(_impl_.step_id_){::int64_t{0}}
    , decltype(_impl_.allocation_id_){::int64_t{0}}
    , decltype(_impl_.deferred_){false}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.operation_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.operation_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  _impl_.allocator_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.allocator_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

MemoryLogRawDeallocation::~MemoryLogRawDeallocation() {
  // @@protoc_insertion_point(destructor:tensorflow.MemoryLogRawDeallocation)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}

inline void MemoryLogRawDeallocation::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.operation_.Destroy();
  _impl_.allocator_name_.Destroy();
}

void MemoryLogRawDeallocation::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void MemoryLogRawDeallocation::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryLogRawDeallocation)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.operation_.ClearToEmpty();
  _impl_.allocator_name_.ClearToEmpty();
  ::memset(&_impl_.step_id_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.deferred_) -
      reinterpret_cast<char*>(&_impl_.step_id_)) + sizeof(_impl_.deferred_));
  _internal_metadata_.Clear<std::string>();
}
const char* MemoryLogRawDeallocation::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // int64 step_id = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          _impl_.step_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string operation = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_operation();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int64 allocation_id = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
          _impl_.allocation_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string allocator_name = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
          auto str = _internal_mutable_allocator_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // bool deferred = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) {
          _impl_.deferred_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* MemoryLogRawDeallocation::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryLogRawDeallocation)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 step_id = 1;
  if (this->_internal_step_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_step_id(), target);
  }

  // string operation = 2;
  if (!this->_internal_operation().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
        this->_internal_operation().data(), static_cast<int>(this->_internal_operation().length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
        "tensorflow.MemoryLogRawDeallocation.operation");
    target = stream->WriteStringMaybeAliased(
        2, this->_internal_operation(), target);
  }

  // int64 allocation_id = 3;
  if (this->_internal_allocation_id() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_allocation_id(), target);
  }

  // string allocator_name = 4;
  if (!this->_internal_allocator_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
        this->_internal_allocator_name().data(), static_cast<int>(this->_internal_allocator_name().length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
        "tensorflow.MemoryLogRawDeallocation.allocator_name");
    target = stream->WriteStringMaybeAliased(
        4, this->_internal_allocator_name(), target);
  }

  // bool deferred = 5;
  if (this->_internal_deferred() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteBoolToArray(5, this->_internal_deferred(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryLogRawDeallocation)
  return target;
}

size_t MemoryLogRawDeallocation::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryLogRawDeallocation)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string operation = 2;
  if (!this->_internal_operation().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_operation());
  }

  // string allocator_name = 4;
  if (!this->_internal_allocator_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_allocator_name());
  }

  // int64 step_id = 1;
  if (this->_internal_step_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_step_id());
  }

  // int64 allocation_id = 3;
  if (this->_internal_allocation_id() != 0) {
    total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_allocation_id());
  }

  // bool deferred = 5;
  if (this->_internal_deferred() != 0) {
    total_size += 1 + 1;
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
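
// ---------------------------------------------------------------------------
// Annotation (hand-written, not emitted by protoc): the `1 + 1` above is the
// bool case of the size computation: one byte for the field-5 tag and exactly
// one payload byte, since a bool is encoded as the varint 0 or 1 and is only
// written at all when it is true.
// ---------------------------------------------------------------------------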

void MemoryLogRawDeallocation::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const MemoryLogRawDeallocation*>(
      &from));
}

void MemoryLogRawDeallocation::MergeFrom(const MemoryLogRawDeallocation& from) {
  MemoryLogRawDeallocation* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryLogRawDeallocation)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_operation().empty()) {
    _this->_internal_set_operation(from._internal_operation());
  }
  if (!from._internal_allocator_name().empty()) {
    _this->_internal_set_allocator_name(from._internal_allocator_name());
  }
  if (from._internal_step_id() != 0) {
    _this->_internal_set_step_id(from._internal_step_id());
  }
  if (from._internal_allocation_id() != 0) {
    _this->_internal_set_allocation_id(from._internal_allocation_id());
  }
  if (from._internal_deferred() != 0) {
    _this->_internal_set_deferred(from._internal_deferred());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void MemoryLogRawDeallocation::CopyFrom(const MemoryLogRawDeallocation& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryLogRawDeallocation)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool MemoryLogRawDeallocation::IsInitialized() const {
  return true;
}

void MemoryLogRawDeallocation::InternalSwap(MemoryLogRawDeallocation* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.operation_, lhs_arena,
      &other->_impl_.operation_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.allocator_name_, lhs_arena,
      &other->_impl_.allocator_name_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(MemoryLogRawDeallocation, _impl_.deferred_)
      + sizeof(MemoryLogRawDeallocation::_impl_.deferred_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(MemoryLogRawDeallocation, _impl_.step_id_)>(
          reinterpret_cast<char*>(&_impl_.step_id_),
          reinterpret_cast<char*>(&other->_impl_.step_id_));
}

std::string MemoryLogRawDeallocation::GetTypeName() const {
  return "tensorflow.MemoryLogRawDeallocation";
}


// @@protoc_insertion_point(namespace_scope)
}  // namespace tensorflow
PROTOBUF_NAMESPACE_OPEN
template<> PROTOBUF_NOINLINE ::tensorflow::MemoryLogStep*
Arena::CreateMaybeMessage< ::tensorflow::MemoryLogStep >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::MemoryLogStep >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::MemoryLogTensorAllocation*
Arena::CreateMaybeMessage< ::tensorflow::MemoryLogTensorAllocation >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::MemoryLogTensorAllocation >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::MemoryLogTensorDeallocation*
Arena::CreateMaybeMessage< ::tensorflow::MemoryLogTensorDeallocation >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::MemoryLogTensorDeallocation >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::MemoryLogTensorOutput*
Arena::CreateMaybeMessage< ::tensorflow::MemoryLogTensorOutput >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::MemoryLogTensorOutput >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::MemoryLogRawAllocation*
Arena::CreateMaybeMessage< ::tensorflow::MemoryLogRawAllocation >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::MemoryLogRawAllocation >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::MemoryLogRawDeallocation*
Arena::CreateMaybeMessage< ::tensorflow::MemoryLogRawDeallocation >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::MemoryLogRawDeallocation >(arena);
}
PROTOBUF_NAMESPACE_CLOSE
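
// ---------------------------------------------------------------------------
// Arena usage sketch (hand-written annotation, not emitted by protoc). The
// CreateMaybeMessage<> specializations above are what route arena-aware
// construction to these generated types. A minimal sketch of the user-facing
// side, assuming the generated header is on the include path:
//
//   #include <google/protobuf/arena.h>
//   #include "tensorflow/core/framework/log_memory.pb.h"
//
//   void LogOnArena() {
//     google::protobuf::Arena arena;
//     // Allocated on the arena; freed in bulk when `arena` is destroyed.
//     auto* step = google::protobuf::Arena::CreateMessage<
//         tensorflow::MemoryLogStep>(&arena);
//     step->set_step_id(7);
//     step->set_handle("step-handle");
//   }  // no per-message delete needed
// ---------------------------------------------------------------------------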

// @@protoc_insertion_point(global_scope)
#include <google/protobuf/port_undef.inc>