// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/framework/log_memory.proto

#ifndef GOOGLE_PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto
#define GOOGLE_PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto

#include <cstdint>
#include <limits>
#include <string>

#include <google/protobuf/port_def.inc>
#if PROTOBUF_VERSION < 3021000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3021012 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif

#include <google/protobuf/port_undef.inc>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata_lite.h>
#include <google/protobuf/message_lite.h>
#include <google/protobuf/repeated_field.h>  // IWYU pragma: export
#include <google/protobuf/extension_set.h>  // IWYU pragma: export
#include "tensorflow/core/framework/tensor_description.pb.h"
// @@protoc_insertion_point(includes)
#include <google/protobuf/port_def.inc>
#define PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto
PROTOBUF_NAMESPACE_OPEN
namespace internal {
class AnyMetadata;
}  // namespace internal
PROTOBUF_NAMESPACE_CLOSE

// Internal implementation detail -- do not use these members.
struct TableStruct_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto {
  static const ::uint32_t offsets[];
};
namespace tensorflow {
class MemoryLogRawAllocation;
struct MemoryLogRawAllocationDefaultTypeInternal;
extern MemoryLogRawAllocationDefaultTypeInternal _MemoryLogRawAllocation_default_instance_;
class MemoryLogRawDeallocation;
struct MemoryLogRawDeallocationDefaultTypeInternal;
extern MemoryLogRawDeallocationDefaultTypeInternal _MemoryLogRawDeallocation_default_instance_;
class MemoryLogStep;
struct MemoryLogStepDefaultTypeInternal;
extern MemoryLogStepDefaultTypeInternal _MemoryLogStep_default_instance_;
class MemoryLogTensorAllocation;
struct MemoryLogTensorAllocationDefaultTypeInternal;
extern MemoryLogTensorAllocationDefaultTypeInternal _MemoryLogTensorAllocation_default_instance_;
class MemoryLogTensorDeallocation;
struct MemoryLogTensorDeallocationDefaultTypeInternal;
extern MemoryLogTensorDeallocationDefaultTypeInternal _MemoryLogTensorDeallocation_default_instance_;
class MemoryLogTensorOutput;
struct MemoryLogTensorOutputDefaultTypeInternal;
extern MemoryLogTensorOutputDefaultTypeInternal _MemoryLogTensorOutput_default_instance_;
}  // namespace tensorflow
PROTOBUF_NAMESPACE_OPEN
template<> ::tensorflow::MemoryLogRawAllocation* Arena::CreateMaybeMessage<::tensorflow::MemoryLogRawAllocation>(Arena*);
template<> ::tensorflow::MemoryLogRawDeallocation* Arena::CreateMaybeMessage<::tensorflow::MemoryLogRawDeallocation>(Arena*);
template<> ::tensorflow::MemoryLogStep* Arena::CreateMaybeMessage<::tensorflow::MemoryLogStep>(Arena*);
template<> ::tensorflow::MemoryLogTensorAllocation* Arena::CreateMaybeMessage<::tensorflow::MemoryLogTensorAllocation>(Arena*);
template<> ::tensorflow::MemoryLogTensorDeallocation* Arena::CreateMaybeMessage<::tensorflow::MemoryLogTensorDeallocation>(Arena*);
template<> ::tensorflow::MemoryLogTensorOutput* Arena::CreateMaybeMessage<::tensorflow::MemoryLogTensorOutput>(Arena*);
PROTOBUF_NAMESPACE_CLOSE
namespace tensorflow {

// ===================================================================

class MemoryLogStep final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.MemoryLogStep) */ {
 public:
  inline MemoryLogStep() : MemoryLogStep(nullptr) {}
  ~MemoryLogStep() override;
  explicit PROTOBUF_CONSTEXPR MemoryLogStep(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  MemoryLogStep(const MemoryLogStep& from);
  MemoryLogStep(MemoryLogStep&& from) noexcept
    : MemoryLogStep() {
    *this = ::std::move(from);
  }

  inline MemoryLogStep& operator=(const MemoryLogStep& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline MemoryLogStep& operator=(MemoryLogStep&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const MemoryLogStep& default_instance() {
    return *internal_default_instance();
  }
  static inline const MemoryLogStep* internal_default_instance() {
    return reinterpret_cast<const MemoryLogStep*>(
               &_MemoryLogStep_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    0;

  friend void swap(MemoryLogStep& a, MemoryLogStep& b) {
    a.Swap(&b);
  }
  inline void Swap(MemoryLogStep* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(MemoryLogStep* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  MemoryLogStep* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<MemoryLogStep>(arena);
  }
  MemoryLogStep* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const MemoryLogStep& from);
  void MergeFrom(const MemoryLogStep& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(MemoryLogStep* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.MemoryLogStep";
  }
  protected:
  explicit MemoryLogStep(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kHandleFieldNumber = 2,
    kStepIdFieldNumber = 1,
  };
  // string handle = 2;
  void clear_handle();
  const std::string& handle() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_handle(ArgT0&& arg0, ArgT... args);
  std::string* mutable_handle();
  PROTOBUF_NODISCARD std::string* release_handle();
  void set_allocated_handle(std::string* handle);
  private:
  const std::string& _internal_handle() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_handle(const std::string& value);
  std::string* _internal_mutable_handle();
  public:

  // int64 step_id = 1;
  void clear_step_id();
  ::int64_t step_id() const;
  void set_step_id(::int64_t value);
  private:
  ::int64_t _internal_step_id() const;
  void _internal_set_step_id(::int64_t value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.MemoryLogStep)
  private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr handle_;
    ::int64_t step_id_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto;
};
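// Usage sketch (editorial, not emitted by protoc; illustrative values):
// MemoryLogStep derives from MessageLite, so it supports the lite wire API
// (SerializeAsString / ParseFromString) but not reflection or DebugString.
// A minimal round trip:
//
//   tensorflow::MemoryLogStep step;
//   step.set_step_id(42);                      // int64 step_id = 1;
//   step.set_handle("train_step");             // string handle = 2;
//   std::string wire = step.SerializeAsString();
//
//   tensorflow::MemoryLogStep parsed;
//   bool ok = parsed.ParseFromString(wire);    // true on success
//   // parsed.step_id() == 42, parsed.handle() == "train_step"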
// -------------------------------------------------------------------

class MemoryLogTensorAllocation final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.MemoryLogTensorAllocation) */ {
 public:
  inline MemoryLogTensorAllocation() : MemoryLogTensorAllocation(nullptr) {}
  ~MemoryLogTensorAllocation() override;
  explicit PROTOBUF_CONSTEXPR MemoryLogTensorAllocation(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  MemoryLogTensorAllocation(const MemoryLogTensorAllocation& from);
  MemoryLogTensorAllocation(MemoryLogTensorAllocation&& from) noexcept
    : MemoryLogTensorAllocation() {
    *this = ::std::move(from);
  }

  inline MemoryLogTensorAllocation& operator=(const MemoryLogTensorAllocation& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline MemoryLogTensorAllocation& operator=(MemoryLogTensorAllocation&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const MemoryLogTensorAllocation& default_instance() {
    return *internal_default_instance();
  }
  static inline const MemoryLogTensorAllocation* internal_default_instance() {
    return reinterpret_cast<const MemoryLogTensorAllocation*>(
               &_MemoryLogTensorAllocation_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    1;

  friend void swap(MemoryLogTensorAllocation& a, MemoryLogTensorAllocation& b) {
    a.Swap(&b);
  }
  inline void Swap(MemoryLogTensorAllocation* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(MemoryLogTensorAllocation* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  MemoryLogTensorAllocation* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<MemoryLogTensorAllocation>(arena);
  }
  MemoryLogTensorAllocation* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const MemoryLogTensorAllocation& from);
  void MergeFrom(const MemoryLogTensorAllocation& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(MemoryLogTensorAllocation* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.MemoryLogTensorAllocation";
  }
  protected:
  explicit MemoryLogTensorAllocation(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kKernelNameFieldNumber = 2,
    kTensorFieldNumber = 3,
    kStepIdFieldNumber = 1,
  };
  // string kernel_name = 2;
  void clear_kernel_name();
  const std::string& kernel_name() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_kernel_name(ArgT0&& arg0, ArgT... args);
  std::string* mutable_kernel_name();
  PROTOBUF_NODISCARD std::string* release_kernel_name();
  void set_allocated_kernel_name(std::string* kernel_name);
  private:
  const std::string& _internal_kernel_name() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_kernel_name(const std::string& value);
  std::string* _internal_mutable_kernel_name();
  public:

  // .tensorflow.TensorDescription tensor = 3;
  bool has_tensor() const;
  private:
  bool _internal_has_tensor() const;
  public:
  void clear_tensor();
  const ::tensorflow::TensorDescription& tensor() const;
  PROTOBUF_NODISCARD ::tensorflow::TensorDescription* release_tensor();
  ::tensorflow::TensorDescription* mutable_tensor();
  void set_allocated_tensor(::tensorflow::TensorDescription* tensor);
  private:
  const ::tensorflow::TensorDescription& _internal_tensor() const;
  ::tensorflow::TensorDescription* _internal_mutable_tensor();
  public:
  void unsafe_arena_set_allocated_tensor(
      ::tensorflow::TensorDescription* tensor);
  ::tensorflow::TensorDescription* unsafe_arena_release_tensor();

  // int64 step_id = 1;
  void clear_step_id();
  ::int64_t step_id() const;
  void set_step_id(::int64_t value);
  private:
  ::int64_t _internal_step_id() const;
  void _internal_set_step_id(::int64_t value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.MemoryLogTensorAllocation)
  private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr kernel_name_;
    ::tensorflow::TensorDescription* tensor_;
    ::int64_t step_id_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto;
};
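// Usage sketch (editorial, illustrative): the `tensor` field is a singular
// message, so it has explicit presence. has_tensor() stays false until the
// submessage exists, and mutable_tensor() lazily allocates it (on this
// message's arena, if any):
//
//   tensorflow::MemoryLogTensorAllocation alloc;
//   alloc.set_step_id(42);
//   alloc.set_kernel_name("MatMul");
//   // alloc.has_tensor() == false here.
//   tensorflow::TensorDescription* desc = alloc.mutable_tensor();
//   // alloc.has_tensor() == true; fill *desc as needed.
//   alloc.clear_tensor();   // deletes/clears the submessage; presence drops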
// -------------------------------------------------------------------

class MemoryLogTensorDeallocation final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.MemoryLogTensorDeallocation) */ {
 public:
  inline MemoryLogTensorDeallocation() : MemoryLogTensorDeallocation(nullptr) {}
  ~MemoryLogTensorDeallocation() override;
  explicit PROTOBUF_CONSTEXPR MemoryLogTensorDeallocation(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  MemoryLogTensorDeallocation(const MemoryLogTensorDeallocation& from);
  MemoryLogTensorDeallocation(MemoryLogTensorDeallocation&& from) noexcept
    : MemoryLogTensorDeallocation() {
    *this = ::std::move(from);
  }

  inline MemoryLogTensorDeallocation& operator=(const MemoryLogTensorDeallocation& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline MemoryLogTensorDeallocation& operator=(MemoryLogTensorDeallocation&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const MemoryLogTensorDeallocation& default_instance() {
    return *internal_default_instance();
  }
  static inline const MemoryLogTensorDeallocation* internal_default_instance() {
    return reinterpret_cast<const MemoryLogTensorDeallocation*>(
               &_MemoryLogTensorDeallocation_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    2;

  friend void swap(MemoryLogTensorDeallocation& a, MemoryLogTensorDeallocation& b) {
    a.Swap(&b);
  }
  inline void Swap(MemoryLogTensorDeallocation* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(MemoryLogTensorDeallocation* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  MemoryLogTensorDeallocation* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<MemoryLogTensorDeallocation>(arena);
  }
  MemoryLogTensorDeallocation* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const MemoryLogTensorDeallocation& from);
  void MergeFrom(const MemoryLogTensorDeallocation& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(MemoryLogTensorDeallocation* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.MemoryLogTensorDeallocation";
  }
  protected:
  explicit MemoryLogTensorDeallocation(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kAllocatorNameFieldNumber = 2,
    kAllocationIdFieldNumber = 1,
  };
  // string allocator_name = 2;
  void clear_allocator_name();
  const std::string& allocator_name() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_allocator_name(ArgT0&& arg0, ArgT... args);
  std::string* mutable_allocator_name();
  PROTOBUF_NODISCARD std::string* release_allocator_name();
  void set_allocated_allocator_name(std::string* allocator_name);
  private:
  const std::string& _internal_allocator_name() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_allocator_name(const std::string& value);
  std::string* _internal_mutable_allocator_name();
  public:

  // int64 allocation_id = 1;
  void clear_allocation_id();
  ::int64_t allocation_id() const;
  void set_allocation_id(::int64_t value);
  private:
  ::int64_t _internal_allocation_id() const;
  void _internal_set_allocation_id(::int64_t value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.MemoryLogTensorDeallocation)
  private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr allocator_name_;
    ::int64_t allocation_id_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto;
};
// -------------------------------------------------------------------

class MemoryLogTensorOutput final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.MemoryLogTensorOutput) */ {
 public:
  inline MemoryLogTensorOutput() : MemoryLogTensorOutput(nullptr) {}
  ~MemoryLogTensorOutput() override;
  explicit PROTOBUF_CONSTEXPR MemoryLogTensorOutput(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  MemoryLogTensorOutput(const MemoryLogTensorOutput& from);
  MemoryLogTensorOutput(MemoryLogTensorOutput&& from) noexcept
    : MemoryLogTensorOutput() {
    *this = ::std::move(from);
  }

  inline MemoryLogTensorOutput& operator=(const MemoryLogTensorOutput& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline MemoryLogTensorOutput& operator=(MemoryLogTensorOutput&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const MemoryLogTensorOutput& default_instance() {
    return *internal_default_instance();
  }
  static inline const MemoryLogTensorOutput* internal_default_instance() {
    return reinterpret_cast<const MemoryLogTensorOutput*>(
               &_MemoryLogTensorOutput_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    3;

  friend void swap(MemoryLogTensorOutput& a, MemoryLogTensorOutput& b) {
    a.Swap(&b);
  }
  inline void Swap(MemoryLogTensorOutput* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(MemoryLogTensorOutput* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  MemoryLogTensorOutput* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<MemoryLogTensorOutput>(arena);
  }
  MemoryLogTensorOutput* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const MemoryLogTensorOutput& from);
  void MergeFrom(const MemoryLogTensorOutput& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(MemoryLogTensorOutput* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.MemoryLogTensorOutput";
  }
  protected:
  explicit MemoryLogTensorOutput(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kKernelNameFieldNumber = 2,
    kTensorFieldNumber = 4,
    kStepIdFieldNumber = 1,
    kIndexFieldNumber = 3,
  };
  // string kernel_name = 2;
  void clear_kernel_name();
  const std::string& kernel_name() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_kernel_name(ArgT0&& arg0, ArgT... args);
  std::string* mutable_kernel_name();
  PROTOBUF_NODISCARD std::string* release_kernel_name();
  void set_allocated_kernel_name(std::string* kernel_name);
  private:
  const std::string& _internal_kernel_name() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_kernel_name(const std::string& value);
  std::string* _internal_mutable_kernel_name();
  public:

  // .tensorflow.TensorDescription tensor = 4;
  bool has_tensor() const;
  private:
  bool _internal_has_tensor() const;
  public:
  void clear_tensor();
  const ::tensorflow::TensorDescription& tensor() const;
  PROTOBUF_NODISCARD ::tensorflow::TensorDescription* release_tensor();
  ::tensorflow::TensorDescription* mutable_tensor();
  void set_allocated_tensor(::tensorflow::TensorDescription* tensor);
  private:
  const ::tensorflow::TensorDescription& _internal_tensor() const;
  ::tensorflow::TensorDescription* _internal_mutable_tensor();
  public:
  void unsafe_arena_set_allocated_tensor(
      ::tensorflow::TensorDescription* tensor);
  ::tensorflow::TensorDescription* unsafe_arena_release_tensor();

  // int64 step_id = 1;
  void clear_step_id();
  ::int64_t step_id() const;
  void set_step_id(::int64_t value);
  private:
  ::int64_t _internal_step_id() const;
  void _internal_set_step_id(::int64_t value);
  public:

  // int32 index = 3;
  void clear_index();
  ::int32_t index() const;
  void set_index(::int32_t value);
  private:
  ::int32_t _internal_index() const;
  void _internal_set_index(::int32_t value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.MemoryLogTensorOutput)
  private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr kernel_name_;
    ::tensorflow::TensorDescription* tensor_;
    ::int64_t step_id_;
    ::int32_t index_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto;
};
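// Arena sketch (editorial, illustrative): these messages are arena-enabled
// (note InternalArenaConstructable_ above), so they can be allocated on a
// google::protobuf::Arena. The arena then owns the message together with its
// string and submessage storage, and no delete is required:
//
//   google::protobuf::Arena arena;
//   auto* out = google::protobuf::Arena::CreateMessage<
//       tensorflow::MemoryLogTensorOutput>(&arena);
//   out->set_step_id(42);
//   out->set_index(0);           // int32 index = 3;
//   out->mutable_tensor();       // submessage allocated on the same arena
//   // Everything is freed when `arena` goes out of scope.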
// -------------------------------------------------------------------

class MemoryLogRawAllocation final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.MemoryLogRawAllocation) */ {
 public:
  inline MemoryLogRawAllocation() : MemoryLogRawAllocation(nullptr) {}
  ~MemoryLogRawAllocation() override;
  explicit PROTOBUF_CONSTEXPR MemoryLogRawAllocation(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  MemoryLogRawAllocation(const MemoryLogRawAllocation& from);
  MemoryLogRawAllocation(MemoryLogRawAllocation&& from) noexcept
    : MemoryLogRawAllocation() {
    *this = ::std::move(from);
  }

  inline MemoryLogRawAllocation& operator=(const MemoryLogRawAllocation& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline MemoryLogRawAllocation& operator=(MemoryLogRawAllocation&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const MemoryLogRawAllocation& default_instance() {
    return *internal_default_instance();
  }
  static inline const MemoryLogRawAllocation* internal_default_instance() {
    return reinterpret_cast<const MemoryLogRawAllocation*>(
               &_MemoryLogRawAllocation_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    4;

  friend void swap(MemoryLogRawAllocation& a, MemoryLogRawAllocation& b) {
    a.Swap(&b);
  }
  inline void Swap(MemoryLogRawAllocation* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(MemoryLogRawAllocation* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  MemoryLogRawAllocation* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<MemoryLogRawAllocation>(arena);
  }
  MemoryLogRawAllocation* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const MemoryLogRawAllocation& from);
  void MergeFrom(const MemoryLogRawAllocation& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(MemoryLogRawAllocation* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.MemoryLogRawAllocation";
  }
  protected:
  explicit MemoryLogRawAllocation(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kOperationFieldNumber = 2,
    kAllocatorNameFieldNumber = 6,
    kStepIdFieldNumber = 1,
    kNumBytesFieldNumber = 3,
    kPtrFieldNumber = 4,
    kAllocationIdFieldNumber = 5,
  };
  // string operation = 2;
  void clear_operation();
  const std::string& operation() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_operation(ArgT0&& arg0, ArgT... args);
  std::string* mutable_operation();
  PROTOBUF_NODISCARD std::string* release_operation();
  void set_allocated_operation(std::string* operation);
  private:
  const std::string& _internal_operation() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_operation(const std::string& value);
  std::string* _internal_mutable_operation();
  public:

  // string allocator_name = 6;
  void clear_allocator_name();
  const std::string& allocator_name() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_allocator_name(ArgT0&& arg0, ArgT... args);
  std::string* mutable_allocator_name();
  PROTOBUF_NODISCARD std::string* release_allocator_name();
  void set_allocated_allocator_name(std::string* allocator_name);
  private:
  const std::string& _internal_allocator_name() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_allocator_name(const std::string& value);
  std::string* _internal_mutable_allocator_name();
  public:

  // int64 step_id = 1;
  void clear_step_id();
  ::int64_t step_id() const;
  void set_step_id(::int64_t value);
  private:
  ::int64_t _internal_step_id() const;
  void _internal_set_step_id(::int64_t value);
  public:

  // int64 num_bytes = 3;
  void clear_num_bytes();
  ::int64_t num_bytes() const;
  void set_num_bytes(::int64_t value);
  private:
  ::int64_t _internal_num_bytes() const;
  void _internal_set_num_bytes(::int64_t value);
  public:

  // uint64 ptr = 4;
  void clear_ptr();
  ::uint64_t ptr() const;
  void set_ptr(::uint64_t value);
  private:
  ::uint64_t _internal_ptr() const;
  void _internal_set_ptr(::uint64_t value);
  public:

  // int64 allocation_id = 5;
  void clear_allocation_id();
  ::int64_t allocation_id() const;
  void set_allocation_id(::int64_t value);
  private:
  ::int64_t _internal_allocation_id() const;
  void _internal_set_allocation_id(::int64_t value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.MemoryLogRawAllocation)
  private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr operation_;
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr allocator_name_;
    ::int64_t step_id_;
    ::int64_t num_bytes_;
    ::uint64_t ptr_;
    ::int64_t allocation_id_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto;
};
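// Usage sketch (editorial, illustrative values): MemoryLogRawAllocation
// records a raw allocator event. The `ptr` field is a uint64 on the wire, so
// an actual pointer has to be cast to an integer explicitly:
//
//   void* p = nullptr;  // stand-in for the block returned by some allocator
//   tensorflow::MemoryLogRawAllocation raw;
//   raw.set_step_id(42);
//   raw.set_operation("MatMul");
//   raw.set_num_bytes(4096);
//   raw.set_ptr(reinterpret_cast<::uint64_t>(p));   // uint64 ptr = 4;
//   raw.set_allocation_id(7);
//   raw.set_allocator_name("GPU_0_bfc");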
// -------------------------------------------------------------------

class MemoryLogRawDeallocation final :
    public ::PROTOBUF_NAMESPACE_ID::MessageLite /* @@protoc_insertion_point(class_definition:tensorflow.MemoryLogRawDeallocation) */ {
 public:
  inline MemoryLogRawDeallocation() : MemoryLogRawDeallocation(nullptr) {}
  ~MemoryLogRawDeallocation() override;
  explicit PROTOBUF_CONSTEXPR MemoryLogRawDeallocation(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);

  MemoryLogRawDeallocation(const MemoryLogRawDeallocation& from);
  MemoryLogRawDeallocation(MemoryLogRawDeallocation&& from) noexcept
    : MemoryLogRawDeallocation() {
    *this = ::std::move(from);
  }

  inline MemoryLogRawDeallocation& operator=(const MemoryLogRawDeallocation& from) {
    if (this == &from) return *this;
    CopyFrom(from);
    return *this;
  }
  inline MemoryLogRawDeallocation& operator=(MemoryLogRawDeallocation&& from) noexcept {
    if (this == &from) return *this;
    if (GetOwningArena() == from.GetOwningArena()
  #ifdef PROTOBUF_FORCE_COPY_IN_MOVE
        && GetOwningArena() != nullptr
  #endif  // !PROTOBUF_FORCE_COPY_IN_MOVE
    ) {
      InternalSwap(&from);
    } else {
      CopyFrom(from);
    }
    return *this;
  }

  static const MemoryLogRawDeallocation& default_instance() {
    return *internal_default_instance();
  }
  static inline const MemoryLogRawDeallocation* internal_default_instance() {
    return reinterpret_cast<const MemoryLogRawDeallocation*>(
               &_MemoryLogRawDeallocation_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    5;

  friend void swap(MemoryLogRawDeallocation& a, MemoryLogRawDeallocation& b) {
    a.Swap(&b);
  }
  inline void Swap(MemoryLogRawDeallocation* other) {
    if (other == this) return;
  #ifdef PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() != nullptr &&
        GetOwningArena() == other->GetOwningArena()) {
  #else  // PROTOBUF_FORCE_COPY_IN_SWAP
    if (GetOwningArena() == other->GetOwningArena()) {
  #endif  // !PROTOBUF_FORCE_COPY_IN_SWAP
      InternalSwap(other);
    } else {
      ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
    }
  }
  void UnsafeArenaSwap(MemoryLogRawDeallocation* other) {
    if (other == this) return;
    GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
    InternalSwap(other);
  }

  // implements Message ----------------------------------------------

  MemoryLogRawDeallocation* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
    return CreateMaybeMessage<MemoryLogRawDeallocation>(arena);
  }
  MemoryLogRawDeallocation* New() const {
    return New(nullptr);
  }
  void CheckTypeAndMergeFrom(const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) final;
  void CopyFrom(const MemoryLogRawDeallocation& from);
  void MergeFrom(const MemoryLogRawDeallocation& from);
  PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
  bool IsInitialized() const final;

  size_t ByteSizeLong() const final;
  const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
  ::uint8_t* _InternalSerialize(
      ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
  int GetCachedSize() const final { return _impl_._cached_size_.Get(); }

  private:
  void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
  void SharedDtor();
  void SetCachedSize(int size) const;
  void InternalSwap(MemoryLogRawDeallocation* other);

  private:
  friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
  static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
    return "tensorflow.MemoryLogRawDeallocation";
  }
  protected:
  explicit MemoryLogRawDeallocation(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                       bool is_message_owned = false);
  public:

  std::string GetTypeName() const final;

  // nested types ----------------------------------------------------

  // accessors -------------------------------------------------------

  enum : int {
    kOperationFieldNumber = 2,
    kAllocatorNameFieldNumber = 4,
    kStepIdFieldNumber = 1,
    kAllocationIdFieldNumber = 3,
    kDeferredFieldNumber = 5,
  };
  // string operation = 2;
  void clear_operation();
  const std::string& operation() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_operation(ArgT0&& arg0, ArgT... args);
  std::string* mutable_operation();
  PROTOBUF_NODISCARD std::string* release_operation();
  void set_allocated_operation(std::string* operation);
  private:
  const std::string& _internal_operation() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_operation(const std::string& value);
  std::string* _internal_mutable_operation();
  public:

  // string allocator_name = 4;
  void clear_allocator_name();
  const std::string& allocator_name() const;
  template <typename ArgT0 = const std::string&, typename... ArgT>
  void set_allocator_name(ArgT0&& arg0, ArgT... args);
  std::string* mutable_allocator_name();
  PROTOBUF_NODISCARD std::string* release_allocator_name();
  void set_allocated_allocator_name(std::string* allocator_name);
  private:
  const std::string& _internal_allocator_name() const;
  inline PROTOBUF_ALWAYS_INLINE void _internal_set_allocator_name(const std::string& value);
  std::string* _internal_mutable_allocator_name();
  public:

  // int64 step_id = 1;
  void clear_step_id();
  ::int64_t step_id() const;
  void set_step_id(::int64_t value);
  private:
  ::int64_t _internal_step_id() const;
  void _internal_set_step_id(::int64_t value);
  public:

  // int64 allocation_id = 3;
  void clear_allocation_id();
  ::int64_t allocation_id() const;
  void set_allocation_id(::int64_t value);
  private:
  ::int64_t _internal_allocation_id() const;
  void _internal_set_allocation_id(::int64_t value);
  public:

  // bool deferred = 5;
  void clear_deferred();
  bool deferred() const;
  void set_deferred(bool value);
  private:
  bool _internal_deferred() const;
  void _internal_set_deferred(bool value);
  public:

  // @@protoc_insertion_point(class_scope:tensorflow.MemoryLogRawDeallocation)
  private:
  class _Internal;

  template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr operation_;
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr allocator_name_;
    ::int64_t step_id_;
    ::int64_t allocation_id_;
    bool deferred_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto;
};
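// Swap sketch (editorial, illustrative): Swap() is cheap (a field-by-field
// pointer swap via InternalSwap) only when both messages share an owning
// arena, or both are heap-owned; otherwise it falls back to GenericSwap,
// which deep-copies through a temporary:
//
//   tensorflow::MemoryLogRawDeallocation a, b;   // both heap-owned
//   a.set_allocation_id(1);
//   b.set_allocation_id(2);
//   a.Swap(&b);                                  // fast path: InternalSwap
//   // a.allocation_id() == 2, b.allocation_id() == 1
//
// UnsafeArenaSwap skips the runtime check (it only DCHECKs) and is valid
// only when the caller can guarantee both sides share an owning arena.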
// ===================================================================


// ===================================================================

#ifdef __GNUC__
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif  // __GNUC__
// MemoryLogStep

// int64 step_id = 1;
inline void MemoryLogStep::clear_step_id() {
  _impl_.step_id_ = ::int64_t{0};
}
inline ::int64_t MemoryLogStep::_internal_step_id() const {
  return _impl_.step_id_;
}
inline ::int64_t MemoryLogStep::step_id() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogStep.step_id)
  return _internal_step_id();
}
inline void MemoryLogStep::_internal_set_step_id(::int64_t value) {

  _impl_.step_id_ = value;
}
inline void MemoryLogStep::set_step_id(::int64_t value) {
  _internal_set_step_id(value);
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogStep.step_id)
}

// string handle = 2;
inline void MemoryLogStep::clear_handle() {
  _impl_.handle_.ClearToEmpty();
}
inline const std::string& MemoryLogStep::handle() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogStep.handle)
  return _internal_handle();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void MemoryLogStep::set_handle(ArgT0&& arg0, ArgT... args) {

  _impl_.handle_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogStep.handle)
}
inline std::string* MemoryLogStep::mutable_handle() {
  std::string* _s = _internal_mutable_handle();
  // @@protoc_insertion_point(field_mutable:tensorflow.MemoryLogStep.handle)
  return _s;
}
inline const std::string& MemoryLogStep::_internal_handle() const {
  return _impl_.handle_.Get();
}
inline void MemoryLogStep::_internal_set_handle(const std::string& value) {

  _impl_.handle_.Set(value, GetArenaForAllocation());
}
inline std::string* MemoryLogStep::_internal_mutable_handle() {

  return _impl_.handle_.Mutable(GetArenaForAllocation());
}
inline std::string* MemoryLogStep::release_handle() {
  // @@protoc_insertion_point(field_release:tensorflow.MemoryLogStep.handle)
  return _impl_.handle_.Release();
}
inline void MemoryLogStep::set_allocated_handle(std::string* handle) {
  _impl_.handle_.SetAllocated(handle, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.handle_.IsDefault()) {
    _impl_.handle_.Set("", GetArenaForAllocation());
  }
#endif  // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.MemoryLogStep.handle)
}
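// Ownership sketch (editorial, illustrative): release_handle() transfers the
// string out of the message to the caller, and set_allocated_handle() hands a
// heap-allocated string in. When the message lives on an arena, release_*
// still returns a heap copy that the caller owns:
//
//   tensorflow::MemoryLogStep step;
//   step.set_handle("h");
//   std::unique_ptr<std::string> owned(step.release_handle());
//   // step.handle() is now empty; *owned == "h".
//   step.set_allocated_handle(new std::string("h2"));  // message takes ownership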

// -------------------------------------------------------------------

// MemoryLogTensorAllocation

// int64 step_id = 1;
inline void MemoryLogTensorAllocation::clear_step_id() {
  _impl_.step_id_ = ::int64_t{0};
}
inline ::int64_t MemoryLogTensorAllocation::_internal_step_id() const {
  return _impl_.step_id_;
}
inline ::int64_t MemoryLogTensorAllocation::step_id() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogTensorAllocation.step_id)
  return _internal_step_id();
}
inline void MemoryLogTensorAllocation::_internal_set_step_id(::int64_t value) {

  _impl_.step_id_ = value;
}
inline void MemoryLogTensorAllocation::set_step_id(::int64_t value) {
  _internal_set_step_id(value);
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogTensorAllocation.step_id)
}

// string kernel_name = 2;
inline void MemoryLogTensorAllocation::clear_kernel_name() {
  _impl_.kernel_name_.ClearToEmpty();
}
inline const std::string& MemoryLogTensorAllocation::kernel_name() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogTensorAllocation.kernel_name)
  return _internal_kernel_name();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void MemoryLogTensorAllocation::set_kernel_name(ArgT0&& arg0, ArgT... args) {

  _impl_.kernel_name_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogTensorAllocation.kernel_name)
}
inline std::string* MemoryLogTensorAllocation::mutable_kernel_name() {
  std::string* _s = _internal_mutable_kernel_name();
  // @@protoc_insertion_point(field_mutable:tensorflow.MemoryLogTensorAllocation.kernel_name)
  return _s;
}
inline const std::string& MemoryLogTensorAllocation::_internal_kernel_name() const {
  return _impl_.kernel_name_.Get();
}
inline void MemoryLogTensorAllocation::_internal_set_kernel_name(const std::string& value) {

  _impl_.kernel_name_.Set(value, GetArenaForAllocation());
}
inline std::string* MemoryLogTensorAllocation::_internal_mutable_kernel_name() {

  return _impl_.kernel_name_.Mutable(GetArenaForAllocation());
}
inline std::string* MemoryLogTensorAllocation::release_kernel_name() {
  // @@protoc_insertion_point(field_release:tensorflow.MemoryLogTensorAllocation.kernel_name)
  return _impl_.kernel_name_.Release();
}
inline void MemoryLogTensorAllocation::set_allocated_kernel_name(std::string* kernel_name) {
  _impl_.kernel_name_.SetAllocated(kernel_name, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.kernel_name_.IsDefault()) {
    _impl_.kernel_name_.Set("", GetArenaForAllocation());
  }
#endif  // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.MemoryLogTensorAllocation.kernel_name)
}

// .tensorflow.TensorDescription tensor = 3;
inline bool MemoryLogTensorAllocation::_internal_has_tensor() const {
  return this != internal_default_instance() && _impl_.tensor_ != nullptr;
}
inline bool MemoryLogTensorAllocation::has_tensor() const {
  return _internal_has_tensor();
}
inline const ::tensorflow::TensorDescription& MemoryLogTensorAllocation::_internal_tensor() const {
  const ::tensorflow::TensorDescription* p = _impl_.tensor_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::TensorDescription&>(
      ::tensorflow::_TensorDescription_default_instance_);
}
inline const ::tensorflow::TensorDescription& MemoryLogTensorAllocation::tensor() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogTensorAllocation.tensor)
  return _internal_tensor();
}
inline void MemoryLogTensorAllocation::unsafe_arena_set_allocated_tensor(
    ::tensorflow::TensorDescription* tensor) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.tensor_);
  }
  _impl_.tensor_ = tensor;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.MemoryLogTensorAllocation.tensor)
}
inline ::tensorflow::TensorDescription* MemoryLogTensorAllocation::release_tensor() {

  ::tensorflow::TensorDescription* temp = _impl_.tensor_;
  _impl_.tensor_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
inline ::tensorflow::TensorDescription* MemoryLogTensorAllocation::unsafe_arena_release_tensor() {
  // @@protoc_insertion_point(field_release:tensorflow.MemoryLogTensorAllocation.tensor)

  ::tensorflow::TensorDescription* temp = _impl_.tensor_;
  _impl_.tensor_ = nullptr;
  return temp;
}
inline ::tensorflow::TensorDescription* MemoryLogTensorAllocation::_internal_mutable_tensor() {

  if (_impl_.tensor_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::TensorDescription>(GetArenaForAllocation());
    _impl_.tensor_ = p;
  }
  return _impl_.tensor_;
}
inline ::tensorflow::TensorDescription* MemoryLogTensorAllocation::mutable_tensor() {
  ::tensorflow::TensorDescription* _msg = _internal_mutable_tensor();
  // @@protoc_insertion_point(field_mutable:tensorflow.MemoryLogTensorAllocation.tensor)
  return _msg;
}
inline void MemoryLogTensorAllocation::set_allocated_tensor(::tensorflow::TensorDescription* tensor) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.tensor_);
  }
  if (tensor) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
            reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(tensor));
    if (message_arena != submessage_arena) {
      tensor = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, tensor, submessage_arena);
    }

  } else {

  }
  _impl_.tensor_ = tensor;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.MemoryLogTensorAllocation.tensor)
}
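// Ownership sketch (editorial, illustrative): set_allocated_tensor() takes a
// heap-owned TensorDescription. As the code above shows, when the message and
// the submessage live in different ownership domains (arena vs. heap),
// GetOwnedMessage copies the submessage into the message's domain, which is
// why the unsafe_arena_* variants exist for callers that can prove both sides
// already share an arena:
//
//   auto* desc = new tensorflow::TensorDescription;
//   tensorflow::MemoryLogTensorAllocation alloc;     // heap-owned message
//   alloc.set_allocated_tensor(desc);                // alloc now owns desc
//   tensorflow::TensorDescription* back = alloc.release_tensor();
//   delete back;                                     // caller owns it again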

// -------------------------------------------------------------------

// MemoryLogTensorDeallocation

// int64 allocation_id = 1;
inline void MemoryLogTensorDeallocation::clear_allocation_id() {
  _impl_.allocation_id_ = ::int64_t{0};
}
inline ::int64_t MemoryLogTensorDeallocation::_internal_allocation_id() const {
  return _impl_.allocation_id_;
}
inline ::int64_t MemoryLogTensorDeallocation::allocation_id() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogTensorDeallocation.allocation_id)
  return _internal_allocation_id();
}
inline void MemoryLogTensorDeallocation::_internal_set_allocation_id(::int64_t value) {

  _impl_.allocation_id_ = value;
}
inline void MemoryLogTensorDeallocation::set_allocation_id(::int64_t value) {
  _internal_set_allocation_id(value);
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogTensorDeallocation.allocation_id)
}

// string allocator_name = 2;
inline void MemoryLogTensorDeallocation::clear_allocator_name() {
  _impl_.allocator_name_.ClearToEmpty();
}
inline const std::string& MemoryLogTensorDeallocation::allocator_name() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogTensorDeallocation.allocator_name)
  return _internal_allocator_name();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void MemoryLogTensorDeallocation::set_allocator_name(ArgT0&& arg0, ArgT... args) {

  _impl_.allocator_name_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogTensorDeallocation.allocator_name)
}
inline std::string* MemoryLogTensorDeallocation::mutable_allocator_name() {
  std::string* _s = _internal_mutable_allocator_name();
  // @@protoc_insertion_point(field_mutable:tensorflow.MemoryLogTensorDeallocation.allocator_name)
  return _s;
}
inline const std::string& MemoryLogTensorDeallocation::_internal_allocator_name() const {
  return _impl_.allocator_name_.Get();
}
inline void MemoryLogTensorDeallocation::_internal_set_allocator_name(const std::string& value) {

  _impl_.allocator_name_.Set(value, GetArenaForAllocation());
}
inline std::string* MemoryLogTensorDeallocation::_internal_mutable_allocator_name() {

  return _impl_.allocator_name_.Mutable(GetArenaForAllocation());
}
inline std::string* MemoryLogTensorDeallocation::release_allocator_name() {
  // @@protoc_insertion_point(field_release:tensorflow.MemoryLogTensorDeallocation.allocator_name)
  return _impl_.allocator_name_.Release();
}
inline void MemoryLogTensorDeallocation::set_allocated_allocator_name(std::string* allocator_name) {
  _impl_.allocator_name_.SetAllocated(allocator_name, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.allocator_name_.IsDefault()) {
    _impl_.allocator_name_.Set("", GetArenaForAllocation());
  }
#endif  // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.MemoryLogTensorDeallocation.allocator_name)
}
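// Illustrative sketch, not generated code: set_allocator_name() is a
// forwarding template over ArenaStringPtr::Set, so it accepts a std::string,
// a string literal, or a (const char*, size) pair. Hypothetical values:
//
//   tensorflow::MemoryLogTensorDeallocation dealloc_log;
//   dealloc_log.set_allocation_id(42);            // hypothetical id
//   dealloc_log.set_allocator_name("GPU_0_bfc");  // hypothetical allocator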

// -------------------------------------------------------------------

// MemoryLogTensorOutput

// int64 step_id = 1;
inline void MemoryLogTensorOutput::clear_step_id() {
  _impl_.step_id_ = ::int64_t{0};
}
inline ::int64_t MemoryLogTensorOutput::_internal_step_id() const {
  return _impl_.step_id_;
}
inline ::int64_t MemoryLogTensorOutput::step_id() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogTensorOutput.step_id)
  return _internal_step_id();
}
inline void MemoryLogTensorOutput::_internal_set_step_id(::int64_t value) {

  _impl_.step_id_ = value;
}
inline void MemoryLogTensorOutput::set_step_id(::int64_t value) {
  _internal_set_step_id(value);
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogTensorOutput.step_id)
}

// string kernel_name = 2;
inline void MemoryLogTensorOutput::clear_kernel_name() {
  _impl_.kernel_name_.ClearToEmpty();
}
inline const std::string& MemoryLogTensorOutput::kernel_name() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogTensorOutput.kernel_name)
  return _internal_kernel_name();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void MemoryLogTensorOutput::set_kernel_name(ArgT0&& arg0, ArgT... args) {

  _impl_.kernel_name_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogTensorOutput.kernel_name)
}
inline std::string* MemoryLogTensorOutput::mutable_kernel_name() {
  std::string* _s = _internal_mutable_kernel_name();
  // @@protoc_insertion_point(field_mutable:tensorflow.MemoryLogTensorOutput.kernel_name)
  return _s;
}
inline const std::string& MemoryLogTensorOutput::_internal_kernel_name() const {
  return _impl_.kernel_name_.Get();
}
inline void MemoryLogTensorOutput::_internal_set_kernel_name(const std::string& value) {

  _impl_.kernel_name_.Set(value, GetArenaForAllocation());
}
inline std::string* MemoryLogTensorOutput::_internal_mutable_kernel_name() {

  return _impl_.kernel_name_.Mutable(GetArenaForAllocation());
}
inline std::string* MemoryLogTensorOutput::release_kernel_name() {
  // @@protoc_insertion_point(field_release:tensorflow.MemoryLogTensorOutput.kernel_name)
  return _impl_.kernel_name_.Release();
}
inline void MemoryLogTensorOutput::set_allocated_kernel_name(std::string* kernel_name) {
  _impl_.kernel_name_.SetAllocated(kernel_name, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.kernel_name_.IsDefault()) {
    _impl_.kernel_name_.Set("", GetArenaForAllocation());
  }
#endif  // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.MemoryLogTensorOutput.kernel_name)
}

// int32 index = 3;
inline void MemoryLogTensorOutput::clear_index() {
  _impl_.index_ = 0;
}
inline ::int32_t MemoryLogTensorOutput::_internal_index() const {
  return _impl_.index_;
}
inline ::int32_t MemoryLogTensorOutput::index() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogTensorOutput.index)
  return _internal_index();
}
inline void MemoryLogTensorOutput::_internal_set_index(::int32_t value) {

  _impl_.index_ = value;
}
inline void MemoryLogTensorOutput::set_index(::int32_t value) {
  _internal_set_index(value);
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogTensorOutput.index)
}

// .tensorflow.TensorDescription tensor = 4;
inline bool MemoryLogTensorOutput::_internal_has_tensor() const {
  return this != internal_default_instance() && _impl_.tensor_ != nullptr;
}
inline bool MemoryLogTensorOutput::has_tensor() const {
  return _internal_has_tensor();
}
inline const ::tensorflow::TensorDescription& MemoryLogTensorOutput::_internal_tensor() const {
  const ::tensorflow::TensorDescription* p = _impl_.tensor_;
  return p != nullptr ? *p : reinterpret_cast<const ::tensorflow::TensorDescription&>(
      ::tensorflow::_TensorDescription_default_instance_);
}
inline const ::tensorflow::TensorDescription& MemoryLogTensorOutput::tensor() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogTensorOutput.tensor)
  return _internal_tensor();
}
inline void MemoryLogTensorOutput::unsafe_arena_set_allocated_tensor(
    ::tensorflow::TensorDescription* tensor) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.tensor_);
  }
  _impl_.tensor_ = tensor;
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.MemoryLogTensorOutput.tensor)
}
inline ::tensorflow::TensorDescription* MemoryLogTensorOutput::release_tensor() {

  ::tensorflow::TensorDescription* temp = _impl_.tensor_;
  _impl_.tensor_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
  auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
  temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  if (GetArenaForAllocation() == nullptr) { delete old; }
#else  // PROTOBUF_FORCE_COPY_IN_RELEASE
  if (GetArenaForAllocation() != nullptr) {
    temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
  }
#endif  // !PROTOBUF_FORCE_COPY_IN_RELEASE
  return temp;
}
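// Illustrative sketch, not generated code: release_tensor() always hands the
// caller a heap-owned pointer; when the message lives on an arena (or
// PROTOBUF_FORCE_COPY_IN_RELEASE is defined) it returns a copy so the caller
// may safely delete it. For example:
//
//   tensorflow::MemoryLogTensorOutput out_log;
//   out_log.mutable_tensor();  // ensure the field is present
//   std::unique_ptr<tensorflow::TensorDescription> owned(out_log.release_tensor());
//   // out_log.has_tensor() is now false; `owned` frees the submessage.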
inline ::tensorflow::TensorDescription* MemoryLogTensorOutput::unsafe_arena_release_tensor() {
  // @@protoc_insertion_point(field_release:tensorflow.MemoryLogTensorOutput.tensor)

  ::tensorflow::TensorDescription* temp = _impl_.tensor_;
  _impl_.tensor_ = nullptr;
  return temp;
}
inline ::tensorflow::TensorDescription* MemoryLogTensorOutput::_internal_mutable_tensor() {

  if (_impl_.tensor_ == nullptr) {
    auto* p = CreateMaybeMessage<::tensorflow::TensorDescription>(GetArenaForAllocation());
    _impl_.tensor_ = p;
  }
  return _impl_.tensor_;
}
inline ::tensorflow::TensorDescription* MemoryLogTensorOutput::mutable_tensor() {
  ::tensorflow::TensorDescription* _msg = _internal_mutable_tensor();
  // @@protoc_insertion_point(field_mutable:tensorflow.MemoryLogTensorOutput.tensor)
  return _msg;
}
inline void MemoryLogTensorOutput::set_allocated_tensor(::tensorflow::TensorDescription* tensor) {
  ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
  if (message_arena == nullptr) {
    delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.tensor_);
  }
  if (tensor) {
    ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
        ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
            reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(tensor));
    if (message_arena != submessage_arena) {
      tensor = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
          message_arena, tensor, submessage_arena);
    }

  } else {

  }
  _impl_.tensor_ = tensor;
  // @@protoc_insertion_point(field_set_allocated:tensorflow.MemoryLogTensorOutput.tensor)
}
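// Illustrative sketch, not generated code: _internal_mutable_tensor() above
// creates the submessage lazily, so mutable_tensor() both allocates and
// returns it on first use. Assuming TensorDescription's generated dtype
// setter (defined in tensor_description.pb.h):
//
//   tensorflow::MemoryLogTensorOutput out_log;
//   out_log.mutable_tensor()->set_dtype(tensorflow::DT_FLOAT);
//   assert(out_log.has_tensor());  // true after the first mutable access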

// -------------------------------------------------------------------

// MemoryLogRawAllocation

// int64 step_id = 1;
inline void MemoryLogRawAllocation::clear_step_id() {
  _impl_.step_id_ = ::int64_t{0};
}
inline ::int64_t MemoryLogRawAllocation::_internal_step_id() const {
  return _impl_.step_id_;
}
inline ::int64_t MemoryLogRawAllocation::step_id() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogRawAllocation.step_id)
  return _internal_step_id();
}
inline void MemoryLogRawAllocation::_internal_set_step_id(::int64_t value) {

  _impl_.step_id_ = value;
}
inline void MemoryLogRawAllocation::set_step_id(::int64_t value) {
  _internal_set_step_id(value);
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogRawAllocation.step_id)
}

// string operation = 2;
inline void MemoryLogRawAllocation::clear_operation() {
  _impl_.operation_.ClearToEmpty();
}
inline const std::string& MemoryLogRawAllocation::operation() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogRawAllocation.operation)
  return _internal_operation();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void MemoryLogRawAllocation::set_operation(ArgT0&& arg0, ArgT... args) {

  _impl_.operation_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogRawAllocation.operation)
}
inline std::string* MemoryLogRawAllocation::mutable_operation() {
  std::string* _s = _internal_mutable_operation();
  // @@protoc_insertion_point(field_mutable:tensorflow.MemoryLogRawAllocation.operation)
  return _s;
}
inline const std::string& MemoryLogRawAllocation::_internal_operation() const {
  return _impl_.operation_.Get();
}
inline void MemoryLogRawAllocation::_internal_set_operation(const std::string& value) {

  _impl_.operation_.Set(value, GetArenaForAllocation());
}
inline std::string* MemoryLogRawAllocation::_internal_mutable_operation() {

  return _impl_.operation_.Mutable(GetArenaForAllocation());
}
inline std::string* MemoryLogRawAllocation::release_operation() {
  // @@protoc_insertion_point(field_release:tensorflow.MemoryLogRawAllocation.operation)
  return _impl_.operation_.Release();
}
inline void MemoryLogRawAllocation::set_allocated_operation(std::string* operation) {
  _impl_.operation_.SetAllocated(operation, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.operation_.IsDefault()) {
    _impl_.operation_.Set("", GetArenaForAllocation());
  }
#endif  // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.MemoryLogRawAllocation.operation)
}

// int64 num_bytes = 3;
inline void MemoryLogRawAllocation::clear_num_bytes() {
  _impl_.num_bytes_ = ::int64_t{0};
}
inline ::int64_t MemoryLogRawAllocation::_internal_num_bytes() const {
  return _impl_.num_bytes_;
}
inline ::int64_t MemoryLogRawAllocation::num_bytes() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogRawAllocation.num_bytes)
  return _internal_num_bytes();
}
inline void MemoryLogRawAllocation::_internal_set_num_bytes(::int64_t value) {

  _impl_.num_bytes_ = value;
}
inline void MemoryLogRawAllocation::set_num_bytes(::int64_t value) {
  _internal_set_num_bytes(value);
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogRawAllocation.num_bytes)
}

// uint64 ptr = 4;
inline void MemoryLogRawAllocation::clear_ptr() {
  _impl_.ptr_ = ::uint64_t{0u};
}
inline ::uint64_t MemoryLogRawAllocation::_internal_ptr() const {
  return _impl_.ptr_;
}
inline ::uint64_t MemoryLogRawAllocation::ptr() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogRawAllocation.ptr)
  return _internal_ptr();
}
inline void MemoryLogRawAllocation::_internal_set_ptr(::uint64_t value) {

  _impl_.ptr_ = value;
}
inline void MemoryLogRawAllocation::set_ptr(::uint64_t value) {
  _internal_set_ptr(value);
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogRawAllocation.ptr)
}

// int64 allocation_id = 5;
inline void MemoryLogRawAllocation::clear_allocation_id() {
  _impl_.allocation_id_ = ::int64_t{0};
}
inline ::int64_t MemoryLogRawAllocation::_internal_allocation_id() const {
  return _impl_.allocation_id_;
}
inline ::int64_t MemoryLogRawAllocation::allocation_id() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogRawAllocation.allocation_id)
  return _internal_allocation_id();
}
inline void MemoryLogRawAllocation::_internal_set_allocation_id(::int64_t value) {

  _impl_.allocation_id_ = value;
}
inline void MemoryLogRawAllocation::set_allocation_id(::int64_t value) {
  _internal_set_allocation_id(value);
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogRawAllocation.allocation_id)
}

// string allocator_name = 6;
inline void MemoryLogRawAllocation::clear_allocator_name() {
  _impl_.allocator_name_.ClearToEmpty();
}
inline const std::string& MemoryLogRawAllocation::allocator_name() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogRawAllocation.allocator_name)
  return _internal_allocator_name();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void MemoryLogRawAllocation::set_allocator_name(ArgT0&& arg0, ArgT... args) {

  _impl_.allocator_name_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogRawAllocation.allocator_name)
}
inline std::string* MemoryLogRawAllocation::mutable_allocator_name() {
  std::string* _s = _internal_mutable_allocator_name();
  // @@protoc_insertion_point(field_mutable:tensorflow.MemoryLogRawAllocation.allocator_name)
  return _s;
}
inline const std::string& MemoryLogRawAllocation::_internal_allocator_name() const {
  return _impl_.allocator_name_.Get();
}
inline void MemoryLogRawAllocation::_internal_set_allocator_name(const std::string& value) {

  _impl_.allocator_name_.Set(value, GetArenaForAllocation());
}
inline std::string* MemoryLogRawAllocation::_internal_mutable_allocator_name() {

  return _impl_.allocator_name_.Mutable(GetArenaForAllocation());
}
inline std::string* MemoryLogRawAllocation::release_allocator_name() {
  // @@protoc_insertion_point(field_release:tensorflow.MemoryLogRawAllocation.allocator_name)
  return _impl_.allocator_name_.Release();
}
inline void MemoryLogRawAllocation::set_allocated_allocator_name(std::string* allocator_name) {
  _impl_.allocator_name_.SetAllocated(allocator_name, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.allocator_name_.IsDefault()) {
    _impl_.allocator_name_.Set("", GetArenaForAllocation());
  }
#endif  // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.MemoryLogRawAllocation.allocator_name)
}
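// Illustrative sketch, not generated code: like any generated message, a
// MemoryLogRawAllocation can be placed on an arena so that it and its string
// fields share one allocation pool. Hypothetical field values:
//
//   google::protobuf::Arena arena;
//   auto* raw_log =
//       google::protobuf::Arena::CreateMessage<tensorflow::MemoryLogRawAllocation>(&arena);
//   raw_log->set_step_id(7);
//   raw_log->set_operation("MatMul");
//   raw_log->set_num_bytes(4096);
//   // raw_log is freed when `arena` is destroyed; never delete it directly.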

// -------------------------------------------------------------------

// MemoryLogRawDeallocation

// int64 step_id = 1;
inline void MemoryLogRawDeallocation::clear_step_id() {
  _impl_.step_id_ = ::int64_t{0};
}
inline ::int64_t MemoryLogRawDeallocation::_internal_step_id() const {
  return _impl_.step_id_;
}
inline ::int64_t MemoryLogRawDeallocation::step_id() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogRawDeallocation.step_id)
  return _internal_step_id();
}
inline void MemoryLogRawDeallocation::_internal_set_step_id(::int64_t value) {

  _impl_.step_id_ = value;
}
inline void MemoryLogRawDeallocation::set_step_id(::int64_t value) {
  _internal_set_step_id(value);
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogRawDeallocation.step_id)
}

// string operation = 2;
inline void MemoryLogRawDeallocation::clear_operation() {
  _impl_.operation_.ClearToEmpty();
}
inline const std::string& MemoryLogRawDeallocation::operation() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogRawDeallocation.operation)
  return _internal_operation();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void MemoryLogRawDeallocation::set_operation(ArgT0&& arg0, ArgT... args) {

  _impl_.operation_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogRawDeallocation.operation)
}
inline std::string* MemoryLogRawDeallocation::mutable_operation() {
  std::string* _s = _internal_mutable_operation();
  // @@protoc_insertion_point(field_mutable:tensorflow.MemoryLogRawDeallocation.operation)
  return _s;
}
inline const std::string& MemoryLogRawDeallocation::_internal_operation() const {
  return _impl_.operation_.Get();
}
inline void MemoryLogRawDeallocation::_internal_set_operation(const std::string& value) {

  _impl_.operation_.Set(value, GetArenaForAllocation());
}
inline std::string* MemoryLogRawDeallocation::_internal_mutable_operation() {

  return _impl_.operation_.Mutable(GetArenaForAllocation());
}
inline std::string* MemoryLogRawDeallocation::release_operation() {
  // @@protoc_insertion_point(field_release:tensorflow.MemoryLogRawDeallocation.operation)
  return _impl_.operation_.Release();
}
inline void MemoryLogRawDeallocation::set_allocated_operation(std::string* operation) {
  _impl_.operation_.SetAllocated(operation, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.operation_.IsDefault()) {
    _impl_.operation_.Set("", GetArenaForAllocation());
  }
#endif  // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.MemoryLogRawDeallocation.operation)
}

// int64 allocation_id = 3;
inline void MemoryLogRawDeallocation::clear_allocation_id() {
  _impl_.allocation_id_ = ::int64_t{0};
}
inline ::int64_t MemoryLogRawDeallocation::_internal_allocation_id() const {
  return _impl_.allocation_id_;
}
inline ::int64_t MemoryLogRawDeallocation::allocation_id() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogRawDeallocation.allocation_id)
  return _internal_allocation_id();
}
inline void MemoryLogRawDeallocation::_internal_set_allocation_id(::int64_t value) {

  _impl_.allocation_id_ = value;
}
inline void MemoryLogRawDeallocation::set_allocation_id(::int64_t value) {
  _internal_set_allocation_id(value);
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogRawDeallocation.allocation_id)
}

// string allocator_name = 4;
inline void MemoryLogRawDeallocation::clear_allocator_name() {
  _impl_.allocator_name_.ClearToEmpty();
}
inline const std::string& MemoryLogRawDeallocation::allocator_name() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogRawDeallocation.allocator_name)
  return _internal_allocator_name();
}
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void MemoryLogRawDeallocation::set_allocator_name(ArgT0&& arg0, ArgT... args) {

  _impl_.allocator_name_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogRawDeallocation.allocator_name)
}
inline std::string* MemoryLogRawDeallocation::mutable_allocator_name() {
  std::string* _s = _internal_mutable_allocator_name();
  // @@protoc_insertion_point(field_mutable:tensorflow.MemoryLogRawDeallocation.allocator_name)
  return _s;
}
inline const std::string& MemoryLogRawDeallocation::_internal_allocator_name() const {
  return _impl_.allocator_name_.Get();
}
inline void MemoryLogRawDeallocation::_internal_set_allocator_name(const std::string& value) {

  _impl_.allocator_name_.Set(value, GetArenaForAllocation());
}
inline std::string* MemoryLogRawDeallocation::_internal_mutable_allocator_name() {

  return _impl_.allocator_name_.Mutable(GetArenaForAllocation());
}
inline std::string* MemoryLogRawDeallocation::release_allocator_name() {
  // @@protoc_insertion_point(field_release:tensorflow.MemoryLogRawDeallocation.allocator_name)
  return _impl_.allocator_name_.Release();
}
inline void MemoryLogRawDeallocation::set_allocated_allocator_name(std::string* allocator_name) {
  _impl_.allocator_name_.SetAllocated(allocator_name, GetArenaForAllocation());
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (_impl_.allocator_name_.IsDefault()) {
    _impl_.allocator_name_.Set("", GetArenaForAllocation());
  }
#endif  // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // @@protoc_insertion_point(field_set_allocated:tensorflow.MemoryLogRawDeallocation.allocator_name)
}

// bool deferred = 5;
inline void MemoryLogRawDeallocation::clear_deferred() {
  _impl_.deferred_ = false;
}
inline bool MemoryLogRawDeallocation::_internal_deferred() const {
  return _impl_.deferred_;
}
inline bool MemoryLogRawDeallocation::deferred() const {
  // @@protoc_insertion_point(field_get:tensorflow.MemoryLogRawDeallocation.deferred)
  return _internal_deferred();
}
inline void MemoryLogRawDeallocation::_internal_set_deferred(bool value) {

  _impl_.deferred_ = value;
}
inline void MemoryLogRawDeallocation::set_deferred(bool value) {
  _internal_set_deferred(value);
  // @@protoc_insertion_point(field_set:tensorflow.MemoryLogRawDeallocation.deferred)
}
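// Illustrative sketch, not generated code: a round trip through the wire
// format using the MessageLite API, with hypothetical values:
//
//   tensorflow::MemoryLogRawDeallocation d;
//   d.set_allocation_id(42);
//   d.set_deferred(true);
//   std::string wire = d.SerializeAsString();
//   tensorflow::MemoryLogRawDeallocation parsed;
//   bool ok = parsed.ParseFromString(wire);  // ok && parsed.deferred()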

#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif  // __GNUC__
// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------


// @@protoc_insertion_point(namespace_scope)

}  // namespace tensorflow

// @@protoc_insertion_point(global_scope)

#include <google/protobuf/port_undef.inc>
#endif  // GOOGLE_PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto