1 // Generated by the protocol buffer compiler. DO NOT EDIT!
2 // source: tensorflow/core/framework/tensor_description.proto
3
4 #include "tensorflow/core/framework/tensor_description.pb.h"
5
6 #include <algorithm>
7 #include <cstdint>
8
9 #include <google/protobuf/io/coded_stream.h>
10 #include <google/protobuf/extension_set.h>
11 #include <google/protobuf/wire_format_lite.h>
12 #include <google/protobuf/io/zero_copy_stream_impl_lite.h>
13 // @@protoc_insertion_point(includes)
14 #include <google/protobuf/port_def.inc>
15
16 PROTOBUF_PRAGMA_INIT_SEG
17
18 namespace _pb = ::PROTOBUF_NAMESPACE_ID;
19 namespace _pbi = _pb::internal;
20
21 namespace tensorflow {
// Constant-initialized constructor, used only to build the immortal global
// default instance at compile time (no dynamic initialization). Submessage
// pointers start null and dtype_ starts at 0.
PROTOBUF_CONSTEXPR TensorDescription::TensorDescription(
    ::_pbi::ConstantInitialized): _impl_{
    /*decltype(_impl_.shape_)*/nullptr
  , /*decltype(_impl_.allocation_description_)*/nullptr
  , /*decltype(_impl_.dtype_)*/0
  , /*decltype(_impl_._cached_size_)*/{}} {}
// Holder for the global default instance. The anonymous union suppresses the
// automatic destruction of _instance, and together with
// PROTOBUF_ATTRIBUTE_NO_DESTROY below this keeps the default instance alive
// for the entire program (safe to reference even during static shutdown).
struct TensorDescriptionDefaultTypeInternal {
  PROTOBUF_CONSTEXPR TensorDescriptionDefaultTypeInternal()
      : _instance(::_pbi::ConstantInitialized{}) {}
  ~TensorDescriptionDefaultTypeInternal() {}
  union {  // NOLINT(misc-non-private-member-variables-in-classes)
    TensorDescription _instance;
  };
};
// Constant-initialized, never destroyed, initialized early (init priority 1).
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 TensorDescriptionDefaultTypeInternal _TensorDescription_default_instance_;
37 } // namespace tensorflow
38 namespace tensorflow {
39
40 // ===================================================================
41
// Internal accessor helpers: expose the raw submessage references to the
// generated serializer without going through the public accessors.
class TensorDescription::_Internal {
 public:
  static const ::tensorflow::TensorShapeProto& shape(const TensorDescription* msg);
  static const ::tensorflow::AllocationDescription& allocation_description(const TensorDescription* msg);
};
47
// Dereferences _impl_.shape_ without a null check; callers (the serializer)
// check _internal_has_shape() first.
const ::tensorflow::TensorShapeProto&
TensorDescription::_Internal::shape(const TensorDescription* msg) {
  return *msg->_impl_.shape_;
}
// Dereferences _impl_.allocation_description_ without a null check; callers
// (the serializer) check _internal_has_allocation_description() first.
const ::tensorflow::AllocationDescription&
TensorDescription::_Internal::allocation_description(const TensorDescription* msg) {
  return *msg->_impl_.allocation_description_;
}
// Clears the `shape` submessage. The pointer is deleted only for
// heap-allocated messages; arena-owned submessages are freed by the arena.
void TensorDescription::clear_shape() {
  if (GetArenaForAllocation() == nullptr && _impl_.shape_ != nullptr) {
    delete _impl_.shape_;
  }
  _impl_.shape_ = nullptr;
}
// Clears the `allocation_description` submessage. The pointer is deleted only
// for heap-allocated messages; arena-owned submessages are freed by the arena.
void TensorDescription::clear_allocation_description() {
  if (GetArenaForAllocation() == nullptr && _impl_.allocation_description_ != nullptr) {
    delete _impl_.allocation_description_;
  }
  _impl_.allocation_description_ = nullptr;
}
// Arena-enabled constructor; member initialization is delegated to
// SharedCtor so the arena and default construction paths share one code path.
TensorDescription::TensorDescription(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                                     bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.TensorDescription)
}
// Copy constructor: deep-copies the two submessage fields (only when present
// in `from`), copies the dtype enum value, and merges unknown fields, which
// the lite runtime stores as a std::string.
TensorDescription::TensorDescription(const TensorDescription& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  TensorDescription* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.shape_){nullptr}
    , decltype(_impl_.allocation_description_){nullptr}
    , decltype(_impl_.dtype_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  if (from._internal_has_shape()) {
    _this->_impl_.shape_ = new ::tensorflow::TensorShapeProto(*from._impl_.shape_);
  }
  if (from._internal_has_allocation_description()) {
    _this->_impl_.allocation_description_ = new ::tensorflow::AllocationDescription(*from._impl_.allocation_description_);
  }
  _this->_impl_.dtype_ = from._impl_.dtype_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.TensorDescription)
}
93
// Shared initialization: placement-constructs _impl_ with all fields in
// their default state (null submessages, dtype 0). The arena parameters are
// unused here because this message has no arena-allocated inline members.
inline void TensorDescription::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.shape_){nullptr}
    , decltype(_impl_.allocation_description_){nullptr}
    , decltype(_impl_.dtype_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
}
105
TensorDescription::~TensorDescription() {
  // @@protoc_insertion_point(destructor:tensorflow.TensorDescription)
  // Fast path: if the message lives on an arena, the arena owns all memory,
  // so nothing may be freed here.
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
    (void)arena;
    return;
  }
  SharedDtor();
}
114
// Frees heap-owned submessages. Never reached for arena-allocated messages
// (DCHECK) and never deletes through the immortal default instance.
inline void TensorDescription::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  if (this != internal_default_instance()) delete _impl_.shape_;
  if (this != internal_default_instance()) delete _impl_.allocation_description_;
}
120
// Stores the byte size computed by ByteSizeLong() so serialization can embed
// submessage lengths without re-measuring.
void TensorDescription::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
124
// Resets every field to its default state and drops unknown fields.
// Submessages are deleted only when heap-allocated (arena frees its own).
void TensorDescription::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.TensorDescription)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  if (GetArenaForAllocation() == nullptr && _impl_.shape_ != nullptr) {
    delete _impl_.shape_;
  }
  _impl_.shape_ = nullptr;
  if (GetArenaForAllocation() == nullptr && _impl_.allocation_description_ != nullptr) {
    delete _impl_.allocation_description_;
  }
  _impl_.allocation_description_ = nullptr;
  _impl_.dtype_ = 0;
  _internal_metadata_.Clear<std::string>();
}
142
// Wire-format parser. Each loop iteration reads one tag and dispatches on the
// field number (tag >> 3); the byte comparisons (8, 18, 34) additionally
// verify the expected wire type. Unrecognized fields are preserved in the
// lite runtime's std::string unknown-field set. Returns nullptr on malformed
// input (via the CHK_/failure path).
const char* TensorDescription::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .tensorflow.DataType dtype = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_dtype(static_cast<::tensorflow::DataType>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.TensorShapeProto shape = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr = ctx->ParseMessage(_internal_mutable_shape(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.AllocationDescription allocation_description = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
          ptr = ctx->ParseMessage(_internal_mutable_allocation_description(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 or an end-group tag ((tag & 7) == 4) terminates this message.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
199
// Serializes set fields to the output stream in field-number order
// (dtype=1, shape=2, allocation_description=4), then appends any preserved
// unknown fields verbatim. Submessage lengths come from the cached sizes
// computed by a prior ByteSizeLong() pass.
::uint8_t* TensorDescription::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.TensorDescription)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.DataType dtype = 1;
  // proto3: only emitted when different from the default (0).
  if (this->_internal_dtype() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
        1, this->_internal_dtype(), target);
  }

  // .tensorflow.TensorShapeProto shape = 2;
  if (this->_internal_has_shape()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(2, _Internal::shape(this),
        _Internal::shape(this).GetCachedSize(), target, stream);
  }

  // .tensorflow.AllocationDescription allocation_description = 4;
  if (this->_internal_has_allocation_description()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(4, _Internal::allocation_description(this),
        _Internal::allocation_description(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.TensorDescription)
  return target;
}
234
// Computes the serialized byte size. Each set field contributes 1 tag byte
// (all field numbers here fit in one byte) plus its payload size. The total
// is cached via SetCachedSize() for use during serialization.
size_t TensorDescription::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.TensorDescription)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // .tensorflow.TensorShapeProto shape = 2;
  if (this->_internal_has_shape()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.shape_);
  }

  // .tensorflow.AllocationDescription allocation_description = 4;
  if (this->_internal_has_allocation_description()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.allocation_description_);
  }

  // .tensorflow.DataType dtype = 1;
  // proto3: counted only when different from the default (0).
  if (this->_internal_dtype() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_dtype());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
270
// MessageLite virtual override: downcasts `from` (checked in debug builds by
// DownCast) and forwards to the typed MergeFrom.
void TensorDescription::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const TensorDescription*>(
      &from));
}
276
// Merges `from` into this message: set submessages are merged recursively,
// dtype is overwritten only when non-default in `from`, and unknown fields
// are appended. Self-merge is a precondition violation (DCHECK).
void TensorDescription::MergeFrom(const TensorDescription& from) {
  TensorDescription* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.TensorDescription)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (from._internal_has_shape()) {
    _this->_internal_mutable_shape()->::tensorflow::TensorShapeProto::MergeFrom(
        from._internal_shape());
  }
  if (from._internal_has_allocation_description()) {
    _this->_internal_mutable_allocation_description()->::tensorflow::AllocationDescription::MergeFrom(
        from._internal_allocation_description());
  }
  if (from._internal_dtype() != 0) {
    _this->_internal_set_dtype(from._internal_dtype());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}
297
CopyFrom(const TensorDescription & from)298 void TensorDescription::CopyFrom(const TensorDescription& from) {
299 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.TensorDescription)
300 if (&from == this) return;
301 Clear();
302 MergeFrom(from);
303 }
304
// proto3 messages have no required fields, so every instance is initialized.
bool TensorDescription::IsInitialized() const {
  return true;
}
308
// Swaps contents with `other` by byte-swapping the contiguous span of _impl_
// members from shape_ through dtype_ in one memswap call (the offset
// arithmetic relies on Impl_'s declared member layout), plus the metadata.
// NOTE(review): generated swap entry points normally guarantee both messages
// share an arena before calling this — not re-checked here.
void TensorDescription::InternalSwap(TensorDescription* other) {
  using std::swap;
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(TensorDescription, _impl_.dtype_)
      + sizeof(TensorDescription::_impl_.dtype_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(TensorDescription, _impl_.shape_)>(
          reinterpret_cast<char*>(&_impl_.shape_),
          reinterpret_cast<char*>(&other->_impl_.shape_));
}
319
GetTypeName() const320 std::string TensorDescription::GetTypeName() const {
321 return "tensorflow.TensorDescription";
322 }
323
324
325 // @@protoc_insertion_point(namespace_scope)
326 } // namespace tensorflow
327 PROTOBUF_NAMESPACE_OPEN
// Arena factory specialization: constructs a TensorDescription on `arena`
// (or on the heap when arena is null) via the internal arena allocator.
template<> PROTOBUF_NOINLINE ::tensorflow::TensorDescription*
Arena::CreateMaybeMessage< ::tensorflow::TensorDescription >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::TensorDescription >(arena);
}
332 PROTOBUF_NAMESPACE_CLOSE
333
334 // @@protoc_insertion_point(global_scope)
335 #include <google/protobuf/port_undef.inc>
336