1 // Generated by the protocol buffer compiler. DO NOT EDIT!
2 // source: tensorflow/core/framework/tensor.proto
3
4 #include "tensorflow/core/framework/tensor.pb.h"
5
6 #include <algorithm>
7 #include <cstdint>
8
9 #include <google/protobuf/io/coded_stream.h>
10 #include <google/protobuf/extension_set.h>
11 #include <google/protobuf/wire_format_lite.h>
12 #include <google/protobuf/io/zero_copy_stream_impl_lite.h>
13 // @@protoc_insertion_point(includes)
14 #include <google/protobuf/port_def.inc>
15
16 PROTOBUF_PRAGMA_INIT_SEG
17
18 namespace _pb = ::PROTOBUF_NAMESPACE_ID;
19 namespace _pbi = _pb::internal;
20
21 namespace tensorflow {
// Constant-initialized (constexpr) constructor used to build the immutable
// global default instance at static-initialization time. The initializer
// list must mirror Impl_'s member declaration order exactly: repeated fields
// and cached-byte-size slots are value-initialized, tensor_content_ aliases
// the shared fixed-address empty string, and the submessage pointer is null.
TensorProto(::_pbi::ConstantInitialized)22 PROTOBUF_CONSTEXPR TensorProto::TensorProto(
23     ::_pbi::ConstantInitialized): _impl_{
24     /*decltype(_impl_.float_val_)*/{}
25   , /*decltype(_impl_.double_val_)*/{}
26   , /*decltype(_impl_.int_val_)*/{}
27   , /*decltype(_impl_._int_val_cached_byte_size_)*/{0}
28   , /*decltype(_impl_.string_val_)*/{}
29   , /*decltype(_impl_.scomplex_val_)*/{}
30   , /*decltype(_impl_.int64_val_)*/{}
31   , /*decltype(_impl_._int64_val_cached_byte_size_)*/{0}
32   , /*decltype(_impl_.bool_val_)*/{}
33   , /*decltype(_impl_.dcomplex_val_)*/{}
34   , /*decltype(_impl_.half_val_)*/{}
35   , /*decltype(_impl_._half_val_cached_byte_size_)*/{0}
36   , /*decltype(_impl_.resource_handle_val_)*/{}
37   , /*decltype(_impl_.variant_val_)*/{}
38   , /*decltype(_impl_.uint32_val_)*/{}
39   , /*decltype(_impl_._uint32_val_cached_byte_size_)*/{0}
40   , /*decltype(_impl_.uint64_val_)*/{}
41   , /*decltype(_impl_._uint64_val_cached_byte_size_)*/{0}
42   , /*decltype(_impl_.tensor_content_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
43   , /*decltype(_impl_.tensor_shape_)*/nullptr
44   , /*decltype(_impl_.dtype_)*/0
45   , /*decltype(_impl_.version_number_)*/0
46   , /*decltype(_impl_._cached_size_)*/{}} {}
// Wrapper for the global default instance. The anonymous union suppresses
// the automatic destructor call on _instance, so the default instance stays
// valid for the entire program (it may be referenced from other static
// destructors); PROTOBUF_ATTRIBUTE_NO_DESTROY reinforces this on the object.
47 struct TensorProtoDefaultTypeInternal {
TensorProtoDefaultTypeInternaltensorflow::TensorProtoDefaultTypeInternal48 PROTOBUF_CONSTEXPR TensorProtoDefaultTypeInternal()
49       : _instance(::_pbi::ConstantInitialized{}) {}
~TensorProtoDefaultTypeInternaltensorflow::TensorProtoDefaultTypeInternal50 ~TensorProtoDefaultTypeInternal() {}
51   union { // NOLINT(misc-non-private-member-variables-in-classes)
52     TensorProto _instance;
53   };
54 };
// Constant-initialized at compile time (PROTOBUF_CONSTINIT) and never
// destroyed; early init priority so it exists before other initializers run.
55 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 TensorProtoDefaultTypeInternal _TensorProto_default_instance_;
// Constant-initialized (constexpr) constructor for the VariantTensorDataProto
// default instance; initializer order must match Impl_'s declaration order,
// and both string fields alias the shared fixed-address empty string.
VariantTensorDataProto(::_pbi::ConstantInitialized)56 PROTOBUF_CONSTEXPR VariantTensorDataProto::VariantTensorDataProto(
57     ::_pbi::ConstantInitialized): _impl_{
58     /*decltype(_impl_.tensors_)*/{}
59   , /*decltype(_impl_.type_name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
60   , /*decltype(_impl_.metadata_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
61   , /*decltype(_impl_._cached_size_)*/{}} {}
// Same pattern as TensorProtoDefaultTypeInternal: the union member keeps the
// default instance from ever being destroyed, so references to it remain
// valid during static destruction.
62 struct VariantTensorDataProtoDefaultTypeInternal {
VariantTensorDataProtoDefaultTypeInternaltensorflow::VariantTensorDataProtoDefaultTypeInternal63 PROTOBUF_CONSTEXPR VariantTensorDataProtoDefaultTypeInternal()
64       : _instance(::_pbi::ConstantInitialized{}) {}
~VariantTensorDataProtoDefaultTypeInternaltensorflow::VariantTensorDataProtoDefaultTypeInternal65 ~VariantTensorDataProtoDefaultTypeInternal() {}
66   union { // NOLINT(misc-non-private-member-variables-in-classes)
67     VariantTensorDataProto _instance;
68   };
69 };
// Compile-time constant-initialized, never destroyed, early init priority.
70 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 VariantTensorDataProtoDefaultTypeInternal _VariantTensorDataProto_default_instance_;
71 } // namespace tensorflow
72 namespace tensorflow {
73
74 // ===================================================================
75
// Private accessor shim: gives generated serialization code access to the
// raw tensor_shape_ pointer without widening TensorProto's public API.
76 class TensorProto::_Internal {
77  public:
78   static const ::tensorflow::TensorShapeProto& tensor_shape(const TensorProto* msg);
79 };
80
// Dereferences the raw submessage pointer without a null check: callers
// (e.g. _InternalSerialize) must first verify _internal_has_tensor_shape().
81 const ::tensorflow::TensorShapeProto&
tensor_shape(const TensorProto * msg)82 TensorProto::_Internal::tensor_shape(const TensorProto* msg) {
83   return *msg->_impl_.tensor_shape_;
84 }
clear_tensor_shape()85 void TensorProto::clear_tensor_shape() {
86 if (GetArenaForAllocation() == nullptr && _impl_.tensor_shape_ != nullptr) {
87 delete _impl_.tensor_shape_;
88 }
89 _impl_.tensor_shape_ = nullptr;
90 }
// Empties the repeated resource_handle_val field (size becomes 0; the
// RepeatedPtrField keeps its allocated capacity for reuse).
clear_resource_handle_val()91 void TensorProto::clear_resource_handle_val() {
92   _impl_.resource_handle_val_.Clear();
93 }
// Arena constructor: delegates all member construction to SharedCtor.
// is_message_owned supports protobuf's message-owned-arena optimization.
TensorProto(::PROTOBUF_NAMESPACE_ID::Arena * arena,bool is_message_owned)94 TensorProto::TensorProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
95     bool is_message_owned)
96   : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
97   SharedCtor(arena, is_message_owned);
98   // @@protoc_insertion_point(arena_constructor:tensorflow.TensorProto)
99 }
// Deep-copy constructor (heap-allocated copy, never on an arena). Repeated
// fields are copy-constructed into the placement-new'd Impl_, the string is
// re-set into this message's own storage, the tensor_shape submessage is
// cloned when present, and the trailing POD scalars are copied with a single
// memcpy over their contiguous [dtype_, version_number_] span — which relies
// on Impl_ declaring those members adjacently.
TensorProto(const TensorProto & from)100 TensorProto::TensorProto(const TensorProto& from)
101   : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
102   TensorProto* const _this = this; (void)_this;
103   new (&_impl_) Impl_{
104     decltype(_impl_.float_val_){from._impl_.float_val_}
105   , decltype(_impl_.double_val_){from._impl_.double_val_}
106   , decltype(_impl_.int_val_){from._impl_.int_val_}
107   , /*decltype(_impl_._int_val_cached_byte_size_)*/{0}
108   , decltype(_impl_.string_val_){from._impl_.string_val_}
109   , decltype(_impl_.scomplex_val_){from._impl_.scomplex_val_}
110   , decltype(_impl_.int64_val_){from._impl_.int64_val_}
111   , /*decltype(_impl_._int64_val_cached_byte_size_)*/{0}
112   , decltype(_impl_.bool_val_){from._impl_.bool_val_}
113   , decltype(_impl_.dcomplex_val_){from._impl_.dcomplex_val_}
114   , decltype(_impl_.half_val_){from._impl_.half_val_}
115   , /*decltype(_impl_._half_val_cached_byte_size_)*/{0}
116   , decltype(_impl_.resource_handle_val_){from._impl_.resource_handle_val_}
117   , decltype(_impl_.variant_val_){from._impl_.variant_val_}
118   , decltype(_impl_.uint32_val_){from._impl_.uint32_val_}
119   , /*decltype(_impl_._uint32_val_cached_byte_size_)*/{0}
120   , decltype(_impl_.uint64_val_){from._impl_.uint64_val_}
121   , /*decltype(_impl_._uint64_val_cached_byte_size_)*/{0}
122   , decltype(_impl_.tensor_content_){}
123   , decltype(_impl_.tensor_shape_){nullptr}
124   , decltype(_impl_.dtype_){}
125   , decltype(_impl_.version_number_){}
126   , /*decltype(_impl_._cached_size_)*/{}};
127 
128   _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
129   _impl_.tensor_content_.InitDefault();
130   #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
131     _impl_.tensor_content_.Set("", GetArenaForAllocation());
132   #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
133   if (!from._internal_tensor_content().empty()) {
134     _this->_impl_.tensor_content_.Set(from._internal_tensor_content(),
135       _this->GetArenaForAllocation());
136   }
137   if (from._internal_has_tensor_shape()) {
138     _this->_impl_.tensor_shape_ = new ::tensorflow::TensorShapeProto(*from._impl_.tensor_shape_);
139   }
140   ::memcpy(&_impl_.dtype_, &from._impl_.dtype_,
141     static_cast<size_t>(reinterpret_cast<char*>(&_impl_.version_number_) -
142     reinterpret_cast<char*>(&_impl_.dtype_)) + sizeof(_impl_.version_number_));
143   // @@protoc_insertion_point(copy_constructor:tensorflow.TensorProto)
144 }
145
// Shared constructor body: placement-news Impl_ with arena-aware repeated
// fields (they allocate from `arena` when non-null), a null submessage
// pointer, zeroed scalars, and the tensor_content string bound to the shared
// default. Initializer order must mirror Impl_'s member declaration order.
SharedCtor(::_pb::Arena * arena,bool is_message_owned)146 inline void TensorProto::SharedCtor(
147     ::_pb::Arena* arena, bool is_message_owned) {
148   (void)arena;
149   (void)is_message_owned;
150   new (&_impl_) Impl_{
151     decltype(_impl_.float_val_){arena}
152   , decltype(_impl_.double_val_){arena}
153   , decltype(_impl_.int_val_){arena}
154   , /*decltype(_impl_._int_val_cached_byte_size_)*/{0}
155   , decltype(_impl_.string_val_){arena}
156   , decltype(_impl_.scomplex_val_){arena}
157   , decltype(_impl_.int64_val_){arena}
158   , /*decltype(_impl_._int64_val_cached_byte_size_)*/{0}
159   , decltype(_impl_.bool_val_){arena}
160   , decltype(_impl_.dcomplex_val_){arena}
161   , decltype(_impl_.half_val_){arena}
162   , /*decltype(_impl_._half_val_cached_byte_size_)*/{0}
163   , decltype(_impl_.resource_handle_val_){arena}
164   , decltype(_impl_.variant_val_){arena}
165   , decltype(_impl_.uint32_val_){arena}
166   , /*decltype(_impl_._uint32_val_cached_byte_size_)*/{0}
167   , decltype(_impl_.uint64_val_){arena}
168   , /*decltype(_impl_._uint64_val_cached_byte_size_)*/{0}
169   , decltype(_impl_.tensor_content_){}
170   , decltype(_impl_.tensor_shape_){nullptr}
171   , decltype(_impl_.dtype_){0}
172   , decltype(_impl_.version_number_){0}
173   , /*decltype(_impl_._cached_size_)*/{}
174   };
175   _impl_.tensor_content_.InitDefault();
176   #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
177     _impl_.tensor_content_.Set("", GetArenaForAllocation());
178   #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
179 }
180
// Destructor: if the message lives on an arena, only the metadata is
// released and member destruction is skipped entirely — the arena reclaims
// all memory in bulk. Heap-allocated messages fall through to SharedDtor.
~TensorProto()181 TensorProto::~TensorProto() {
182   // @@protoc_insertion_point(destructor:tensorflow.TensorProto)
183   if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
184   (void)arena;
185     return;
186   }
187   SharedDtor();
188 }
189
// Releases owned storage. Only legal for non-arena messages (DCHECK'd):
// Impl_ lives inside a manually managed aggregate, so each repeated field's
// destructor is invoked explicitly. The default-instance guard prevents
// deleting through the constexpr singleton (its shape pointer is null).
SharedDtor()190 inline void TensorProto::SharedDtor() {
191   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
192   _impl_.float_val_.~RepeatedField();
193   _impl_.double_val_.~RepeatedField();
194   _impl_.int_val_.~RepeatedField();
195   _impl_.string_val_.~RepeatedPtrField();
196   _impl_.scomplex_val_.~RepeatedField();
197   _impl_.int64_val_.~RepeatedField();
198   _impl_.bool_val_.~RepeatedField();
199   _impl_.dcomplex_val_.~RepeatedField();
200   _impl_.half_val_.~RepeatedField();
201   _impl_.resource_handle_val_.~RepeatedPtrField();
202   _impl_.variant_val_.~RepeatedPtrField();
203   _impl_.uint32_val_.~RepeatedField();
204   _impl_.uint64_val_.~RepeatedField();
205   _impl_.tensor_content_.Destroy();
206   if (this != internal_default_instance()) delete _impl_.tensor_shape_;
207 }
208
// Stores the byte size computed by ByteSizeLong() so serialization can
// reuse it; _cached_size_ is an atomic wrapper, hence settable from const.
SetCachedSize(int size) const209 void TensorProto::SetCachedSize(int size) const {
210   _impl_._cached_size_.Set(size);
211 }
212
// Resets every field to its proto3 default. Repeated fields are cleared in
// place (capacity retained), the heap-owned tensor_shape_ is deleted only
// when not arena-allocated, and the trailing scalar fields are zeroed with
// one memset over the contiguous [dtype_, version_number_] span.
Clear()213 void TensorProto::Clear() {
214 // @@protoc_insertion_point(message_clear_start:tensorflow.TensorProto)
215   ::uint32_t cached_has_bits = 0;
216   // Prevent compiler warnings about cached_has_bits being unused
217   (void) cached_has_bits;
218 
219   _impl_.float_val_.Clear();
220   _impl_.double_val_.Clear();
221   _impl_.int_val_.Clear();
222   _impl_.string_val_.Clear();
223   _impl_.scomplex_val_.Clear();
224   _impl_.int64_val_.Clear();
225   _impl_.bool_val_.Clear();
226   _impl_.dcomplex_val_.Clear();
227   _impl_.half_val_.Clear();
228   _impl_.resource_handle_val_.Clear();
229   _impl_.variant_val_.Clear();
230   _impl_.uint32_val_.Clear();
231   _impl_.uint64_val_.Clear();
232   _impl_.tensor_content_.ClearToEmpty();
233   if (GetArenaForAllocation() == nullptr && _impl_.tensor_shape_ != nullptr) {
234     delete _impl_.tensor_shape_;
235   }
236   _impl_.tensor_shape_ = nullptr;
237   ::memset(&_impl_.dtype_, 0, static_cast<size_t>(
238       reinterpret_cast<char*>(&_impl_.version_number_) -
239       reinterpret_cast<char*>(&_impl_.dtype_)) + sizeof(_impl_.version_number_));
240   _internal_metadata_.Clear<std::string>();
241 }
242
// Hand-rolled wire-format parser (lite runtime, no reflection). Dispatches
// on the field number (tag >> 3), then on the low tag byte to distinguish
// packed (length-delimited) from single-element encodings of the repeated
// numeric fields. Unknown fields are preserved as raw bytes in the metadata.
// Returns the advanced read position, or nullptr on a malformed payload.
_InternalParse(const char * ptr,::_pbi::ParseContext * ctx)243 const char* TensorProto::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
244 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
245   while (!ctx->Done(&ptr)) {
246     ::uint32_t tag;
247     ptr = ::_pbi::ReadTag(ptr, &tag);
248     switch (tag >> 3) {
249       // .tensorflow.DataType dtype = 1;
250       case 1:
251         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
252           ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
253           CHK_(ptr);
254           _internal_set_dtype(static_cast<::tensorflow::DataType>(val));
255         } else {
256           goto handle_unusual;
257         }
258         continue;
259       // .tensorflow.TensorShapeProto tensor_shape = 2;
260       case 2:
261         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
262           ptr = ctx->ParseMessage(_internal_mutable_tensor_shape(), ptr);
263           CHK_(ptr);
264         } else {
265           goto handle_unusual;
266         }
267         continue;
268       // int32 version_number = 3;
269       case 3:
270         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
271           _impl_.version_number_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
272           CHK_(ptr);
273         } else {
274           goto handle_unusual;
275         }
276         continue;
277       // bytes tensor_content = 4;
278       case 4:
279         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
280           auto str = _internal_mutable_tensor_content();
281           ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
282           CHK_(ptr);
283         } else {
284           goto handle_unusual;
285         }
286         continue;
287       // repeated float float_val = 5 [packed = true];
288       case 5:
289         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
290           ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedFloatParser(_internal_mutable_float_val(), ptr, ctx);
291           CHK_(ptr);
292         } else if (static_cast<::uint8_t>(tag) == 45) {
293           _internal_add_float_val(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<float>(ptr));
294           ptr += sizeof(float);
295         } else {
296           goto handle_unusual;
297         }
298         continue;
299       // repeated double double_val = 6 [packed = true];
300       case 6:
301         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) {
302           ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedDoubleParser(_internal_mutable_double_val(), ptr, ctx);
303           CHK_(ptr);
304         } else if (static_cast<::uint8_t>(tag) == 49) {
305           _internal_add_double_val(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr));
306           ptr += sizeof(double);
307         } else {
308           goto handle_unusual;
309         }
310         continue;
311       // repeated int32 int_val = 7 [packed = true];
312       case 7:
313         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 58)) {
314           ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt32Parser(_internal_mutable_int_val(), ptr, ctx);
315           CHK_(ptr);
316         } else if (static_cast<::uint8_t>(tag) == 56) {
317           _internal_add_int_val(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr));
318           CHK_(ptr);
319         } else {
320           goto handle_unusual;
321         }
322         continue;
323       // repeated bytes string_val = 8;
324       case 8:
325         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 66)) {
326           ptr -= 1;
327           do {
328             ptr += 1;
329             auto str = _internal_add_string_val();
330             ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
331             CHK_(ptr);
332             if (!ctx->DataAvailable(ptr)) break;
333           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<66>(ptr));
334         } else {
335           goto handle_unusual;
336         }
337         continue;
338       // repeated float scomplex_val = 9 [packed = true];
339       case 9:
340         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 74)) {
341           ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedFloatParser(_internal_mutable_scomplex_val(), ptr, ctx);
342           CHK_(ptr);
343         } else if (static_cast<::uint8_t>(tag) == 77) {
344           _internal_add_scomplex_val(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<float>(ptr));
345           ptr += sizeof(float);
346         } else {
347           goto handle_unusual;
348         }
349         continue;
350       // repeated int64 int64_val = 10 [packed = true];
351       case 10:
352         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 82)) {
353           ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt64Parser(_internal_mutable_int64_val(), ptr, ctx);
354           CHK_(ptr);
355         } else if (static_cast<::uint8_t>(tag) == 80) {
356           _internal_add_int64_val(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
357           CHK_(ptr);
358         } else {
359           goto handle_unusual;
360         }
361         continue;
362       // repeated bool bool_val = 11 [packed = true];
363       case 11:
364         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 90)) {
365           ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedBoolParser(_internal_mutable_bool_val(), ptr, ctx);
366           CHK_(ptr);
367         } else if (static_cast<::uint8_t>(tag) == 88) {
368           _internal_add_bool_val(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
369           CHK_(ptr);
370         } else {
371           goto handle_unusual;
372         }
373         continue;
374       // repeated double dcomplex_val = 12 [packed = true];
375       case 12:
376         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 98)) {
377           ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedDoubleParser(_internal_mutable_dcomplex_val(), ptr, ctx);
378           CHK_(ptr);
379         } else if (static_cast<::uint8_t>(tag) == 97) {
380           _internal_add_dcomplex_val(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr));
381           ptr += sizeof(double);
382         } else {
383           goto handle_unusual;
384         }
385         continue;
386       // repeated int32 half_val = 13 [packed = true];
387       case 13:
388         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 106)) {
389           ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt32Parser(_internal_mutable_half_val(), ptr, ctx);
390           CHK_(ptr);
391         } else if (static_cast<::uint8_t>(tag) == 104) {
392           _internal_add_half_val(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr));
393           CHK_(ptr);
394         } else {
395           goto handle_unusual;
396         }
397         continue;
398       // repeated .tensorflow.ResourceHandleProto resource_handle_val = 14;
399       case 14:
400         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 114)) {
401           ptr -= 1;
402           do {
403             ptr += 1;
404             ptr = ctx->ParseMessage(_internal_add_resource_handle_val(), ptr);
405             CHK_(ptr);
406             if (!ctx->DataAvailable(ptr)) break;
407           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<114>(ptr));
408         } else {
409           goto handle_unusual;
410         }
411         continue;
412       // repeated .tensorflow.VariantTensorDataProto variant_val = 15;
413       case 15:
414         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 122)) {
415           ptr -= 1;
416           do {
417             ptr += 1;
418             ptr = ctx->ParseMessage(_internal_add_variant_val(), ptr);
419             CHK_(ptr);
420             if (!ctx->DataAvailable(ptr)) break;
421           } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<122>(ptr));
422         } else {
423           goto handle_unusual;
424         }
425         continue;
426       // repeated uint32 uint32_val = 16 [packed = true];
427       case 16:
428         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 130)) {
429           ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedUInt32Parser(_internal_mutable_uint32_val(), ptr, ctx);
430           CHK_(ptr);
431         } else if (static_cast<::uint8_t>(tag) == 128) {
432           _internal_add_uint32_val(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr));
433           CHK_(ptr);
434         } else {
435           goto handle_unusual;
436         }
437         continue;
438       // repeated uint64 uint64_val = 17 [packed = true];
439       case 17:
440         if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 138)) {
441           ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedUInt64Parser(_internal_mutable_uint64_val(), ptr, ctx);
442           CHK_(ptr);
443         } else if (static_cast<::uint8_t>(tag) == 136) {
444           _internal_add_uint64_val(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
445           CHK_(ptr);
446         } else {
447           goto handle_unusual;
448         }
449         continue;
450       default:
451         goto handle_unusual;
452     }  // switch
453   handle_unusual:
    // A zero tag or an end-group tag (wire type 4) terminates this message;
    // any other unrecognized tag is kept verbatim as unknown-field bytes.
454     if ((tag == 0) || ((tag & 7) == 4)) {
455       CHK_(ptr);
456       ctx->SetLastTag(tag);
457       goto message_done;
458     }
459     ptr = UnknownFieldParse(
460         tag,
461         _internal_metadata_.mutable_unknown_fields<std::string>(),
462         ptr, ctx);
463     CHK_(ptr != nullptr);
464   }  // while
465 message_done:
466   return ptr;
467 failure:
  // Malformed input: signal the error to the caller with a null pointer.
468   ptr = nullptr;
469   goto message_done;
470 #undef CHK_
471 }
472
// Writes all non-default fields to the wire in field-number order. Packed
// varint fields reuse the payload byte sizes cached by ByteSizeLong(), which
// the protobuf serialization contract guarantees has already run; fixed-width
// packed fields compute their size trivially and need no cache. Unknown
// fields (raw bytes in the lite runtime) are appended last.
_InternalSerialize(::uint8_t * target,::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream * stream) const473 ::uint8_t* TensorProto::_InternalSerialize(
474     ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
475   // @@protoc_insertion_point(serialize_to_array_start:tensorflow.TensorProto)
476   ::uint32_t cached_has_bits = 0;
477   (void) cached_has_bits;
478 
479   // .tensorflow.DataType dtype = 1;
480   if (this->_internal_dtype() != 0) {
481     target = stream->EnsureSpace(target);
482     target = ::_pbi::WireFormatLite::WriteEnumToArray(
483         1, this->_internal_dtype(), target);
484   }
485 
486   // .tensorflow.TensorShapeProto tensor_shape = 2;
487   if (this->_internal_has_tensor_shape()) {
488     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
489       InternalWriteMessage(2, _Internal::tensor_shape(this),
490         _Internal::tensor_shape(this).GetCachedSize(), target, stream);
491   }
492 
493   // int32 version_number = 3;
494   if (this->_internal_version_number() != 0) {
495     target = stream->EnsureSpace(target);
496     target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_version_number(), target);
497   }
498 
499   // bytes tensor_content = 4;
500   if (!this->_internal_tensor_content().empty()) {
501     target = stream->WriteBytesMaybeAliased(
502         4, this->_internal_tensor_content(), target);
503   }
504 
505   // repeated float float_val = 5 [packed = true];
506   if (this->_internal_float_val_size() > 0) {
507     target = stream->WriteFixedPacked(5, _internal_float_val(), target);
508   }
509 
510   // repeated double double_val = 6 [packed = true];
511   if (this->_internal_double_val_size() > 0) {
512     target = stream->WriteFixedPacked(6, _internal_double_val(), target);
513   }
514 
515   // repeated int32 int_val = 7 [packed = true];
516   {
    // Varint payload size was cached by ByteSizeLong(); zero means the field
    // is absent and nothing is emitted.
517     int byte_size = _impl_._int_val_cached_byte_size_.load(std::memory_order_relaxed);
518     if (byte_size > 0) {
519       target = stream->WriteInt32Packed(
520           7, _internal_int_val(), byte_size, target);
521     }
522   }
523 
524   // repeated bytes string_val = 8;
525   for (int i = 0, n = this->_internal_string_val_size(); i < n; i++) {
526     const auto& s = this->_internal_string_val(i);
527     target = stream->WriteBytes(8, s, target);
528   }
529 
530   // repeated float scomplex_val = 9 [packed = true];
531   if (this->_internal_scomplex_val_size() > 0) {
532     target = stream->WriteFixedPacked(9, _internal_scomplex_val(), target);
533   }
534 
535   // repeated int64 int64_val = 10 [packed = true];
536   {
537     int byte_size = _impl_._int64_val_cached_byte_size_.load(std::memory_order_relaxed);
538     if (byte_size > 0) {
539       target = stream->WriteInt64Packed(
540           10, _internal_int64_val(), byte_size, target);
541     }
542   }
543 
544   // repeated bool bool_val = 11 [packed = true];
545   if (this->_internal_bool_val_size() > 0) {
546     target = stream->WriteFixedPacked(11, _internal_bool_val(), target);
547   }
548 
549   // repeated double dcomplex_val = 12 [packed = true];
550   if (this->_internal_dcomplex_val_size() > 0) {
551     target = stream->WriteFixedPacked(12, _internal_dcomplex_val(), target);
552   }
553 
554   // repeated int32 half_val = 13 [packed = true];
555   {
556     int byte_size = _impl_._half_val_cached_byte_size_.load(std::memory_order_relaxed);
557     if (byte_size > 0) {
558       target = stream->WriteInt32Packed(
559           13, _internal_half_val(), byte_size, target);
560     }
561   }
562 
563   // repeated .tensorflow.ResourceHandleProto resource_handle_val = 14;
564   for (unsigned i = 0,
565       n = static_cast<unsigned>(this->_internal_resource_handle_val_size()); i < n; i++) {
566     const auto& repfield = this->_internal_resource_handle_val(i);
567     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
568         InternalWriteMessage(14, repfield, repfield.GetCachedSize(), target, stream);
569   }
570 
571   // repeated .tensorflow.VariantTensorDataProto variant_val = 15;
572   for (unsigned i = 0,
573       n = static_cast<unsigned>(this->_internal_variant_val_size()); i < n; i++) {
574     const auto& repfield = this->_internal_variant_val(i);
575     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
576         InternalWriteMessage(15, repfield, repfield.GetCachedSize(), target, stream);
577   }
578 
579   // repeated uint32 uint32_val = 16 [packed = true];
580   {
581     int byte_size = _impl_._uint32_val_cached_byte_size_.load(std::memory_order_relaxed);
582     if (byte_size > 0) {
583       target = stream->WriteUInt32Packed(
584           16, _internal_uint32_val(), byte_size, target);
585     }
586   }
587 
588   // repeated uint64 uint64_val = 17 [packed = true];
589   {
590     int byte_size = _impl_._uint64_val_cached_byte_size_.load(std::memory_order_relaxed);
591     if (byte_size > 0) {
592       target = stream->WriteUInt64Packed(
593           17, _internal_uint64_val(), byte_size, target);
594     }
595   }
596 
597   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
598     target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
599         static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
600   }
601   // @@protoc_insertion_point(serialize_to_array_end:tensorflow.TensorProto)
602   return target;
603 }
604
// Computes the serialized byte size of the message. As a side effect it
// caches each packed varint field's payload size (read back later by
// _InternalSerialize) and the total size (via SetCachedSize). The cache
// members are atomics written with relaxed ordering because this is a const
// method that may be called concurrently; the cached value is a benign,
// recomputable cache.
ByteSizeLong() const605 size_t TensorProto::ByteSizeLong() const {
606 // @@protoc_insertion_point(message_byte_size_start:tensorflow.TensorProto)
607   size_t total_size = 0;
608 
609   ::uint32_t cached_has_bits = 0;
610   // Prevent compiler warnings about cached_has_bits being unused
611   (void) cached_has_bits;
612 
613   // repeated float float_val = 5 [packed = true];
614   {
615     unsigned int count = static_cast<unsigned int>(this->_internal_float_val_size());
616     size_t data_size = 4UL * count;
617     if (data_size > 0) {
618       total_size += 1 +
619         ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
620     }
621     total_size += data_size;
622   }
623 
624   // repeated double double_val = 6 [packed = true];
625   {
626     unsigned int count = static_cast<unsigned int>(this->_internal_double_val_size());
627     size_t data_size = 8UL * count;
628     if (data_size > 0) {
629       total_size += 1 +
630         ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
631     }
632     total_size += data_size;
633   }
634 
635   // repeated int32 int_val = 7 [packed = true];
636   {
637     size_t data_size = ::_pbi::WireFormatLite::
638       Int32Size(this->_impl_.int_val_);
639     if (data_size > 0) {
640       total_size += 1 +
641         ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
642     }
    // Cache the varint payload size so serialization need not recompute it.
643     int cached_size = ::_pbi::ToCachedSize(data_size);
644     _impl_._int_val_cached_byte_size_.store(cached_size,
645                 std::memory_order_relaxed);
646     total_size += data_size;
647   }
648 
649   // repeated bytes string_val = 8;
650   total_size += 1 *
651       ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(_impl_.string_val_.size());
652   for (int i = 0, n = _impl_.string_val_.size(); i < n; i++) {
653     total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize(
654       _impl_.string_val_.Get(i));
655   }
656 
657   // repeated float scomplex_val = 9 [packed = true];
658   {
659     unsigned int count = static_cast<unsigned int>(this->_internal_scomplex_val_size());
660     size_t data_size = 4UL * count;
661     if (data_size > 0) {
662       total_size += 1 +
663         ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
664     }
665     total_size += data_size;
666   }
667 
668   // repeated int64 int64_val = 10 [packed = true];
669   {
670     size_t data_size = ::_pbi::WireFormatLite::
671       Int64Size(this->_impl_.int64_val_);
671     if (data_size > 0) {
673       total_size += 1 +
674         ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
675     }
676     int cached_size = ::_pbi::ToCachedSize(data_size);
677     _impl_._int64_val_cached_byte_size_.store(cached_size,
678                 std::memory_order_relaxed);
679     total_size += data_size;
680   }
681 
682   // repeated bool bool_val = 11 [packed = true];
683   {
684     unsigned int count = static_cast<unsigned int>(this->_internal_bool_val_size());
685     size_t data_size = 1UL * count;
686     if (data_size > 0) {
687       total_size += 1 +
688         ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
689     }
690     total_size += data_size;
691   }
692 
693   // repeated double dcomplex_val = 12 [packed = true];
694   {
695     unsigned int count = static_cast<unsigned int>(this->_internal_dcomplex_val_size());
696     size_t data_size = 8UL * count;
697     if (data_size > 0) {
698       total_size += 1 +
699         ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
700     }
701     total_size += data_size;
702   }
703 
704   // repeated int32 half_val = 13 [packed = true];
705   {
706     size_t data_size = ::_pbi::WireFormatLite::
707       Int32Size(this->_impl_.half_val_);
708     if (data_size > 0) {
709       total_size += 1 +
710         ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
711     }
712     int cached_size = ::_pbi::ToCachedSize(data_size);
713     _impl_._half_val_cached_byte_size_.store(cached_size,
714                 std::memory_order_relaxed);
715     total_size += data_size;
716   }
717 
718   // repeated .tensorflow.ResourceHandleProto resource_handle_val = 14;
719   total_size += 1UL * this->_internal_resource_handle_val_size();
720   for (const auto& msg : this->_impl_.resource_handle_val_) {
721     total_size +=
722       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
723   }
724 
725   // repeated .tensorflow.VariantTensorDataProto variant_val = 15;
726   total_size += 1UL * this->_internal_variant_val_size();
727   for (const auto& msg : this->_impl_.variant_val_) {
728     total_size +=
729       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
730   }
731 
732   // repeated uint32 uint32_val = 16 [packed = true];
733   {
734     size_t data_size = ::_pbi::WireFormatLite::
735       UInt32Size(this->_impl_.uint32_val_);
736     if (data_size > 0) {
    // Field numbers 16 and above need a two-byte tag on the wire.
737       total_size += 2 +
738         ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
739     }
740     int cached_size = ::_pbi::ToCachedSize(data_size);
741     _impl_._uint32_val_cached_byte_size_.store(cached_size,
742                 std::memory_order_relaxed);
743     total_size += data_size;
744   }
745 
746   // repeated uint64 uint64_val = 17 [packed = true];
747   {
748     size_t data_size = ::_pbi::WireFormatLite::
749       UInt64Size(this->_impl_.uint64_val_);
750     if (data_size > 0) {
751       total_size += 2 +
752         ::_pbi::WireFormatLite::Int32Size(static_cast<::int32_t>(data_size));
753     }
754     int cached_size = ::_pbi::ToCachedSize(data_size);
755     _impl_._uint64_val_cached_byte_size_.store(cached_size,
756                 std::memory_order_relaxed);
757     total_size += data_size;
758   }
759 
760   // bytes tensor_content = 4;
761   if (!this->_internal_tensor_content().empty()) {
762     total_size += 1 +
763       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize(
764         this->_internal_tensor_content());
765   }
766 
767   // .tensorflow.TensorShapeProto tensor_shape = 2;
768   if (this->_internal_has_tensor_shape()) {
769     total_size += 1 +
770       ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
771         *_impl_.tensor_shape_);
772   }
773 
774   // .tensorflow.DataType dtype = 1;
775   if (this->_internal_dtype() != 0) {
776     total_size += 1 +
777       ::_pbi::WireFormatLite::EnumSize(this->_internal_dtype());
778   }
779 
780   // int32 version_number = 3;
781   if (this->_internal_version_number() != 0) {
782     total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_version_number());
783   }
784 
785   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
786     total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
787   }
788   int cached_size = ::_pbi::ToCachedSize(total_size);
789   SetCachedSize(cached_size);
790   return total_size;
791 }
792
// Lite-runtime dynamic merge entry point: downcasts the MessageLite argument
// (unchecked in release builds — caller guarantees the type) and forwards to
// the typed MergeFrom.
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)793 void TensorProto::CheckTypeAndMergeFrom(
794     const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
795   MergeFrom(*::_pbi::DownCast<const TensorProto*>(
796       &from));
797 }
798
// Field-wise merge with proto3 semantics: repeated fields are appended,
// singular fields are overwritten only when `from` holds a non-default
// value (so this message's values survive when `from` is unset), the
// tensor_shape submessage is merged recursively, and unknown-field bytes
// are concatenated. Merging a message into itself is a bug (DCHECK'd).
MergeFrom(const TensorProto & from)799 void TensorProto::MergeFrom(const TensorProto& from) {
800   TensorProto* const _this = this;
801   // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.TensorProto)
802   GOOGLE_DCHECK_NE(&from, _this);
803   ::uint32_t cached_has_bits = 0;
804   (void) cached_has_bits;
805 
806   _this->_impl_.float_val_.MergeFrom(from._impl_.float_val_);
807   _this->_impl_.double_val_.MergeFrom(from._impl_.double_val_);
808   _this->_impl_.int_val_.MergeFrom(from._impl_.int_val_);
809   _this->_impl_.string_val_.MergeFrom(from._impl_.string_val_);
810   _this->_impl_.scomplex_val_.MergeFrom(from._impl_.scomplex_val_);
811   _this->_impl_.int64_val_.MergeFrom(from._impl_.int64_val_);
812   _this->_impl_.bool_val_.MergeFrom(from._impl_.bool_val_);
813   _this->_impl_.dcomplex_val_.MergeFrom(from._impl_.dcomplex_val_);
814   _this->_impl_.half_val_.MergeFrom(from._impl_.half_val_);
815   _this->_impl_.resource_handle_val_.MergeFrom(from._impl_.resource_handle_val_);
816   _this->_impl_.variant_val_.MergeFrom(from._impl_.variant_val_);
817   _this->_impl_.uint32_val_.MergeFrom(from._impl_.uint32_val_);
818   _this->_impl_.uint64_val_.MergeFrom(from._impl_.uint64_val_);
819   if (!from._internal_tensor_content().empty()) {
820     _this->_internal_set_tensor_content(from._internal_tensor_content());
821   }
822   if (from._internal_has_tensor_shape()) {
823     _this->_internal_mutable_tensor_shape()->::tensorflow::TensorShapeProto::MergeFrom(
824         from._internal_tensor_shape());
825   }
826   if (from._internal_dtype() != 0) {
827     _this->_internal_set_dtype(from._internal_dtype());
828   }
829   if (from._internal_version_number() != 0) {
830     _this->_internal_set_version_number(from._internal_version_number());
831   }
832   _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
833 }
834
CopyFrom(const TensorProto & from)835 void TensorProto::CopyFrom(const TensorProto& from) {
836 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.TensorProto)
837 if (&from == this) return;
838 Clear();
839 MergeFrom(from);
840 }
841
// proto3 messages have no required fields, so every instance is
// considered fully initialized.
bool TensorProto::IsInitialized() const {
  return true;
}
845
// Swaps the contents of two TensorProto instances without copying field
// data. Used by Swap(); callers are responsible for arena compatibility
// except for the string field, which is handled arena-aware below.
void TensorProto::InternalSwap(TensorProto* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  // Repeated fields swap their internal buffers (O(1)).
  _impl_.float_val_.InternalSwap(&other->_impl_.float_val_);
  _impl_.double_val_.InternalSwap(&other->_impl_.double_val_);
  _impl_.int_val_.InternalSwap(&other->_impl_.int_val_);
  _impl_.string_val_.InternalSwap(&other->_impl_.string_val_);
  _impl_.scomplex_val_.InternalSwap(&other->_impl_.scomplex_val_);
  _impl_.int64_val_.InternalSwap(&other->_impl_.int64_val_);
  _impl_.bool_val_.InternalSwap(&other->_impl_.bool_val_);
  _impl_.dcomplex_val_.InternalSwap(&other->_impl_.dcomplex_val_);
  _impl_.half_val_.InternalSwap(&other->_impl_.half_val_);
  _impl_.resource_handle_val_.InternalSwap(&other->_impl_.resource_handle_val_);
  _impl_.variant_val_.InternalSwap(&other->_impl_.variant_val_);
  _impl_.uint32_val_.InternalSwap(&other->_impl_.uint32_val_);
  _impl_.uint64_val_.InternalSwap(&other->_impl_.uint64_val_);
  // Arena-aware string swap: pointer-swaps when both strings live on the
  // same arena, copies otherwise.
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.tensor_content_, lhs_arena,
      &other->_impl_.tensor_content_, rhs_arena
  );
  // The remaining members (tensor_shape_ pointer, dtype_, version_number_)
  // are laid out contiguously in Impl_, so one byte-wise memswap over the
  // span [tensor_shape_ .. version_number_] swaps them all at once.
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(TensorProto, _impl_.version_number_)
      + sizeof(TensorProto::_impl_.version_number_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(TensorProto, _impl_.tensor_shape_)>(
          reinterpret_cast<char*>(&_impl_.tensor_shape_),
          reinterpret_cast<char*>(&other->_impl_.tensor_shape_));
}
875
// Returns the fully-qualified proto type name. The lite runtime has no
// descriptor, so the generator emits the name as a string literal.
std::string TensorProto::GetTypeName() const {
  return "tensorflow.TensorProto";
}
879
880
881 // ===================================================================
882
// Generated helper granting the runtime access to private members of
// VariantTensorDataProto; empty because this message needs no such hooks
// (no has-bits, no oneofs, no required fields).
class VariantTensorDataProto::_Internal {
 public:
};
886
// Arena constructor. `is_message_owned` marks a message whose lifetime is
// owned by the arena itself (message-owned arena optimization).
VariantTensorDataProto::VariantTensorDataProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                         bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.VariantTensorDataProto)
}
// Deep-copy constructor: copy-constructs the repeated `tensors` field,
// then clones the `type_name` and `metadata` strings and any unknown
// fields from `from`.
VariantTensorDataProto::VariantTensorDataProto(const VariantTensorDataProto& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  VariantTensorDataProto* const _this = this; (void)_this;
  // Placement-new the implementation struct: tensors_ is copy-constructed,
  // the string fields start empty and are populated below.
  new (&_impl_) Impl_{
      decltype(_impl_.tensors_){from._impl_.tensors_}
    , decltype(_impl_.type_name_){}
    , decltype(_impl_.metadata_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.type_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // Debug build option: force a real allocation even for the default empty
  // string so that lifetime bugs surface.
  _impl_.type_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_type_name().empty()) {
    _this->_impl_.type_name_.Set(from._internal_type_name(),
      _this->GetArenaForAllocation());
  }
  _impl_.metadata_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  _impl_.metadata_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_metadata().empty()) {
    _this->_impl_.metadata_.Set(from._internal_metadata(),
      _this->GetArenaForAllocation());
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.VariantTensorDataProto)
}
921
// Shared construction path for both the default and arena constructors:
// placement-news the Impl_ aggregate and default-initializes the strings.
inline void VariantTensorDataProto::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.tensors_){arena}
    , decltype(_impl_.type_name_){}
    , decltype(_impl_.metadata_){}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.type_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  // Debug build option: see PROTOBUF_FORCE_COPY_DEFAULT_STRING docs.
  _impl_.type_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  _impl_.metadata_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
  _impl_.metadata_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}
941
~VariantTensorDataProto()942 VariantTensorDataProto::~VariantTensorDataProto() {
943 // @@protoc_insertion_point(destructor:tensorflow.VariantTensorDataProto)
944 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
945 (void)arena;
946 return;
947 }
948 SharedDtor();
949 }
950
// Frees heap-allocated field storage. Only reached for non-arena
// messages (the DCHECK enforces this).
inline void VariantTensorDataProto::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.tensors_.~RepeatedPtrField();
  _impl_.type_name_.Destroy();
  _impl_.metadata_.Destroy();
}
957
// Stores the size computed by ByteSizeLong() so serialization can reuse
// it; `const` because the cached-size member is mutable by design.
void VariantTensorDataProto::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}
961
// Resets every field to its default value and drops unknown fields.
void VariantTensorDataProto::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.VariantTensorDataProto)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.tensors_.Clear();
  _impl_.type_name_.ClearToEmpty();
  _impl_.metadata_.ClearToEmpty();
  _internal_metadata_.Clear<std::string>();
}
973
// Wire-format parser. Reads tags in a loop, dispatching on field number;
// returns the new read position, or nullptr on malformed input.
const char* VariantTensorDataProto::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // string type_name = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
          auto str = _internal_mutable_type_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          // proto3 `string` fields must hold valid UTF-8.
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // bytes metadata = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          auto str = _internal_mutable_metadata();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);  // `bytes` field: no UTF-8 validation
        } else {
          goto handle_unusual;
        }
        continue;
      // repeated .tensorflow.TensorProto tensors = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          ptr -= 1;
          // Greedy loop: keep consuming elements while the next tag is
          // also 26, avoiding a round-trip through the outer dispatch.
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(_internal_add_tensors(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<26>(ptr));
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    // Tag 0 = end of stream; wire type 4 = END_GROUP terminating an
    // enclosing group: record the tag and stop.
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    // Unknown field: preserve its raw bytes in the unknown-field set.
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}
1037
// Serializes the message to the wire format. Fields are written in tag
// order; proto3 singular fields are skipped when they hold the default
// (empty) value. Returns the advanced write cursor.
::uint8_t* VariantTensorDataProto::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.VariantTensorDataProto)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // string type_name = 1;
  if (!this->_internal_type_name().empty()) {
    // Validate UTF-8 on the way out (logs/fails per runtime settings).
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
        this->_internal_type_name().data(), static_cast<int>(this->_internal_type_name().length()),
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
        "tensorflow.VariantTensorDataProto.type_name");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_type_name(), target);
  }

  // bytes metadata = 2;
  if (!this->_internal_metadata().empty()) {
    target = stream->WriteBytesMaybeAliased(
        2, this->_internal_metadata(), target);
  }

  // repeated .tensorflow.TensorProto tensors = 3;
  for (unsigned i = 0,
      n = static_cast<unsigned>(this->_internal_tensors_size()); i < n; i++) {
    const auto& repfield = this->_internal_tensors(i);
    // GetCachedSize() relies on ByteSizeLong() having run first.
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
        InternalWriteMessage(3, repfield, repfield.GetCachedSize(), target, stream);
  }

  // Unknown fields are stored pre-encoded; emit them verbatim.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.VariantTensorDataProto)
  return target;
}
1075
// Computes the exact serialized size in bytes and caches it for use by
// _InternalSerialize (which calls GetCachedSize on submessages).
size_t VariantTensorDataProto::ByteSizeLong() const {
  // @@protoc_insertion_point(message_byte_size_start:tensorflow.VariantTensorDataProto)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.TensorProto tensors = 3;
  // One byte of tag overhead per element, plus each element's own size.
  total_size += 1UL * this->_internal_tensors_size();
  for (const auto& msg : this->_impl_.tensors_) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
  }

  // string type_name = 1;
  if (!this->_internal_type_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
          this->_internal_type_name());
  }

  // bytes metadata = 2;
  if (!this->_internal_metadata().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize(
          this->_internal_metadata());
  }

  // Unknown fields are stored pre-encoded; add their raw length.
  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
1112
CheckTypeAndMergeFrom(const::PROTOBUF_NAMESPACE_ID::MessageLite & from)1113 void VariantTensorDataProto::CheckTypeAndMergeFrom(
1114 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
1115 MergeFrom(*::_pbi::DownCast<const VariantTensorDataProto*>(
1116 &from));
1117 }
1118
MergeFrom(const VariantTensorDataProto & from)1119 void VariantTensorDataProto::MergeFrom(const VariantTensorDataProto& from) {
1120 VariantTensorDataProto* const _this = this;
1121 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.VariantTensorDataProto)
1122 GOOGLE_DCHECK_NE(&from, _this);
1123 ::uint32_t cached_has_bits = 0;
1124 (void) cached_has_bits;
1125
1126 _this->_impl_.tensors_.MergeFrom(from._impl_.tensors_);
1127 if (!from._internal_type_name().empty()) {
1128 _this->_internal_set_type_name(from._internal_type_name());
1129 }
1130 if (!from._internal_metadata().empty()) {
1131 _this->_internal_set_metadata(from._internal_metadata());
1132 }
1133 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1134 }
1135
CopyFrom(const VariantTensorDataProto & from)1136 void VariantTensorDataProto::CopyFrom(const VariantTensorDataProto& from) {
1137 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.VariantTensorDataProto)
1138 if (&from == this) return;
1139 Clear();
1140 MergeFrom(from);
1141 }
1142
// proto3 messages have no required fields, so every instance is
// considered fully initialized.
bool VariantTensorDataProto::IsInitialized() const {
  return true;
}
1146
// Swaps the contents of two VariantTensorDataProto instances without
// copying field data.
void VariantTensorDataProto::InternalSwap(VariantTensorDataProto* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  // Repeated field swaps its internal buffer (O(1)).
  _impl_.tensors_.InternalSwap(&other->_impl_.tensors_);
  // Arena-aware string swaps: pointer-swap on matching arenas, copy
  // otherwise.
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.type_name_, lhs_arena,
      &other->_impl_.type_name_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.metadata_, lhs_arena,
      &other->_impl_.metadata_, rhs_arena
  );
}
1162
// Returns the fully-qualified proto type name. The lite runtime has no
// descriptor, so the generator emits the name as a string literal.
std::string VariantTensorDataProto::GetTypeName() const {
  return "tensorflow.VariantTensorDataProto";
}
1166
1167
1168 // @@protoc_insertion_point(namespace_scope)
1169 } // namespace tensorflow
1170 PROTOBUF_NAMESPACE_OPEN
// Arena factory specialization: constructs a TensorProto on `arena`, or on
// the heap when `arena` is null.
template<> PROTOBUF_NOINLINE ::tensorflow::TensorProto*
Arena::CreateMaybeMessage< ::tensorflow::TensorProto >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::TensorProto >(arena);
}
// Arena factory specialization: constructs a VariantTensorDataProto on
// `arena`, or on the heap when `arena` is null.
template<> PROTOBUF_NOINLINE ::tensorflow::VariantTensorDataProto*
Arena::CreateMaybeMessage< ::tensorflow::VariantTensorDataProto >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::VariantTensorDataProto >(arena);
}
1179 PROTOBUF_NAMESPACE_CLOSE
1180
1181 // @@protoc_insertion_point(global_scope)
1182 #include <google/protobuf/port_undef.inc>
1183