1 // Generated by the protocol buffer compiler. DO NOT EDIT!
2 // source: tensorflow/core/protobuf/struct.proto
3
4 #include "tensorflow/core/protobuf/struct.pb.h"
5
6 #include <algorithm>
7 #include <cstdint>
8
9 #include <google/protobuf/io/coded_stream.h>
10 #include <google/protobuf/extension_set.h>
11 #include <google/protobuf/wire_format_lite.h>
12 #include <google/protobuf/io/zero_copy_stream_impl_lite.h>
13 // @@protoc_insertion_point(includes)
14 #include <google/protobuf/port_def.inc>
15
16 PROTOBUF_PRAGMA_INIT_SEG
17
18 namespace _pb = ::PROTOBUF_NAMESPACE_ID;
19 namespace _pbi = _pb::internal;
20
21 namespace tensorflow {
22 PROTOBUF_CONSTEXPR StructuredValue::StructuredValue(
23 ::_pbi::ConstantInitialized): _impl_{
24 /*decltype(_impl_.kind_)*/{}
25 , /*decltype(_impl_._cached_size_)*/{}
26 , /*decltype(_impl_._oneof_case_)*/{}} {}
27 struct StructuredValueDefaultTypeInternal {
28   PROTOBUF_CONSTEXPR StructuredValueDefaultTypeInternal()
29 : _instance(::_pbi::ConstantInitialized{}) {}
30   ~StructuredValueDefaultTypeInternal() {}
31 union { // NOLINT(misc-non-private-member-variables-in-classes)
32 StructuredValue _instance;
33 };
34 };
35 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 StructuredValueDefaultTypeInternal _StructuredValue_default_instance_;
36 PROTOBUF_CONSTEXPR NoneValue::NoneValue(
37 ::_pbi::ConstantInitialized): _impl_{
38 /*decltype(_impl_._cached_size_)*/{}} {}
39 struct NoneValueDefaultTypeInternal {
40   PROTOBUF_CONSTEXPR NoneValueDefaultTypeInternal()
41 : _instance(::_pbi::ConstantInitialized{}) {}
42   ~NoneValueDefaultTypeInternal() {}
43 union { // NOLINT(misc-non-private-member-variables-in-classes)
44 NoneValue _instance;
45 };
46 };
47 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 NoneValueDefaultTypeInternal _NoneValue_default_instance_;
48 PROTOBUF_CONSTEXPR ListValue::ListValue(
49 ::_pbi::ConstantInitialized): _impl_{
50 /*decltype(_impl_.values_)*/{}
51 , /*decltype(_impl_._cached_size_)*/{}} {}
52 struct ListValueDefaultTypeInternal {
53   PROTOBUF_CONSTEXPR ListValueDefaultTypeInternal()
54 : _instance(::_pbi::ConstantInitialized{}) {}
55   ~ListValueDefaultTypeInternal() {}
56 union { // NOLINT(misc-non-private-member-variables-in-classes)
57 ListValue _instance;
58 };
59 };
60 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ListValueDefaultTypeInternal _ListValue_default_instance_;
61 PROTOBUF_CONSTEXPR TupleValue::TupleValue(
62 ::_pbi::ConstantInitialized): _impl_{
63 /*decltype(_impl_.values_)*/{}
64 , /*decltype(_impl_._cached_size_)*/{}} {}
65 struct TupleValueDefaultTypeInternal {
66   PROTOBUF_CONSTEXPR TupleValueDefaultTypeInternal()
67 : _instance(::_pbi::ConstantInitialized{}) {}
68   ~TupleValueDefaultTypeInternal() {}
69 union { // NOLINT(misc-non-private-member-variables-in-classes)
70 TupleValue _instance;
71 };
72 };
73 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 TupleValueDefaultTypeInternal _TupleValue_default_instance_;
74 PROTOBUF_CONSTEXPR DictValue_FieldsEntry_DoNotUse::DictValue_FieldsEntry_DoNotUse(
75 ::_pbi::ConstantInitialized) {}
76 struct DictValue_FieldsEntry_DoNotUseDefaultTypeInternal {
77   PROTOBUF_CONSTEXPR DictValue_FieldsEntry_DoNotUseDefaultTypeInternal()
78 : _instance(::_pbi::ConstantInitialized{}) {}
79   ~DictValue_FieldsEntry_DoNotUseDefaultTypeInternal() {}
80 union { // NOLINT(misc-non-private-member-variables-in-classes)
81 DictValue_FieldsEntry_DoNotUse _instance;
82 };
83 };
84 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 DictValue_FieldsEntry_DoNotUseDefaultTypeInternal _DictValue_FieldsEntry_DoNotUse_default_instance_;
85 PROTOBUF_CONSTEXPR DictValue::DictValue(
86 ::_pbi::ConstantInitialized): _impl_{
87 /*decltype(_impl_.fields_)*/{}
88 , /*decltype(_impl_._cached_size_)*/{}} {}
89 struct DictValueDefaultTypeInternal {
90   PROTOBUF_CONSTEXPR DictValueDefaultTypeInternal()
91 : _instance(::_pbi::ConstantInitialized{}) {}
92   ~DictValueDefaultTypeInternal() {}
93 union { // NOLINT(misc-non-private-member-variables-in-classes)
94 DictValue _instance;
95 };
96 };
97 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 DictValueDefaultTypeInternal _DictValue_default_instance_;
98 PROTOBUF_CONSTEXPR PairValue::PairValue(
99 ::_pbi::ConstantInitialized): _impl_{
100 /*decltype(_impl_.key_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
101 , /*decltype(_impl_.value_)*/nullptr
102 , /*decltype(_impl_._cached_size_)*/{}} {}
103 struct PairValueDefaultTypeInternal {
104   PROTOBUF_CONSTEXPR PairValueDefaultTypeInternal()
105 : _instance(::_pbi::ConstantInitialized{}) {}
106   ~PairValueDefaultTypeInternal() {}
107 union { // NOLINT(misc-non-private-member-variables-in-classes)
108 PairValue _instance;
109 };
110 };
111 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 PairValueDefaultTypeInternal _PairValue_default_instance_;
112 PROTOBUF_CONSTEXPR NamedTupleValue::NamedTupleValue(
113 ::_pbi::ConstantInitialized): _impl_{
114 /*decltype(_impl_.values_)*/{}
115 , /*decltype(_impl_.name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
116 , /*decltype(_impl_._cached_size_)*/{}} {}
117 struct NamedTupleValueDefaultTypeInternal {
118   PROTOBUF_CONSTEXPR NamedTupleValueDefaultTypeInternal()
119 : _instance(::_pbi::ConstantInitialized{}) {}
120   ~NamedTupleValueDefaultTypeInternal() {}
121 union { // NOLINT(misc-non-private-member-variables-in-classes)
122 NamedTupleValue _instance;
123 };
124 };
125 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 NamedTupleValueDefaultTypeInternal _NamedTupleValue_default_instance_;
126 PROTOBUF_CONSTEXPR TensorSpecProto::TensorSpecProto(
127 ::_pbi::ConstantInitialized): _impl_{
128 /*decltype(_impl_.name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
129 , /*decltype(_impl_.shape_)*/nullptr
130 , /*decltype(_impl_.dtype_)*/0
131 , /*decltype(_impl_._cached_size_)*/{}} {}
132 struct TensorSpecProtoDefaultTypeInternal {
133   PROTOBUF_CONSTEXPR TensorSpecProtoDefaultTypeInternal()
134 : _instance(::_pbi::ConstantInitialized{}) {}
135   ~TensorSpecProtoDefaultTypeInternal() {}
136 union { // NOLINT(misc-non-private-member-variables-in-classes)
137 TensorSpecProto _instance;
138 };
139 };
140 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 TensorSpecProtoDefaultTypeInternal _TensorSpecProto_default_instance_;
141 PROTOBUF_CONSTEXPR BoundedTensorSpecProto::BoundedTensorSpecProto(
142 ::_pbi::ConstantInitialized): _impl_{
143 /*decltype(_impl_.name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
144 , /*decltype(_impl_.shape_)*/nullptr
145 , /*decltype(_impl_.minimum_)*/nullptr
146 , /*decltype(_impl_.maximum_)*/nullptr
147 , /*decltype(_impl_.dtype_)*/0
148 , /*decltype(_impl_._cached_size_)*/{}} {}
149 struct BoundedTensorSpecProtoDefaultTypeInternal {
150   PROTOBUF_CONSTEXPR BoundedTensorSpecProtoDefaultTypeInternal()
151 : _instance(::_pbi::ConstantInitialized{}) {}
152   ~BoundedTensorSpecProtoDefaultTypeInternal() {}
153 union { // NOLINT(misc-non-private-member-variables-in-classes)
154 BoundedTensorSpecProto _instance;
155 };
156 };
157 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 BoundedTensorSpecProtoDefaultTypeInternal _BoundedTensorSpecProto_default_instance_;
158 PROTOBUF_CONSTEXPR TypeSpecProto::TypeSpecProto(
159 ::_pbi::ConstantInitialized): _impl_{
160 /*decltype(_impl_.type_spec_class_name_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
161 , /*decltype(_impl_.type_state_)*/nullptr
162 , /*decltype(_impl_.type_spec_class_)*/0
163 , /*decltype(_impl_.num_flat_components_)*/0
164 , /*decltype(_impl_._cached_size_)*/{}} {}
165 struct TypeSpecProtoDefaultTypeInternal {
166   PROTOBUF_CONSTEXPR TypeSpecProtoDefaultTypeInternal()
167 : _instance(::_pbi::ConstantInitialized{}) {}
168   ~TypeSpecProtoDefaultTypeInternal() {}
169 union { // NOLINT(misc-non-private-member-variables-in-classes)
170 TypeSpecProto _instance;
171 };
172 };
173 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 TypeSpecProtoDefaultTypeInternal _TypeSpecProto_default_instance_;
174 } // namespace tensorflow
175 namespace tensorflow {
176 bool TypeSpecProto_TypeSpecClass_IsValid(int value) {
177 switch (value) {
178 case 0:
179 case 1:
180 case 2:
181 case 3:
182 case 4:
183 case 5:
184 case 6:
185 case 7:
186 case 8:
187 case 9:
188 case 10:
189 case 12:
190 case 13:
191 return true;
192 default:
193 return false;
194 }
195 }
196
197 static ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<std::string> TypeSpecProto_TypeSpecClass_strings[13] = {};
198
199 static const char TypeSpecProto_TypeSpecClass_names[] =
200 "DATA_DATASET_SPEC"
201 "DATA_ITERATOR_SPEC"
202 "EXTENSION_TYPE_SPEC"
203 "INDEXED_SLICES_SPEC"
204 "OPTIONAL_SPEC"
205 "PER_REPLICA_SPEC"
206 "RAGGED_TENSOR_SPEC"
207 "REGISTERED_TYPE_SPEC"
208 "ROW_PARTITION_SPEC"
209 "SPARSE_TENSOR_SPEC"
210 "TENSOR_ARRAY_SPEC"
211 "UNKNOWN"
212 "VARIABLE_SPEC";
213
214 static const ::PROTOBUF_NAMESPACE_ID::internal::EnumEntry TypeSpecProto_TypeSpecClass_entries[] = {
215 { {TypeSpecProto_TypeSpecClass_names + 0, 17}, 5 },
216 { {TypeSpecProto_TypeSpecClass_names + 17, 18}, 6 },
217 { {TypeSpecProto_TypeSpecClass_names + 35, 19}, 13 },
218 { {TypeSpecProto_TypeSpecClass_names + 54, 19}, 2 },
219 { {TypeSpecProto_TypeSpecClass_names + 73, 13}, 7 },
220 { {TypeSpecProto_TypeSpecClass_names + 86, 16}, 8 },
221 { {TypeSpecProto_TypeSpecClass_names + 102, 18}, 3 },
222 { {TypeSpecProto_TypeSpecClass_names + 120, 20}, 12 },
223 { {TypeSpecProto_TypeSpecClass_names + 140, 18}, 10 },
224 { {TypeSpecProto_TypeSpecClass_names + 158, 18}, 1 },
225 { {TypeSpecProto_TypeSpecClass_names + 176, 17}, 4 },
226 { {TypeSpecProto_TypeSpecClass_names + 193, 7}, 0 },
227 { {TypeSpecProto_TypeSpecClass_names + 200, 13}, 9 },
228 };
229
230 static const int TypeSpecProto_TypeSpecClass_entries_by_number[] = {
231 11, // 0 -> UNKNOWN
232 9, // 1 -> SPARSE_TENSOR_SPEC
233 3, // 2 -> INDEXED_SLICES_SPEC
234 6, // 3 -> RAGGED_TENSOR_SPEC
235 10, // 4 -> TENSOR_ARRAY_SPEC
236 0, // 5 -> DATA_DATASET_SPEC
237 1, // 6 -> DATA_ITERATOR_SPEC
238 4, // 7 -> OPTIONAL_SPEC
239 5, // 8 -> PER_REPLICA_SPEC
240 12, // 9 -> VARIABLE_SPEC
241 8, // 10 -> ROW_PARTITION_SPEC
242 7, // 12 -> REGISTERED_TYPE_SPEC
243 2, // 13 -> EXTENSION_TYPE_SPEC
244 };
245
246 const std::string& TypeSpecProto_TypeSpecClass_Name(
247 TypeSpecProto_TypeSpecClass value) {
248 static const bool dummy =
249 ::PROTOBUF_NAMESPACE_ID::internal::InitializeEnumStrings(
250 TypeSpecProto_TypeSpecClass_entries,
251 TypeSpecProto_TypeSpecClass_entries_by_number,
252 13, TypeSpecProto_TypeSpecClass_strings);
253 (void) dummy;
254 int idx = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumName(
255 TypeSpecProto_TypeSpecClass_entries,
256 TypeSpecProto_TypeSpecClass_entries_by_number,
257 13, value);
258 return idx == -1 ? ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString() :
259 TypeSpecProto_TypeSpecClass_strings[idx].get();
260 }
261 bool TypeSpecProto_TypeSpecClass_Parse(
262 ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, TypeSpecProto_TypeSpecClass* value) {
263 int int_value;
264 bool success = ::PROTOBUF_NAMESPACE_ID::internal::LookUpEnumValue(
265 TypeSpecProto_TypeSpecClass_entries, 13, name, &int_value);
266 if (success) {
267 *value = static_cast<TypeSpecProto_TypeSpecClass>(int_value);
268 }
269 return success;
270 }
271 #if (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
272 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::UNKNOWN;
273 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::SPARSE_TENSOR_SPEC;
274 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::INDEXED_SLICES_SPEC;
275 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::RAGGED_TENSOR_SPEC;
276 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::TENSOR_ARRAY_SPEC;
277 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::DATA_DATASET_SPEC;
278 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::DATA_ITERATOR_SPEC;
279 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::OPTIONAL_SPEC;
280 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::PER_REPLICA_SPEC;
281 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::VARIABLE_SPEC;
282 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::ROW_PARTITION_SPEC;
283 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::REGISTERED_TYPE_SPEC;
284 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::EXTENSION_TYPE_SPEC;
285 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::TypeSpecClass_MIN;
286 constexpr TypeSpecProto_TypeSpecClass TypeSpecProto::TypeSpecClass_MAX;
287 constexpr int TypeSpecProto::TypeSpecClass_ARRAYSIZE;
288 #endif // (__cplusplus < 201703) && (!defined(_MSC_VER) || (_MSC_VER >= 1900 && _MSC_VER < 1912))
289
290 // ===================================================================
291
292 class StructuredValue::_Internal {
293 public:
294 static const ::tensorflow::NoneValue& none_value(const StructuredValue* msg);
295 static const ::tensorflow::TensorShapeProto& tensor_shape_value(const StructuredValue* msg);
296 static const ::tensorflow::TensorSpecProto& tensor_spec_value(const StructuredValue* msg);
297 static const ::tensorflow::TypeSpecProto& type_spec_value(const StructuredValue* msg);
298 static const ::tensorflow::BoundedTensorSpecProto& bounded_tensor_spec_value(const StructuredValue* msg);
299 static const ::tensorflow::ListValue& list_value(const StructuredValue* msg);
300 static const ::tensorflow::TupleValue& tuple_value(const StructuredValue* msg);
301 static const ::tensorflow::DictValue& dict_value(const StructuredValue* msg);
302 static const ::tensorflow::NamedTupleValue& named_tuple_value(const StructuredValue* msg);
303 };
304
305 const ::tensorflow::NoneValue&
306 StructuredValue::_Internal::none_value(const StructuredValue* msg) {
307 return *msg->_impl_.kind_.none_value_;
308 }
309 const ::tensorflow::TensorShapeProto&
310 StructuredValue::_Internal::tensor_shape_value(const StructuredValue* msg) {
311 return *msg->_impl_.kind_.tensor_shape_value_;
312 }
313 const ::tensorflow::TensorSpecProto&
314 StructuredValue::_Internal::tensor_spec_value(const StructuredValue* msg) {
315 return *msg->_impl_.kind_.tensor_spec_value_;
316 }
317 const ::tensorflow::TypeSpecProto&
318 StructuredValue::_Internal::type_spec_value(const StructuredValue* msg) {
319 return *msg->_impl_.kind_.type_spec_value_;
320 }
321 const ::tensorflow::BoundedTensorSpecProto&
322 StructuredValue::_Internal::bounded_tensor_spec_value(const StructuredValue* msg) {
323 return *msg->_impl_.kind_.bounded_tensor_spec_value_;
324 }
325 const ::tensorflow::ListValue&
326 StructuredValue::_Internal::list_value(const StructuredValue* msg) {
327 return *msg->_impl_.kind_.list_value_;
328 }
329 const ::tensorflow::TupleValue&
330 StructuredValue::_Internal::tuple_value(const StructuredValue* msg) {
331 return *msg->_impl_.kind_.tuple_value_;
332 }
333 const ::tensorflow::DictValue&
334 StructuredValue::_Internal::dict_value(const StructuredValue* msg) {
335 return *msg->_impl_.kind_.dict_value_;
336 }
337 const ::tensorflow::NamedTupleValue&
338 StructuredValue::_Internal::named_tuple_value(const StructuredValue* msg) {
339 return *msg->_impl_.kind_.named_tuple_value_;
340 }
341 void StructuredValue::set_allocated_none_value(::tensorflow::NoneValue* none_value) {
342 ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
343 clear_kind();
344 if (none_value) {
345 ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
346 ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(none_value);
347 if (message_arena != submessage_arena) {
348 none_value = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
349 message_arena, none_value, submessage_arena);
350 }
351 set_has_none_value();
352 _impl_.kind_.none_value_ = none_value;
353 }
354 // @@protoc_insertion_point(field_set_allocated:tensorflow.StructuredValue.none_value)
355 }
356 void StructuredValue::set_allocated_tensor_shape_value(::tensorflow::TensorShapeProto* tensor_shape_value) {
357 ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
358 clear_kind();
359 if (tensor_shape_value) {
360 ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
361 ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(
362 reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(tensor_shape_value));
363 if (message_arena != submessage_arena) {
364 tensor_shape_value = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
365 message_arena, tensor_shape_value, submessage_arena);
366 }
367 set_has_tensor_shape_value();
368 _impl_.kind_.tensor_shape_value_ = tensor_shape_value;
369 }
370 // @@protoc_insertion_point(field_set_allocated:tensorflow.StructuredValue.tensor_shape_value)
371 }
372 void StructuredValue::clear_tensor_shape_value() {
373 if (_internal_has_tensor_shape_value()) {
374 if (GetArenaForAllocation() == nullptr) {
375 delete _impl_.kind_.tensor_shape_value_;
376 }
377 clear_has_kind();
378 }
379 }
380 void StructuredValue::set_allocated_tensor_spec_value(::tensorflow::TensorSpecProto* tensor_spec_value) {
381 ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
382 clear_kind();
383 if (tensor_spec_value) {
384 ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
385 ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(tensor_spec_value);
386 if (message_arena != submessage_arena) {
387 tensor_spec_value = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
388 message_arena, tensor_spec_value, submessage_arena);
389 }
390 set_has_tensor_spec_value();
391 _impl_.kind_.tensor_spec_value_ = tensor_spec_value;
392 }
393 // @@protoc_insertion_point(field_set_allocated:tensorflow.StructuredValue.tensor_spec_value)
394 }
395 void StructuredValue::set_allocated_type_spec_value(::tensorflow::TypeSpecProto* type_spec_value) {
396 ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
397 clear_kind();
398 if (type_spec_value) {
399 ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
400 ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(type_spec_value);
401 if (message_arena != submessage_arena) {
402 type_spec_value = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
403 message_arena, type_spec_value, submessage_arena);
404 }
405 set_has_type_spec_value();
406 _impl_.kind_.type_spec_value_ = type_spec_value;
407 }
408 // @@protoc_insertion_point(field_set_allocated:tensorflow.StructuredValue.type_spec_value)
409 }
410 void StructuredValue::set_allocated_bounded_tensor_spec_value(::tensorflow::BoundedTensorSpecProto* bounded_tensor_spec_value) {
411 ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
412 clear_kind();
413 if (bounded_tensor_spec_value) {
414 ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
415 ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(bounded_tensor_spec_value);
416 if (message_arena != submessage_arena) {
417 bounded_tensor_spec_value = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
418 message_arena, bounded_tensor_spec_value, submessage_arena);
419 }
420 set_has_bounded_tensor_spec_value();
421 _impl_.kind_.bounded_tensor_spec_value_ = bounded_tensor_spec_value;
422 }
423 // @@protoc_insertion_point(field_set_allocated:tensorflow.StructuredValue.bounded_tensor_spec_value)
424 }
425 void StructuredValue::set_allocated_list_value(::tensorflow::ListValue* list_value) {
426 ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
427 clear_kind();
428 if (list_value) {
429 ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
430 ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(list_value);
431 if (message_arena != submessage_arena) {
432 list_value = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
433 message_arena, list_value, submessage_arena);
434 }
435 set_has_list_value();
436 _impl_.kind_.list_value_ = list_value;
437 }
438 // @@protoc_insertion_point(field_set_allocated:tensorflow.StructuredValue.list_value)
439 }
440 void StructuredValue::set_allocated_tuple_value(::tensorflow::TupleValue* tuple_value) {
441 ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
442 clear_kind();
443 if (tuple_value) {
444 ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
445 ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(tuple_value);
446 if (message_arena != submessage_arena) {
447 tuple_value = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
448 message_arena, tuple_value, submessage_arena);
449 }
450 set_has_tuple_value();
451 _impl_.kind_.tuple_value_ = tuple_value;
452 }
453 // @@protoc_insertion_point(field_set_allocated:tensorflow.StructuredValue.tuple_value)
454 }
455 void StructuredValue::set_allocated_dict_value(::tensorflow::DictValue* dict_value) {
456 ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
457 clear_kind();
458 if (dict_value) {
459 ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
460 ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(dict_value);
461 if (message_arena != submessage_arena) {
462 dict_value = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
463 message_arena, dict_value, submessage_arena);
464 }
465 set_has_dict_value();
466 _impl_.kind_.dict_value_ = dict_value;
467 }
468 // @@protoc_insertion_point(field_set_allocated:tensorflow.StructuredValue.dict_value)
469 }
470 void StructuredValue::set_allocated_named_tuple_value(::tensorflow::NamedTupleValue* named_tuple_value) {
471 ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
472 clear_kind();
473 if (named_tuple_value) {
474 ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
475 ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(named_tuple_value);
476 if (message_arena != submessage_arena) {
477 named_tuple_value = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
478 message_arena, named_tuple_value, submessage_arena);
479 }
480 set_has_named_tuple_value();
481 _impl_.kind_.named_tuple_value_ = named_tuple_value;
482 }
483 // @@protoc_insertion_point(field_set_allocated:tensorflow.StructuredValue.named_tuple_value)
484 }
485 StructuredValue::StructuredValue(::PROTOBUF_NAMESPACE_ID::Arena* arena,
486 bool is_message_owned)
487 : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
488 SharedCtor(arena, is_message_owned);
489 // @@protoc_insertion_point(arena_constructor:tensorflow.StructuredValue)
490 }
491 StructuredValue::StructuredValue(const StructuredValue& from)
492 : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
493 StructuredValue* const _this = this; (void)_this;
494 new (&_impl_) Impl_{
495 decltype(_impl_.kind_){}
496 , /*decltype(_impl_._cached_size_)*/{}
497 , /*decltype(_impl_._oneof_case_)*/{}};
498
499 _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
500 clear_has_kind();
501 switch (from.kind_case()) {
502 case kNoneValue: {
503 _this->_internal_mutable_none_value()->::tensorflow::NoneValue::MergeFrom(
504 from._internal_none_value());
505 break;
506 }
507 case kFloat64Value: {
508 _this->_internal_set_float64_value(from._internal_float64_value());
509 break;
510 }
511 case kInt64Value: {
512 _this->_internal_set_int64_value(from._internal_int64_value());
513 break;
514 }
515 case kStringValue: {
516 _this->_internal_set_string_value(from._internal_string_value());
517 break;
518 }
519 case kBoolValue: {
520 _this->_internal_set_bool_value(from._internal_bool_value());
521 break;
522 }
523 case kTensorShapeValue: {
524 _this->_internal_mutable_tensor_shape_value()->::tensorflow::TensorShapeProto::MergeFrom(
525 from._internal_tensor_shape_value());
526 break;
527 }
528 case kTensorDtypeValue: {
529 _this->_internal_set_tensor_dtype_value(from._internal_tensor_dtype_value());
530 break;
531 }
532 case kTensorSpecValue: {
533 _this->_internal_mutable_tensor_spec_value()->::tensorflow::TensorSpecProto::MergeFrom(
534 from._internal_tensor_spec_value());
535 break;
536 }
537 case kTypeSpecValue: {
538 _this->_internal_mutable_type_spec_value()->::tensorflow::TypeSpecProto::MergeFrom(
539 from._internal_type_spec_value());
540 break;
541 }
542 case kBoundedTensorSpecValue: {
543 _this->_internal_mutable_bounded_tensor_spec_value()->::tensorflow::BoundedTensorSpecProto::MergeFrom(
544 from._internal_bounded_tensor_spec_value());
545 break;
546 }
547 case kListValue: {
548 _this->_internal_mutable_list_value()->::tensorflow::ListValue::MergeFrom(
549 from._internal_list_value());
550 break;
551 }
552 case kTupleValue: {
553 _this->_internal_mutable_tuple_value()->::tensorflow::TupleValue::MergeFrom(
554 from._internal_tuple_value());
555 break;
556 }
557 case kDictValue: {
558 _this->_internal_mutable_dict_value()->::tensorflow::DictValue::MergeFrom(
559 from._internal_dict_value());
560 break;
561 }
562 case kNamedTupleValue: {
563 _this->_internal_mutable_named_tuple_value()->::tensorflow::NamedTupleValue::MergeFrom(
564 from._internal_named_tuple_value());
565 break;
566 }
567 case KIND_NOT_SET: {
568 break;
569 }
570 }
571 // @@protoc_insertion_point(copy_constructor:tensorflow.StructuredValue)
572 }
573
574 inline void StructuredValue::SharedCtor(
575 ::_pb::Arena* arena, bool is_message_owned) {
576 (void)arena;
577 (void)is_message_owned;
578 new (&_impl_) Impl_{
579 decltype(_impl_.kind_){}
580 , /*decltype(_impl_._cached_size_)*/{}
581 , /*decltype(_impl_._oneof_case_)*/{}
582 };
583 clear_has_kind();
584 }
585
586 StructuredValue::~StructuredValue() {
587 // @@protoc_insertion_point(destructor:tensorflow.StructuredValue)
588 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
589 (void)arena;
590 return;
591 }
592 SharedDtor();
593 }
594
595 inline void StructuredValue::SharedDtor() {
596 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
597 if (has_kind()) {
598 clear_kind();
599 }
600 }
601
602 void StructuredValue::SetCachedSize(int size) const {
603 _impl_._cached_size_.Set(size);
604 }
605
606 void StructuredValue::clear_kind() {
607 // @@protoc_insertion_point(one_of_clear_start:tensorflow.StructuredValue)
608 switch (kind_case()) {
609 case kNoneValue: {
610 if (GetArenaForAllocation() == nullptr) {
611 delete _impl_.kind_.none_value_;
612 }
613 break;
614 }
615 case kFloat64Value: {
616 // No need to clear
617 break;
618 }
619 case kInt64Value: {
620 // No need to clear
621 break;
622 }
623 case kStringValue: {
624 _impl_.kind_.string_value_.Destroy();
625 break;
626 }
627 case kBoolValue: {
628 // No need to clear
629 break;
630 }
631 case kTensorShapeValue: {
632 if (GetArenaForAllocation() == nullptr) {
633 delete _impl_.kind_.tensor_shape_value_;
634 }
635 break;
636 }
637 case kTensorDtypeValue: {
638 // No need to clear
639 break;
640 }
641 case kTensorSpecValue: {
642 if (GetArenaForAllocation() == nullptr) {
643 delete _impl_.kind_.tensor_spec_value_;
644 }
645 break;
646 }
647 case kTypeSpecValue: {
648 if (GetArenaForAllocation() == nullptr) {
649 delete _impl_.kind_.type_spec_value_;
650 }
651 break;
652 }
653 case kBoundedTensorSpecValue: {
654 if (GetArenaForAllocation() == nullptr) {
655 delete _impl_.kind_.bounded_tensor_spec_value_;
656 }
657 break;
658 }
659 case kListValue: {
660 if (GetArenaForAllocation() == nullptr) {
661 delete _impl_.kind_.list_value_;
662 }
663 break;
664 }
665 case kTupleValue: {
666 if (GetArenaForAllocation() == nullptr) {
667 delete _impl_.kind_.tuple_value_;
668 }
669 break;
670 }
671 case kDictValue: {
672 if (GetArenaForAllocation() == nullptr) {
673 delete _impl_.kind_.dict_value_;
674 }
675 break;
676 }
677 case kNamedTupleValue: {
678 if (GetArenaForAllocation() == nullptr) {
679 delete _impl_.kind_.named_tuple_value_;
680 }
681 break;
682 }
683 case KIND_NOT_SET: {
684 break;
685 }
686 }
687 _impl_._oneof_case_[0] = KIND_NOT_SET;
688 }
689
690
691 void StructuredValue::Clear() {
692 // @@protoc_insertion_point(message_clear_start:tensorflow.StructuredValue)
693 ::uint32_t cached_has_bits = 0;
694 // Prevent compiler warnings about cached_has_bits being unused
695 (void) cached_has_bits;
696
697 clear_kind();
698 _internal_metadata_.Clear<std::string>();
699 }
700
701 const char* StructuredValue::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
702 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
703 while (!ctx->Done(&ptr)) {
704 ::uint32_t tag;
705 ptr = ::_pbi::ReadTag(ptr, &tag);
706 switch (tag >> 3) {
707 // .tensorflow.NoneValue none_value = 1;
708 case 1:
709 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
710 ptr = ctx->ParseMessage(_internal_mutable_none_value(), ptr);
711 CHK_(ptr);
712 } else {
713 goto handle_unusual;
714 }
715 continue;
716 // double float64_value = 11;
717 case 11:
718 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 89)) {
719 _internal_set_float64_value(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr));
720 ptr += sizeof(double);
721 } else {
722 goto handle_unusual;
723 }
724 continue;
725 // sint64 int64_value = 12;
726 case 12:
727 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 96)) {
728 _internal_set_int64_value(::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr));
729 CHK_(ptr);
730 } else {
731 goto handle_unusual;
732 }
733 continue;
734 // string string_value = 13;
735 case 13:
736 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 106)) {
737 auto str = _internal_mutable_string_value();
738 ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
739 CHK_(ptr);
740 CHK_(::_pbi::VerifyUTF8(str, nullptr));
741 } else {
742 goto handle_unusual;
743 }
744 continue;
745 // bool bool_value = 14;
746 case 14:
747 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 112)) {
748 _internal_set_bool_value(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr));
749 CHK_(ptr);
750 } else {
751 goto handle_unusual;
752 }
753 continue;
754 // .tensorflow.TensorShapeProto tensor_shape_value = 31;
755 case 31:
756 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 250)) {
757 ptr = ctx->ParseMessage(_internal_mutable_tensor_shape_value(), ptr);
758 CHK_(ptr);
759 } else {
760 goto handle_unusual;
761 }
762 continue;
763 // .tensorflow.DataType tensor_dtype_value = 32;
764 case 32:
765 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 0)) {
766 ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
767 CHK_(ptr);
768 _internal_set_tensor_dtype_value(static_cast<::tensorflow::DataType>(val));
769 } else {
770 goto handle_unusual;
771 }
772 continue;
773 // .tensorflow.TensorSpecProto tensor_spec_value = 33;
774 case 33:
775 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
776 ptr = ctx->ParseMessage(_internal_mutable_tensor_spec_value(), ptr);
777 CHK_(ptr);
778 } else {
779 goto handle_unusual;
780 }
781 continue;
782 // .tensorflow.TypeSpecProto type_spec_value = 34;
783 case 34:
784 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
785 ptr = ctx->ParseMessage(_internal_mutable_type_spec_value(), ptr);
786 CHK_(ptr);
787 } else {
788 goto handle_unusual;
789 }
790 continue;
791 // .tensorflow.BoundedTensorSpecProto bounded_tensor_spec_value = 35;
792 case 35:
793 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
794 ptr = ctx->ParseMessage(_internal_mutable_bounded_tensor_spec_value(), ptr);
795 CHK_(ptr);
796 } else {
797 goto handle_unusual;
798 }
799 continue;
800 // .tensorflow.ListValue list_value = 51;
801 case 51:
802 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 154)) {
803 ptr = ctx->ParseMessage(_internal_mutable_list_value(), ptr);
804 CHK_(ptr);
805 } else {
806 goto handle_unusual;
807 }
808 continue;
809 // .tensorflow.TupleValue tuple_value = 52;
810 case 52:
811 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 162)) {
812 ptr = ctx->ParseMessage(_internal_mutable_tuple_value(), ptr);
813 CHK_(ptr);
814 } else {
815 goto handle_unusual;
816 }
817 continue;
818 // .tensorflow.DictValue dict_value = 53;
819 case 53:
820 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 170)) {
821 ptr = ctx->ParseMessage(_internal_mutable_dict_value(), ptr);
822 CHK_(ptr);
823 } else {
824 goto handle_unusual;
825 }
826 continue;
827 // .tensorflow.NamedTupleValue named_tuple_value = 54;
828 case 54:
829 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 178)) {
830 ptr = ctx->ParseMessage(_internal_mutable_named_tuple_value(), ptr);
831 CHK_(ptr);
832 } else {
833 goto handle_unusual;
834 }
835 continue;
836 default:
837 goto handle_unusual;
838 } // switch
839 handle_unusual:
840 if ((tag == 0) || ((tag & 7) == 4)) {
841 CHK_(ptr);
842 ctx->SetLastTag(tag);
843 goto message_done;
844 }
845 ptr = UnknownFieldParse(
846 tag,
847 _internal_metadata_.mutable_unknown_fields<std::string>(),
848 ptr, ctx);
849 CHK_(ptr != nullptr);
850 } // while
851 message_done:
852 return ptr;
853 failure:
854 ptr = nullptr;
855 goto message_done;
856 #undef CHK_
857 }
858
859 ::uint8_t* StructuredValue::_InternalSerialize(
860 ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
861 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.StructuredValue)
862 ::uint32_t cached_has_bits = 0;
863 (void) cached_has_bits;
864
865 // .tensorflow.NoneValue none_value = 1;
866 if (_internal_has_none_value()) {
867 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
868 InternalWriteMessage(1, _Internal::none_value(this),
869 _Internal::none_value(this).GetCachedSize(), target, stream);
870 }
871
872 // double float64_value = 11;
873 if (_internal_has_float64_value()) {
874 target = stream->EnsureSpace(target);
875 target = ::_pbi::WireFormatLite::WriteDoubleToArray(11, this->_internal_float64_value(), target);
876 }
877
878 // sint64 int64_value = 12;
879 if (_internal_has_int64_value()) {
880 target = stream->EnsureSpace(target);
881 target = ::_pbi::WireFormatLite::WriteSInt64ToArray(12, this->_internal_int64_value(), target);
882 }
883
884 // string string_value = 13;
885 if (_internal_has_string_value()) {
886 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
887 this->_internal_string_value().data(), static_cast<int>(this->_internal_string_value().length()),
888 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
889 "tensorflow.StructuredValue.string_value");
890 target = stream->WriteStringMaybeAliased(
891 13, this->_internal_string_value(), target);
892 }
893
894 // bool bool_value = 14;
895 if (_internal_has_bool_value()) {
896 target = stream->EnsureSpace(target);
897 target = ::_pbi::WireFormatLite::WriteBoolToArray(14, this->_internal_bool_value(), target);
898 }
899
900 // .tensorflow.TensorShapeProto tensor_shape_value = 31;
901 if (_internal_has_tensor_shape_value()) {
902 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
903 InternalWriteMessage(31, _Internal::tensor_shape_value(this),
904 _Internal::tensor_shape_value(this).GetCachedSize(), target, stream);
905 }
906
907 // .tensorflow.DataType tensor_dtype_value = 32;
908 if (_internal_has_tensor_dtype_value()) {
909 target = stream->EnsureSpace(target);
910 target = ::_pbi::WireFormatLite::WriteEnumToArray(
911 32, this->_internal_tensor_dtype_value(), target);
912 }
913
914 // .tensorflow.TensorSpecProto tensor_spec_value = 33;
915 if (_internal_has_tensor_spec_value()) {
916 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
917 InternalWriteMessage(33, _Internal::tensor_spec_value(this),
918 _Internal::tensor_spec_value(this).GetCachedSize(), target, stream);
919 }
920
921 // .tensorflow.TypeSpecProto type_spec_value = 34;
922 if (_internal_has_type_spec_value()) {
923 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
924 InternalWriteMessage(34, _Internal::type_spec_value(this),
925 _Internal::type_spec_value(this).GetCachedSize(), target, stream);
926 }
927
928 // .tensorflow.BoundedTensorSpecProto bounded_tensor_spec_value = 35;
929 if (_internal_has_bounded_tensor_spec_value()) {
930 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
931 InternalWriteMessage(35, _Internal::bounded_tensor_spec_value(this),
932 _Internal::bounded_tensor_spec_value(this).GetCachedSize(), target, stream);
933 }
934
935 // .tensorflow.ListValue list_value = 51;
936 if (_internal_has_list_value()) {
937 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
938 InternalWriteMessage(51, _Internal::list_value(this),
939 _Internal::list_value(this).GetCachedSize(), target, stream);
940 }
941
942 // .tensorflow.TupleValue tuple_value = 52;
943 if (_internal_has_tuple_value()) {
944 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
945 InternalWriteMessage(52, _Internal::tuple_value(this),
946 _Internal::tuple_value(this).GetCachedSize(), target, stream);
947 }
948
949 // .tensorflow.DictValue dict_value = 53;
950 if (_internal_has_dict_value()) {
951 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
952 InternalWriteMessage(53, _Internal::dict_value(this),
953 _Internal::dict_value(this).GetCachedSize(), target, stream);
954 }
955
956 // .tensorflow.NamedTupleValue named_tuple_value = 54;
957 if (_internal_has_named_tuple_value()) {
958 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
959 InternalWriteMessage(54, _Internal::named_tuple_value(this),
960 _Internal::named_tuple_value(this).GetCachedSize(), target, stream);
961 }
962
963 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
964 target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
965 static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
966 }
967 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.StructuredValue)
968 return target;
969 }
970
971 size_t StructuredValue::ByteSizeLong() const {
972 // @@protoc_insertion_point(message_byte_size_start:tensorflow.StructuredValue)
973 size_t total_size = 0;
974
975 switch (kind_case()) {
976 // .tensorflow.NoneValue none_value = 1;
977 case kNoneValue: {
978 total_size += 1 +
979 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
980 *_impl_.kind_.none_value_);
981 break;
982 }
983 // double float64_value = 11;
984 case kFloat64Value: {
985 total_size += 1 + 8;
986 break;
987 }
988 // sint64 int64_value = 12;
989 case kInt64Value: {
990 total_size += ::_pbi::WireFormatLite::SInt64SizePlusOne(this->_internal_int64_value());
991 break;
992 }
993 // string string_value = 13;
994 case kStringValue: {
995 total_size += 1 +
996 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
997 this->_internal_string_value());
998 break;
999 }
1000 // bool bool_value = 14;
1001 case kBoolValue: {
1002 total_size += 1 + 1;
1003 break;
1004 }
1005 // .tensorflow.TensorShapeProto tensor_shape_value = 31;
1006 case kTensorShapeValue: {
1007 total_size += 2 +
1008 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1009 *_impl_.kind_.tensor_shape_value_);
1010 break;
1011 }
1012 // .tensorflow.DataType tensor_dtype_value = 32;
1013 case kTensorDtypeValue: {
1014 total_size += 2 +
1015 ::_pbi::WireFormatLite::EnumSize(this->_internal_tensor_dtype_value());
1016 break;
1017 }
1018 // .tensorflow.TensorSpecProto tensor_spec_value = 33;
1019 case kTensorSpecValue: {
1020 total_size += 2 +
1021 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1022 *_impl_.kind_.tensor_spec_value_);
1023 break;
1024 }
1025 // .tensorflow.TypeSpecProto type_spec_value = 34;
1026 case kTypeSpecValue: {
1027 total_size += 2 +
1028 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1029 *_impl_.kind_.type_spec_value_);
1030 break;
1031 }
1032 // .tensorflow.BoundedTensorSpecProto bounded_tensor_spec_value = 35;
1033 case kBoundedTensorSpecValue: {
1034 total_size += 2 +
1035 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1036 *_impl_.kind_.bounded_tensor_spec_value_);
1037 break;
1038 }
1039 // .tensorflow.ListValue list_value = 51;
1040 case kListValue: {
1041 total_size += 2 +
1042 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1043 *_impl_.kind_.list_value_);
1044 break;
1045 }
1046 // .tensorflow.TupleValue tuple_value = 52;
1047 case kTupleValue: {
1048 total_size += 2 +
1049 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1050 *_impl_.kind_.tuple_value_);
1051 break;
1052 }
1053 // .tensorflow.DictValue dict_value = 53;
1054 case kDictValue: {
1055 total_size += 2 +
1056 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1057 *_impl_.kind_.dict_value_);
1058 break;
1059 }
1060 // .tensorflow.NamedTupleValue named_tuple_value = 54;
1061 case kNamedTupleValue: {
1062 total_size += 2 +
1063 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1064 *_impl_.kind_.named_tuple_value_);
1065 break;
1066 }
1067 case KIND_NOT_SET: {
1068 break;
1069 }
1070 }
1071 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1072 total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
1073 }
1074 int cached_size = ::_pbi::ToCachedSize(total_size);
1075 SetCachedSize(cached_size);
1076 return total_size;
1077 }
1078
1079 void StructuredValue::CheckTypeAndMergeFrom(
1080 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
1081 MergeFrom(*::_pbi::DownCast<const StructuredValue*>(
1082 &from));
1083 }
1084
1085 void StructuredValue::MergeFrom(const StructuredValue& from) {
1086 StructuredValue* const _this = this;
1087 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.StructuredValue)
1088 GOOGLE_DCHECK_NE(&from, _this);
1089 ::uint32_t cached_has_bits = 0;
1090 (void) cached_has_bits;
1091
1092 switch (from.kind_case()) {
1093 case kNoneValue: {
1094 _this->_internal_mutable_none_value()->::tensorflow::NoneValue::MergeFrom(
1095 from._internal_none_value());
1096 break;
1097 }
1098 case kFloat64Value: {
1099 _this->_internal_set_float64_value(from._internal_float64_value());
1100 break;
1101 }
1102 case kInt64Value: {
1103 _this->_internal_set_int64_value(from._internal_int64_value());
1104 break;
1105 }
1106 case kStringValue: {
1107 _this->_internal_set_string_value(from._internal_string_value());
1108 break;
1109 }
1110 case kBoolValue: {
1111 _this->_internal_set_bool_value(from._internal_bool_value());
1112 break;
1113 }
1114 case kTensorShapeValue: {
1115 _this->_internal_mutable_tensor_shape_value()->::tensorflow::TensorShapeProto::MergeFrom(
1116 from._internal_tensor_shape_value());
1117 break;
1118 }
1119 case kTensorDtypeValue: {
1120 _this->_internal_set_tensor_dtype_value(from._internal_tensor_dtype_value());
1121 break;
1122 }
1123 case kTensorSpecValue: {
1124 _this->_internal_mutable_tensor_spec_value()->::tensorflow::TensorSpecProto::MergeFrom(
1125 from._internal_tensor_spec_value());
1126 break;
1127 }
1128 case kTypeSpecValue: {
1129 _this->_internal_mutable_type_spec_value()->::tensorflow::TypeSpecProto::MergeFrom(
1130 from._internal_type_spec_value());
1131 break;
1132 }
1133 case kBoundedTensorSpecValue: {
1134 _this->_internal_mutable_bounded_tensor_spec_value()->::tensorflow::BoundedTensorSpecProto::MergeFrom(
1135 from._internal_bounded_tensor_spec_value());
1136 break;
1137 }
1138 case kListValue: {
1139 _this->_internal_mutable_list_value()->::tensorflow::ListValue::MergeFrom(
1140 from._internal_list_value());
1141 break;
1142 }
1143 case kTupleValue: {
1144 _this->_internal_mutable_tuple_value()->::tensorflow::TupleValue::MergeFrom(
1145 from._internal_tuple_value());
1146 break;
1147 }
1148 case kDictValue: {
1149 _this->_internal_mutable_dict_value()->::tensorflow::DictValue::MergeFrom(
1150 from._internal_dict_value());
1151 break;
1152 }
1153 case kNamedTupleValue: {
1154 _this->_internal_mutable_named_tuple_value()->::tensorflow::NamedTupleValue::MergeFrom(
1155 from._internal_named_tuple_value());
1156 break;
1157 }
1158 case KIND_NOT_SET: {
1159 break;
1160 }
1161 }
1162 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1163 }
1164
1165 void StructuredValue::CopyFrom(const StructuredValue& from) {
1166 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.StructuredValue)
1167 if (&from == this) return;
1168 Clear();
1169 MergeFrom(from);
1170 }
1171
1172 bool StructuredValue::IsInitialized() const {
1173 return true;
1174 }
1175
1176 void StructuredValue::InternalSwap(StructuredValue* other) {
1177 using std::swap;
1178 _internal_metadata_.InternalSwap(&other->_internal_metadata_);
1179 swap(_impl_.kind_, other->_impl_.kind_);
1180 swap(_impl_._oneof_case_[0], other->_impl_._oneof_case_[0]);
1181 }
1182
1183 std::string StructuredValue::GetTypeName() const {
1184 return "tensorflow.StructuredValue";
1185 }
1186
1187
1188 // ===================================================================
1189
1190 class NoneValue::_Internal {
1191 public:
1192 };
1193
1194 NoneValue::NoneValue(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1195 bool is_message_owned)
1196 : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
1197 SharedCtor(arena, is_message_owned);
1198 // @@protoc_insertion_point(arena_constructor:tensorflow.NoneValue)
1199 }
1200 NoneValue::NoneValue(const NoneValue& from)
1201 : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
1202 NoneValue* const _this = this; (void)_this;
1203 new (&_impl_) Impl_{
1204 /*decltype(_impl_._cached_size_)*/{}};
1205
1206 _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1207 // @@protoc_insertion_point(copy_constructor:tensorflow.NoneValue)
1208 }
1209
1210 inline void NoneValue::SharedCtor(
1211 ::_pb::Arena* arena, bool is_message_owned) {
1212 (void)arena;
1213 (void)is_message_owned;
1214 new (&_impl_) Impl_{
1215 /*decltype(_impl_._cached_size_)*/{}
1216 };
1217 }
1218
1219 NoneValue::~NoneValue() {
1220 // @@protoc_insertion_point(destructor:tensorflow.NoneValue)
1221 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
1222 (void)arena;
1223 return;
1224 }
1225 SharedDtor();
1226 }
1227
1228 inline void NoneValue::SharedDtor() {
1229 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
1230 }
1231
1232 void NoneValue::SetCachedSize(int size) const {
1233 _impl_._cached_size_.Set(size);
1234 }
1235
1236 void NoneValue::Clear() {
1237 // @@protoc_insertion_point(message_clear_start:tensorflow.NoneValue)
1238 ::uint32_t cached_has_bits = 0;
1239 // Prevent compiler warnings about cached_has_bits being unused
1240 (void) cached_has_bits;
1241
1242 _internal_metadata_.Clear<std::string>();
1243 }
1244
1245 const char* NoneValue::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
1246 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1247 while (!ctx->Done(&ptr)) {
1248 ::uint32_t tag;
1249 ptr = ::_pbi::ReadTag(ptr, &tag);
1250 if ((tag == 0) || ((tag & 7) == 4)) {
1251 CHK_(ptr);
1252 ctx->SetLastTag(tag);
1253 goto message_done;
1254 }
1255 ptr = UnknownFieldParse(
1256 tag,
1257 _internal_metadata_.mutable_unknown_fields<std::string>(),
1258 ptr, ctx);
1259 CHK_(ptr != nullptr);
1260 } // while
1261 message_done:
1262 return ptr;
1263 failure:
1264 ptr = nullptr;
1265 goto message_done;
1266 #undef CHK_
1267 }
1268
1269 ::uint8_t* NoneValue::_InternalSerialize(
1270 ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
1271 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.NoneValue)
1272 ::uint32_t cached_has_bits = 0;
1273 (void) cached_has_bits;
1274
1275 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1276 target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
1277 static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
1278 }
1279 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.NoneValue)
1280 return target;
1281 }
1282
1283 size_t NoneValue::ByteSizeLong() const {
1284 // @@protoc_insertion_point(message_byte_size_start:tensorflow.NoneValue)
1285 size_t total_size = 0;
1286
1287 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1288 total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
1289 }
1290 int cached_size = ::_pbi::ToCachedSize(total_size);
1291 SetCachedSize(cached_size);
1292 return total_size;
1293 }
1294
1295 void NoneValue::CheckTypeAndMergeFrom(
1296 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
1297 MergeFrom(*::_pbi::DownCast<const NoneValue*>(
1298 &from));
1299 }
1300
1301 void NoneValue::MergeFrom(const NoneValue& from) {
1302 NoneValue* const _this = this;
1303 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.NoneValue)
1304 GOOGLE_DCHECK_NE(&from, _this);
1305 ::uint32_t cached_has_bits = 0;
1306 (void) cached_has_bits;
1307
1308 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1309 }
1310
1311 void NoneValue::CopyFrom(const NoneValue& from) {
1312 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.NoneValue)
1313 if (&from == this) return;
1314 Clear();
1315 MergeFrom(from);
1316 }
1317
1318 bool NoneValue::IsInitialized() const {
1319 return true;
1320 }
1321
1322 void NoneValue::InternalSwap(NoneValue* other) {
1323 using std::swap;
1324 _internal_metadata_.InternalSwap(&other->_internal_metadata_);
1325 }
1326
1327 std::string NoneValue::GetTypeName() const {
1328 return "tensorflow.NoneValue";
1329 }
1330
1331
1332 // ===================================================================
1333
1334 class ListValue::_Internal {
1335 public:
1336 };
1337
1338 ListValue::ListValue(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1339 bool is_message_owned)
1340 : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
1341 SharedCtor(arena, is_message_owned);
1342 // @@protoc_insertion_point(arena_constructor:tensorflow.ListValue)
1343 }
1344 ListValue::ListValue(const ListValue& from)
1345 : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
1346 ListValue* const _this = this; (void)_this;
1347 new (&_impl_) Impl_{
1348 decltype(_impl_.values_){from._impl_.values_}
1349 , /*decltype(_impl_._cached_size_)*/{}};
1350
1351 _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1352 // @@protoc_insertion_point(copy_constructor:tensorflow.ListValue)
1353 }
1354
1355 inline void ListValue::SharedCtor(
1356 ::_pb::Arena* arena, bool is_message_owned) {
1357 (void)arena;
1358 (void)is_message_owned;
1359 new (&_impl_) Impl_{
1360 decltype(_impl_.values_){arena}
1361 , /*decltype(_impl_._cached_size_)*/{}
1362 };
1363 }
1364
1365 ListValue::~ListValue() {
1366 // @@protoc_insertion_point(destructor:tensorflow.ListValue)
1367 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
1368 (void)arena;
1369 return;
1370 }
1371 SharedDtor();
1372 }
1373
1374 inline void ListValue::SharedDtor() {
1375 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
1376 _impl_.values_.~RepeatedPtrField();
1377 }
1378
1379 void ListValue::SetCachedSize(int size) const {
1380 _impl_._cached_size_.Set(size);
1381 }
1382
1383 void ListValue::Clear() {
1384 // @@protoc_insertion_point(message_clear_start:tensorflow.ListValue)
1385 ::uint32_t cached_has_bits = 0;
1386 // Prevent compiler warnings about cached_has_bits being unused
1387 (void) cached_has_bits;
1388
1389 _impl_.values_.Clear();
1390 _internal_metadata_.Clear<std::string>();
1391 }
1392
1393 const char* ListValue::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
1394 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1395 while (!ctx->Done(&ptr)) {
1396 ::uint32_t tag;
1397 ptr = ::_pbi::ReadTag(ptr, &tag);
1398 switch (tag >> 3) {
1399 // repeated .tensorflow.StructuredValue values = 1;
1400 case 1:
1401 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
1402 ptr -= 1;
1403 do {
1404 ptr += 1;
1405 ptr = ctx->ParseMessage(_internal_add_values(), ptr);
1406 CHK_(ptr);
1407 if (!ctx->DataAvailable(ptr)) break;
1408 } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
1409 } else {
1410 goto handle_unusual;
1411 }
1412 continue;
1413 default:
1414 goto handle_unusual;
1415 } // switch
1416 handle_unusual:
1417 if ((tag == 0) || ((tag & 7) == 4)) {
1418 CHK_(ptr);
1419 ctx->SetLastTag(tag);
1420 goto message_done;
1421 }
1422 ptr = UnknownFieldParse(
1423 tag,
1424 _internal_metadata_.mutable_unknown_fields<std::string>(),
1425 ptr, ctx);
1426 CHK_(ptr != nullptr);
1427 } // while
1428 message_done:
1429 return ptr;
1430 failure:
1431 ptr = nullptr;
1432 goto message_done;
1433 #undef CHK_
1434 }
1435
1436 ::uint8_t* ListValue::_InternalSerialize(
1437 ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
1438 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.ListValue)
1439 ::uint32_t cached_has_bits = 0;
1440 (void) cached_has_bits;
1441
1442 // repeated .tensorflow.StructuredValue values = 1;
1443 for (unsigned i = 0,
1444 n = static_cast<unsigned>(this->_internal_values_size()); i < n; i++) {
1445 const auto& repfield = this->_internal_values(i);
1446 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1447 InternalWriteMessage(1, repfield, repfield.GetCachedSize(), target, stream);
1448 }
1449
1450 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1451 target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
1452 static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
1453 }
1454 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.ListValue)
1455 return target;
1456 }
1457
1458 size_t ListValue::ByteSizeLong() const {
1459 // @@protoc_insertion_point(message_byte_size_start:tensorflow.ListValue)
1460 size_t total_size = 0;
1461
1462 ::uint32_t cached_has_bits = 0;
1463 // Prevent compiler warnings about cached_has_bits being unused
1464 (void) cached_has_bits;
1465
1466 // repeated .tensorflow.StructuredValue values = 1;
1467 total_size += 1UL * this->_internal_values_size();
1468 for (const auto& msg : this->_impl_.values_) {
1469 total_size +=
1470 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
1471 }
1472
1473 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1474 total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
1475 }
1476 int cached_size = ::_pbi::ToCachedSize(total_size);
1477 SetCachedSize(cached_size);
1478 return total_size;
1479 }
1480
1481 void ListValue::CheckTypeAndMergeFrom(
1482 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
1483 MergeFrom(*::_pbi::DownCast<const ListValue*>(
1484 &from));
1485 }
1486
1487 void ListValue::MergeFrom(const ListValue& from) {
1488 ListValue* const _this = this;
1489 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.ListValue)
1490 GOOGLE_DCHECK_NE(&from, _this);
1491 ::uint32_t cached_has_bits = 0;
1492 (void) cached_has_bits;
1493
1494 _this->_impl_.values_.MergeFrom(from._impl_.values_);
1495 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1496 }
1497
1498 void ListValue::CopyFrom(const ListValue& from) {
1499 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.ListValue)
1500 if (&from == this) return;
1501 Clear();
1502 MergeFrom(from);
1503 }
1504
1505 bool ListValue::IsInitialized() const {
1506 return true;
1507 }
1508
1509 void ListValue::InternalSwap(ListValue* other) {
1510 using std::swap;
1511 _internal_metadata_.InternalSwap(&other->_internal_metadata_);
1512 _impl_.values_.InternalSwap(&other->_impl_.values_);
1513 }
1514
1515 std::string ListValue::GetTypeName() const {
1516 return "tensorflow.ListValue";
1517 }
1518
1519
1520 // ===================================================================
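// Lite-runtime implementation of tensorflow.TupleValue; same field layout as
// ListValue ("repeated .tensorflow.StructuredValue values = 1").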
1521
1522 class TupleValue::_Internal {
1523 public:
1524 };
1525
1526 TupleValue::TupleValue(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1527 bool is_message_owned)
1528 : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
1529 SharedCtor(arena, is_message_owned);
1530 // @@protoc_insertion_point(arena_constructor:tensorflow.TupleValue)
1531 }
1532 TupleValue::TupleValue(const TupleValue& from)
1533 : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
1534 TupleValue* const _this = this; (void)_this;
1535 new (&_impl_) Impl_{
1536 decltype(_impl_.values_){from._impl_.values_}
1537 , /*decltype(_impl_._cached_size_)*/{}};
1538
1539 _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1540 // @@protoc_insertion_point(copy_constructor:tensorflow.TupleValue)
1541 }
1542
1543 inline void TupleValue::SharedCtor(
1544 ::_pb::Arena* arena, bool is_message_owned) {
1545 (void)arena;
1546 (void)is_message_owned;
1547 new (&_impl_) Impl_{
1548 decltype(_impl_.values_){arena}
1549 , /*decltype(_impl_._cached_size_)*/{}
1550 };
1551 }
1552
1553 TupleValue::~TupleValue() {
1554 // @@protoc_insertion_point(destructor:tensorflow.TupleValue)
1555 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
1556 (void)arena;
1557 return;
1558 }
1559 SharedDtor();
1560 }
1561
1562 inline void TupleValue::SharedDtor() {
1563 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
1564 _impl_.values_.~RepeatedPtrField();
1565 }
1566
1567 void TupleValue::SetCachedSize(int size) const {
1568 _impl_._cached_size_.Set(size);
1569 }
1570
1571 void TupleValue::Clear() {
1572 // @@protoc_insertion_point(message_clear_start:tensorflow.TupleValue)
1573 ::uint32_t cached_has_bits = 0;
1574 // Prevent compiler warnings about cached_has_bits being unused
1575 (void) cached_has_bits;
1576
1577 _impl_.values_.Clear();
1578 _internal_metadata_.Clear<std::string>();
1579 }
1580
1581 const char* TupleValue::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
1582 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1583 while (!ctx->Done(&ptr)) {
1584 ::uint32_t tag;
1585 ptr = ::_pbi::ReadTag(ptr, &tag);
1586 switch (tag >> 3) {
1587 // repeated .tensorflow.StructuredValue values = 1;
1588 case 1:
1589 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
1590 ptr -= 1;
1591 do {
1592 ptr += 1;
1593 ptr = ctx->ParseMessage(_internal_add_values(), ptr);
1594 CHK_(ptr);
1595 if (!ctx->DataAvailable(ptr)) break;
1596 } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
1597 } else {
1598 goto handle_unusual;
1599 }
1600 continue;
1601 default:
1602 goto handle_unusual;
1603 } // switch
1604 handle_unusual:
1605 if ((tag == 0) || ((tag & 7) == 4)) {
1606 CHK_(ptr);
1607 ctx->SetLastTag(tag);
1608 goto message_done;
1609 }
1610 ptr = UnknownFieldParse(
1611 tag,
1612 _internal_metadata_.mutable_unknown_fields<std::string>(),
1613 ptr, ctx);
1614 CHK_(ptr != nullptr);
1615 } // while
1616 message_done:
1617 return ptr;
1618 failure:
1619 ptr = nullptr;
1620 goto message_done;
1621 #undef CHK_
1622 }
1623
1624 ::uint8_t* TupleValue::_InternalSerialize(
1625 ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
1626 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.TupleValue)
1627 ::uint32_t cached_has_bits = 0;
1628 (void) cached_has_bits;
1629
1630 // repeated .tensorflow.StructuredValue values = 1;
1631 for (unsigned i = 0,
1632 n = static_cast<unsigned>(this->_internal_values_size()); i < n; i++) {
1633 const auto& repfield = this->_internal_values(i);
1634 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1635 InternalWriteMessage(1, repfield, repfield.GetCachedSize(), target, stream);
1636 }
1637
1638 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1639 target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
1640 static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
1641 }
1642 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.TupleValue)
1643 return target;
1644 }
1645
1646 size_t TupleValue::ByteSizeLong() const {
1647 // @@protoc_insertion_point(message_byte_size_start:tensorflow.TupleValue)
1648 size_t total_size = 0;
1649
1650 ::uint32_t cached_has_bits = 0;
1651 // Prevent compiler warnings about cached_has_bits being unused
1652 (void) cached_has_bits;
1653
1654 // repeated .tensorflow.StructuredValue values = 1;
1655 total_size += 1UL * this->_internal_values_size();
1656 for (const auto& msg : this->_impl_.values_) {
1657 total_size +=
1658 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
1659 }
1660
1661 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1662 total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
1663 }
1664 int cached_size = ::_pbi::ToCachedSize(total_size);
1665 SetCachedSize(cached_size);
1666 return total_size;
1667 }
1668
1669 void TupleValue::CheckTypeAndMergeFrom(
1670 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
1671 MergeFrom(*::_pbi::DownCast<const TupleValue*>(
1672 &from));
1673 }
1674
1675 void TupleValue::MergeFrom(const TupleValue& from) {
1676 TupleValue* const _this = this;
1677 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.TupleValue)
1678 GOOGLE_DCHECK_NE(&from, _this);
1679 ::uint32_t cached_has_bits = 0;
1680 (void) cached_has_bits;
1681
1682 _this->_impl_.values_.MergeFrom(from._impl_.values_);
1683 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1684 }
1685
1686 void TupleValue::CopyFrom(const TupleValue& from) {
1687 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.TupleValue)
1688 if (&from == this) return;
1689 Clear();
1690 MergeFrom(from);
1691 }
1692
1693 bool TupleValue::IsInitialized() const {
1694 return true;
1695 }
1696
1697 void TupleValue::InternalSwap(TupleValue* other) {
1698 using std::swap;
1699 _internal_metadata_.InternalSwap(&other->_internal_metadata_);
1700 _impl_.values_.InternalSwap(&other->_impl_.values_);
1701 }
1702
1703 std::string TupleValue::GetTypeName() const {
1704 return "tensorflow.TupleValue";
1705 }
1706
1707
1708 // ===================================================================
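// Map-entry helper type backing DictValue.fields; generated for the lite
// runtime and not intended for direct use.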
1709
1710 DictValue_FieldsEntry_DoNotUse::DictValue_FieldsEntry_DoNotUse() {}
1711 DictValue_FieldsEntry_DoNotUse::DictValue_FieldsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
1712 : SuperType(arena) {}
1713 void DictValue_FieldsEntry_DoNotUse::MergeFrom(const DictValue_FieldsEntry_DoNotUse& other) {
1714 MergeFromInternal(other);
1715 }
1716
1717 // ===================================================================
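// Lite-runtime implementation of tensorflow.DictValue, which stores
// "map<string, .tensorflow.StructuredValue> fields = 1".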
1718
1719 class DictValue::_Internal {
1720 public:
1721 };
1722
1723 DictValue::DictValue(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1724 bool is_message_owned)
1725 : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
1726 SharedCtor(arena, is_message_owned);
1727 // @@protoc_insertion_point(arena_constructor:tensorflow.DictValue)
1728 }
1729 DictValue::DictValue(const DictValue& from)
1730 : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
1731 DictValue* const _this = this; (void)_this;
1732 new (&_impl_) Impl_{
1733 /*decltype(_impl_.fields_)*/{}
1734 , /*decltype(_impl_._cached_size_)*/{}};
1735
1736 _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1737 _this->_impl_.fields_.MergeFrom(from._impl_.fields_);
1738 // @@protoc_insertion_point(copy_constructor:tensorflow.DictValue)
1739 }
1740
1741 inline void DictValue::SharedCtor(
1742 ::_pb::Arena* arena, bool is_message_owned) {
1743 (void)arena;
1744 (void)is_message_owned;
1745 new (&_impl_) Impl_{
1746 /*decltype(_impl_.fields_)*/{::_pbi::ArenaInitialized(), arena}
1747 , /*decltype(_impl_._cached_size_)*/{}
1748 };
1749 }
1750
1751 DictValue::~DictValue() {
1752 // @@protoc_insertion_point(destructor:tensorflow.DictValue)
1753 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
1754 (void)arena;
1755 return;
1756 }
1757 SharedDtor();
1758 }
1759
1760 inline void DictValue::SharedDtor() {
1761 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
1762 _impl_.fields_.Destruct();
1763 _impl_.fields_.~MapFieldLite();
1764 }
1765
1766 void DictValue::SetCachedSize(int size) const {
1767 _impl_._cached_size_.Set(size);
1768 }
1769
1770 void DictValue::Clear() {
1771 // @@protoc_insertion_point(message_clear_start:tensorflow.DictValue)
1772 ::uint32_t cached_has_bits = 0;
1773 // Prevent compiler warnings about cached_has_bits being unused
1774 (void) cached_has_bits;
1775
1776 _impl_.fields_.Clear();
1777 _internal_metadata_.Clear<std::string>();
1778 }
1779
1780 const char* DictValue::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
1781 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1782 while (!ctx->Done(&ptr)) {
1783 ::uint32_t tag;
1784 ptr = ::_pbi::ReadTag(ptr, &tag);
1785 switch (tag >> 3) {
1786 // map<string, .tensorflow.StructuredValue> fields = 1;
1787 case 1:
1788 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
1789 ptr -= 1;
1790 do {
1791 ptr += 1;
1792 ptr = ctx->ParseMessage(&_impl_.fields_, ptr);
1793 CHK_(ptr);
1794 if (!ctx->DataAvailable(ptr)) break;
1795 } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr));
1796 } else {
1797 goto handle_unusual;
1798 }
1799 continue;
1800 default:
1801 goto handle_unusual;
1802 } // switch
1803 handle_unusual:
1804 if ((tag == 0) || ((tag & 7) == 4)) {
1805 CHK_(ptr);
1806 ctx->SetLastTag(tag);
1807 goto message_done;
1808 }
1809 ptr = UnknownFieldParse(
1810 tag,
1811 _internal_metadata_.mutable_unknown_fields<std::string>(),
1812 ptr, ctx);
1813 CHK_(ptr != nullptr);
1814 } // while
1815 message_done:
1816 return ptr;
1817 failure:
1818 ptr = nullptr;
1819 goto message_done;
1820 #undef CHK_
1821 }
1822
1823 ::uint8_t* DictValue::_InternalSerialize(
1824 ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
1825 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.DictValue)
1826 ::uint32_t cached_has_bits = 0;
1827 (void) cached_has_bits;
1828
1829 // map<string, .tensorflow.StructuredValue> fields = 1;
1830 if (!this->_internal_fields().empty()) {
1831 using MapType = ::_pb::Map<std::string, ::tensorflow::StructuredValue>;
1832 using WireHelper = DictValue_FieldsEntry_DoNotUse::Funcs;
1833 const auto& map_field = this->_internal_fields();
1834 auto check_utf8 = [](const MapType::value_type& entry) {
1835 (void)entry;
1836 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
1837 entry.first.data(), static_cast<int>(entry.first.length()),
1838 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
1839 "tensorflow.DictValue.FieldsEntry.key");
1840 };
1841
1842 if (stream->IsSerializationDeterministic() && map_field.size() > 1) {
1843 for (const auto& entry : ::_pbi::MapSorterPtr<MapType>(map_field)) {
1844 target = WireHelper::InternalSerialize(1, entry.first, entry.second, target, stream);
1845 check_utf8(entry);
1846 }
1847 } else {
1848 for (const auto& entry : map_field) {
1849 target = WireHelper::InternalSerialize(1, entry.first, entry.second, target, stream);
1850 check_utf8(entry);
1851 }
1852 }
1853 }
1854
1855 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1856 target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
1857 static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
1858 }
1859 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.DictValue)
1860 return target;
1861 }
1862
1863 size_t DictValue::ByteSizeLong() const {
1864 // @@protoc_insertion_point(message_byte_size_start:tensorflow.DictValue)
1865 size_t total_size = 0;
1866
1867 ::uint32_t cached_has_bits = 0;
1868 // Prevent compiler warnings about cached_has_bits being unused
1869 (void) cached_has_bits;
1870
1871 // map<string, .tensorflow.StructuredValue> fields = 1;
1872 total_size += 1 *
1873 ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_fields_size());
1874 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::tensorflow::StructuredValue >::const_iterator
1875 it = this->_internal_fields().begin();
1876 it != this->_internal_fields().end(); ++it) {
1877 total_size += DictValue_FieldsEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
1878 }
1879
1880 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
1881 total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
1882 }
1883 int cached_size = ::_pbi::ToCachedSize(total_size);
1884 SetCachedSize(cached_size);
1885 return total_size;
1886 }
1887
1888 void DictValue::CheckTypeAndMergeFrom(
1889 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
1890 MergeFrom(*::_pbi::DownCast<const DictValue*>(
1891 &from));
1892 }
1893
1894 void DictValue::MergeFrom(const DictValue& from) {
1895 DictValue* const _this = this;
1896 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.DictValue)
1897 GOOGLE_DCHECK_NE(&from, _this);
1898 ::uint32_t cached_has_bits = 0;
1899 (void) cached_has_bits;
1900
1901 _this->_impl_.fields_.MergeFrom(from._impl_.fields_);
1902 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1903 }
1904
1905 void DictValue::CopyFrom(const DictValue& from) {
1906 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.DictValue)
1907 if (&from == this) return;
1908 Clear();
1909 MergeFrom(from);
1910 }
1911
1912 bool DictValue::IsInitialized() const {
1913 return true;
1914 }
1915
1916 void DictValue::InternalSwap(DictValue* other) {
1917 using std::swap;
1918 _internal_metadata_.InternalSwap(&other->_internal_metadata_);
1919 _impl_.fields_.InternalSwap(&other->_impl_.fields_);
1920 }
1921
1922 std::string DictValue::GetTypeName() const {
1923 return "tensorflow.DictValue";
1924 }
1925
1926
1927 // ===================================================================
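// Lite-runtime implementation of tensorflow.PairValue:
// "string key = 1" and ".tensorflow.StructuredValue value = 2".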
1928
1929 class PairValue::_Internal {
1930 public:
1931 static const ::tensorflow::StructuredValue& value(const PairValue* msg);
1932 };
1933
1934 const ::tensorflow::StructuredValue&
1935 PairValue::_Internal::value(const PairValue* msg) {
1936 return *msg->_impl_.value_;
1937 }
1938 PairValue::PairValue(::PROTOBUF_NAMESPACE_ID::Arena* arena,
1939 bool is_message_owned)
1940 : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
1941 SharedCtor(arena, is_message_owned);
1942 // @@protoc_insertion_point(arena_constructor:tensorflow.PairValue)
1943 }
1944 PairValue::PairValue(const PairValue& from)
1945 : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
1946 PairValue* const _this = this; (void)_this;
1947 new (&_impl_) Impl_{
1948 decltype(_impl_.key_){}
1949 , decltype(_impl_.value_){nullptr}
1950 , /*decltype(_impl_._cached_size_)*/{}};
1951
1952 _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
1953 _impl_.key_.InitDefault();
1954 #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
1955 _impl_.key_.Set("", GetArenaForAllocation());
1956 #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
1957 if (!from._internal_key().empty()) {
1958 _this->_impl_.key_.Set(from._internal_key(),
1959 _this->GetArenaForAllocation());
1960 }
1961 if (from._internal_has_value()) {
1962 _this->_impl_.value_ = new ::tensorflow::StructuredValue(*from._impl_.value_);
1963 }
1964 // @@protoc_insertion_point(copy_constructor:tensorflow.PairValue)
1965 }
1966
1967 inline void PairValue::SharedCtor(
1968 ::_pb::Arena* arena, bool is_message_owned) {
1969 (void)arena;
1970 (void)is_message_owned;
1971 new (&_impl_) Impl_{
1972 decltype(_impl_.key_){}
1973 , decltype(_impl_.value_){nullptr}
1974 , /*decltype(_impl_._cached_size_)*/{}
1975 };
1976 _impl_.key_.InitDefault();
1977 #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
1978 _impl_.key_.Set("", GetArenaForAllocation());
1979 #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
1980 }
1981
1982 PairValue::~PairValue() {
1983 // @@protoc_insertion_point(destructor:tensorflow.PairValue)
1984 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
1985 (void)arena;
1986 return;
1987 }
1988 SharedDtor();
1989 }
1990
1991 inline void PairValue::SharedDtor() {
1992 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
1993 _impl_.key_.Destroy();
1994 if (this != internal_default_instance()) delete _impl_.value_;
1995 }
1996
1997 void PairValue::SetCachedSize(int size) const {
1998 _impl_._cached_size_.Set(size);
1999 }
2000
2001 void PairValue::Clear() {
2002 // @@protoc_insertion_point(message_clear_start:tensorflow.PairValue)
2003 ::uint32_t cached_has_bits = 0;
2004 // Prevent compiler warnings about cached_has_bits being unused
2005 (void) cached_has_bits;
2006
2007 _impl_.key_.ClearToEmpty();
2008 if (GetArenaForAllocation() == nullptr && _impl_.value_ != nullptr) {
2009 delete _impl_.value_;
2010 }
2011 _impl_.value_ = nullptr;
2012 _internal_metadata_.Clear<std::string>();
2013 }
2014
2015 const char* PairValue::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
2016 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
2017 while (!ctx->Done(&ptr)) {
2018 ::uint32_t tag;
2019 ptr = ::_pbi::ReadTag(ptr, &tag);
2020 switch (tag >> 3) {
2021 // string key = 1;
2022 case 1:
2023 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
2024 auto str = _internal_mutable_key();
2025 ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
2026 CHK_(ptr);
2027 CHK_(::_pbi::VerifyUTF8(str, nullptr));
2028 } else {
2029 goto handle_unusual;
2030 }
2031 continue;
2032 // .tensorflow.StructuredValue value = 2;
2033 case 2:
2034 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
2035 ptr = ctx->ParseMessage(_internal_mutable_value(), ptr);
2036 CHK_(ptr);
2037 } else {
2038 goto handle_unusual;
2039 }
2040 continue;
2041 default:
2042 goto handle_unusual;
2043 } // switch
2044 handle_unusual:
2045 if ((tag == 0) || ((tag & 7) == 4)) {
2046 CHK_(ptr);
2047 ctx->SetLastTag(tag);
2048 goto message_done;
2049 }
2050 ptr = UnknownFieldParse(
2051 tag,
2052 _internal_metadata_.mutable_unknown_fields<std::string>(),
2053 ptr, ctx);
2054 CHK_(ptr != nullptr);
2055 } // while
2056 message_done:
2057 return ptr;
2058 failure:
2059 ptr = nullptr;
2060 goto message_done;
2061 #undef CHK_
2062 }
2063
2064 ::uint8_t* PairValue::_InternalSerialize(
2065 ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
2066 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.PairValue)
2067 ::uint32_t cached_has_bits = 0;
2068 (void) cached_has_bits;
2069
2070 // string key = 1;
2071 if (!this->_internal_key().empty()) {
2072 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2073 this->_internal_key().data(), static_cast<int>(this->_internal_key().length()),
2074 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2075 "tensorflow.PairValue.key");
2076 target = stream->WriteStringMaybeAliased(
2077 1, this->_internal_key(), target);
2078 }
2079
2080 // .tensorflow.StructuredValue value = 2;
2081 if (this->_internal_has_value()) {
2082 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
2083 InternalWriteMessage(2, _Internal::value(this),
2084 _Internal::value(this).GetCachedSize(), target, stream);
2085 }
2086
2087 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2088 target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
2089 static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
2090 }
2091 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.PairValue)
2092 return target;
2093 }
2094
2095 size_t PairValue::ByteSizeLong() const {
2096 // @@protoc_insertion_point(message_byte_size_start:tensorflow.PairValue)
2097 size_t total_size = 0;
2098
2099 ::uint32_t cached_has_bits = 0;
2100 // Prevent compiler warnings about cached_has_bits being unused
2101 (void) cached_has_bits;
2102
2103 // string key = 1;
2104 if (!this->_internal_key().empty()) {
2105 total_size += 1 +
2106 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
2107 this->_internal_key());
2108 }
2109
2110 // .tensorflow.StructuredValue value = 2;
2111 if (this->_internal_has_value()) {
2112 total_size += 1 +
2113 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
2114 *_impl_.value_);
2115 }
2116
2117 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2118 total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
2119 }
2120 int cached_size = ::_pbi::ToCachedSize(total_size);
2121 SetCachedSize(cached_size);
2122 return total_size;
2123 }
2124
2125 void PairValue::CheckTypeAndMergeFrom(
2126 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
2127 MergeFrom(*::_pbi::DownCast<const PairValue*>(
2128 &from));
2129 }
2130
2131 void PairValue::MergeFrom(const PairValue& from) {
2132 PairValue* const _this = this;
2133 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.PairValue)
2134 GOOGLE_DCHECK_NE(&from, _this);
2135 ::uint32_t cached_has_bits = 0;
2136 (void) cached_has_bits;
2137
2138 if (!from._internal_key().empty()) {
2139 _this->_internal_set_key(from._internal_key());
2140 }
2141 if (from._internal_has_value()) {
2142 _this->_internal_mutable_value()->::tensorflow::StructuredValue::MergeFrom(
2143 from._internal_value());
2144 }
2145 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
2146 }
2147
2148 void PairValue::CopyFrom(const PairValue& from) {
2149 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.PairValue)
2150 if (&from == this) return;
2151 Clear();
2152 MergeFrom(from);
2153 }
2154
2155 bool PairValue::IsInitialized() const {
2156 return true;
2157 }
2158
2159 void PairValue::InternalSwap(PairValue* other) {
2160 using std::swap;
2161 auto* lhs_arena = GetArenaForAllocation();
2162 auto* rhs_arena = other->GetArenaForAllocation();
2163 _internal_metadata_.InternalSwap(&other->_internal_metadata_);
2164 ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
2165 &_impl_.key_, lhs_arena,
2166 &other->_impl_.key_, rhs_arena
2167 );
2168 swap(_impl_.value_, other->_impl_.value_);
2169 }
2170
2171 std::string PairValue::GetTypeName() const {
2172 return "tensorflow.PairValue";
2173 }
2174
2175
2176 // ===================================================================
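// Lite-runtime implementation of tensorflow.NamedTupleValue:
// "string name = 1" and "repeated .tensorflow.PairValue values = 2".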
2177
2178 class NamedTupleValue::_Internal {
2179 public:
2180 };
2181
2182 NamedTupleValue::NamedTupleValue(::PROTOBUF_NAMESPACE_ID::Arena* arena,
2183 bool is_message_owned)
2184 : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
2185 SharedCtor(arena, is_message_owned);
2186 // @@protoc_insertion_point(arena_constructor:tensorflow.NamedTupleValue)
2187 }
2188 NamedTupleValue::NamedTupleValue(const NamedTupleValue& from)
2189 : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
2190 NamedTupleValue* const _this = this; (void)_this;
2191 new (&_impl_) Impl_{
2192 decltype(_impl_.values_){from._impl_.values_}
2193 , decltype(_impl_.name_){}
2194 , /*decltype(_impl_._cached_size_)*/{}};
2195
2196 _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
2197 _impl_.name_.InitDefault();
2198 #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
2199 _impl_.name_.Set("", GetArenaForAllocation());
2200 #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
2201 if (!from._internal_name().empty()) {
2202 _this->_impl_.name_.Set(from._internal_name(),
2203 _this->GetArenaForAllocation());
2204 }
2205 // @@protoc_insertion_point(copy_constructor:tensorflow.NamedTupleValue)
2206 }
2207
2208 inline void NamedTupleValue::SharedCtor(
2209 ::_pb::Arena* arena, bool is_message_owned) {
2210 (void)arena;
2211 (void)is_message_owned;
2212 new (&_impl_) Impl_{
2213 decltype(_impl_.values_){arena}
2214 , decltype(_impl_.name_){}
2215 , /*decltype(_impl_._cached_size_)*/{}
2216 };
2217 _impl_.name_.InitDefault();
2218 #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
2219 _impl_.name_.Set("", GetArenaForAllocation());
2220 #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
2221 }
2222
2223 NamedTupleValue::~NamedTupleValue() {
2224 // @@protoc_insertion_point(destructor:tensorflow.NamedTupleValue)
2225 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
2226 (void)arena;
2227 return;
2228 }
2229 SharedDtor();
2230 }
2231
2232 inline void NamedTupleValue::SharedDtor() {
2233 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
2234 _impl_.values_.~RepeatedPtrField();
2235 _impl_.name_.Destroy();
2236 }
2237
2238 void NamedTupleValue::SetCachedSize(int size) const {
2239 _impl_._cached_size_.Set(size);
2240 }
2241
2242 void NamedTupleValue::Clear() {
2243 // @@protoc_insertion_point(message_clear_start:tensorflow.NamedTupleValue)
2244 ::uint32_t cached_has_bits = 0;
2245 // Prevent compiler warnings about cached_has_bits being unused
2246 (void) cached_has_bits;
2247
2248 _impl_.values_.Clear();
2249 _impl_.name_.ClearToEmpty();
2250 _internal_metadata_.Clear<std::string>();
2251 }
2252
2253 const char* NamedTupleValue::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
2254 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
2255 while (!ctx->Done(&ptr)) {
2256 ::uint32_t tag;
2257 ptr = ::_pbi::ReadTag(ptr, &tag);
2258 switch (tag >> 3) {
2259 // string name = 1;
2260 case 1:
2261 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
2262 auto str = _internal_mutable_name();
2263 ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
2264 CHK_(ptr);
2265 CHK_(::_pbi::VerifyUTF8(str, nullptr));
2266 } else {
2267 goto handle_unusual;
2268 }
2269 continue;
2270 // repeated .tensorflow.PairValue values = 2;
2271 case 2:
2272 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
2273 ptr -= 1;
2274 do {
2275 ptr += 1;
2276 ptr = ctx->ParseMessage(_internal_add_values(), ptr);
2277 CHK_(ptr);
2278 if (!ctx->DataAvailable(ptr)) break;
2279 } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr));
2280 } else {
2281 goto handle_unusual;
2282 }
2283 continue;
2284 default:
2285 goto handle_unusual;
2286 } // switch
2287 handle_unusual:
2288 if ((tag == 0) || ((tag & 7) == 4)) {
2289 CHK_(ptr);
2290 ctx->SetLastTag(tag);
2291 goto message_done;
2292 }
2293 ptr = UnknownFieldParse(
2294 tag,
2295 _internal_metadata_.mutable_unknown_fields<std::string>(),
2296 ptr, ctx);
2297 CHK_(ptr != nullptr);
2298 } // while
2299 message_done:
2300 return ptr;
2301 failure:
2302 ptr = nullptr;
2303 goto message_done;
2304 #undef CHK_
2305 }
2306
2307 ::uint8_t* NamedTupleValue::_InternalSerialize(
2308 ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
2309 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.NamedTupleValue)
2310 ::uint32_t cached_has_bits = 0;
2311 (void) cached_has_bits;
2312
2313 // string name = 1;
2314 if (!this->_internal_name().empty()) {
2315 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2316 this->_internal_name().data(), static_cast<int>(this->_internal_name().length()),
2317 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2318 "tensorflow.NamedTupleValue.name");
2319 target = stream->WriteStringMaybeAliased(
2320 1, this->_internal_name(), target);
2321 }
2322
2323 // repeated .tensorflow.PairValue values = 2;
2324 for (unsigned i = 0,
2325 n = static_cast<unsigned>(this->_internal_values_size()); i < n; i++) {
2326 const auto& repfield = this->_internal_values(i);
2327 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
2328 InternalWriteMessage(2, repfield, repfield.GetCachedSize(), target, stream);
2329 }
2330
2331 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2332 target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
2333 static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
2334 }
2335 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.NamedTupleValue)
2336 return target;
2337 }
2338
2339 size_t NamedTupleValue::ByteSizeLong() const {
2340 // @@protoc_insertion_point(message_byte_size_start:tensorflow.NamedTupleValue)
2341 size_t total_size = 0;
2342
2343 ::uint32_t cached_has_bits = 0;
2344 // Prevent compiler warnings about cached_has_bits being unused
2345 (void) cached_has_bits;
2346
2347 // repeated .tensorflow.PairValue values = 2;
2348 total_size += 1UL * this->_internal_values_size();
2349 for (const auto& msg : this->_impl_.values_) {
2350 total_size +=
2351 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg);
2352 }
2353
2354 // string name = 1;
2355 if (!this->_internal_name().empty()) {
2356 total_size += 1 +
2357 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
2358 this->_internal_name());
2359 }
2360
2361 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2362 total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
2363 }
2364 int cached_size = ::_pbi::ToCachedSize(total_size);
2365 SetCachedSize(cached_size);
2366 return total_size;
2367 }
2368
2369 void NamedTupleValue::CheckTypeAndMergeFrom(
2370 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
2371 MergeFrom(*::_pbi::DownCast<const NamedTupleValue*>(
2372 &from));
2373 }
2374
2375 void NamedTupleValue::MergeFrom(const NamedTupleValue& from) {
2376 NamedTupleValue* const _this = this;
2377 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.NamedTupleValue)
2378 GOOGLE_DCHECK_NE(&from, _this);
2379 ::uint32_t cached_has_bits = 0;
2380 (void) cached_has_bits;
2381
2382 _this->_impl_.values_.MergeFrom(from._impl_.values_);
2383 if (!from._internal_name().empty()) {
2384 _this->_internal_set_name(from._internal_name());
2385 }
2386 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
2387 }
2388
2389 void NamedTupleValue::CopyFrom(const NamedTupleValue& from) {
2390 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.NamedTupleValue)
2391 if (&from == this) return;
2392 Clear();
2393 MergeFrom(from);
2394 }
2395
2396 bool NamedTupleValue::IsInitialized() const {
2397 return true;
2398 }
2399
2400 void NamedTupleValue::InternalSwap(NamedTupleValue* other) {
2401 using std::swap;
2402 auto* lhs_arena = GetArenaForAllocation();
2403 auto* rhs_arena = other->GetArenaForAllocation();
2404 _internal_metadata_.InternalSwap(&other->_internal_metadata_);
2405 _impl_.values_.InternalSwap(&other->_impl_.values_);
2406 ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
2407 &_impl_.name_, lhs_arena,
2408 &other->_impl_.name_, rhs_arena
2409 );
2410 }
2411
2412 std::string NamedTupleValue::GetTypeName() const {
2413 return "tensorflow.NamedTupleValue";
2414 }
2415
2416
2417 // ===================================================================
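// Lite-runtime implementation of tensorflow.TensorSpecProto:
// "string name = 1", ".tensorflow.TensorShapeProto shape = 2",
// ".tensorflow.DataType dtype = 3".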
2418
2419 class TensorSpecProto::_Internal {
2420 public:
2421 static const ::tensorflow::TensorShapeProto& shape(const TensorSpecProto* msg);
2422 };
2423
2424 const ::tensorflow::TensorShapeProto&
2425 TensorSpecProto::_Internal::shape(const TensorSpecProto* msg) {
2426 return *msg->_impl_.shape_;
2427 }
2428 void TensorSpecProto::clear_shape() {
2429 if (GetArenaForAllocation() == nullptr && _impl_.shape_ != nullptr) {
2430 delete _impl_.shape_;
2431 }
2432 _impl_.shape_ = nullptr;
2433 }
2434 TensorSpecProto::TensorSpecProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
2435 bool is_message_owned)
2436 : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
2437 SharedCtor(arena, is_message_owned);
2438 // @@protoc_insertion_point(arena_constructor:tensorflow.TensorSpecProto)
2439 }
2440 TensorSpecProto::TensorSpecProto(const TensorSpecProto& from)
2441 : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
2442 TensorSpecProto* const _this = this; (void)_this;
2443 new (&_impl_) Impl_{
2444 decltype(_impl_.name_){}
2445 , decltype(_impl_.shape_){nullptr}
2446 , decltype(_impl_.dtype_){}
2447 , /*decltype(_impl_._cached_size_)*/{}};
2448
2449 _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
2450 _impl_.name_.InitDefault();
2451 #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
2452 _impl_.name_.Set("", GetArenaForAllocation());
2453 #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
2454 if (!from._internal_name().empty()) {
2455 _this->_impl_.name_.Set(from._internal_name(),
2456 _this->GetArenaForAllocation());
2457 }
2458 if (from._internal_has_shape()) {
2459 _this->_impl_.shape_ = new ::tensorflow::TensorShapeProto(*from._impl_.shape_);
2460 }
2461 _this->_impl_.dtype_ = from._impl_.dtype_;
2462 // @@protoc_insertion_point(copy_constructor:tensorflow.TensorSpecProto)
2463 }
2464
2465 inline void TensorSpecProto::SharedCtor(
2466 ::_pb::Arena* arena, bool is_message_owned) {
2467 (void)arena;
2468 (void)is_message_owned;
2469 new (&_impl_) Impl_{
2470 decltype(_impl_.name_){}
2471 , decltype(_impl_.shape_){nullptr}
2472 , decltype(_impl_.dtype_){0}
2473 , /*decltype(_impl_._cached_size_)*/{}
2474 };
2475 _impl_.name_.InitDefault();
2476 #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
2477 _impl_.name_.Set("", GetArenaForAllocation());
2478 #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
2479 }
2480
2481 TensorSpecProto::~TensorSpecProto() {
2482 // @@protoc_insertion_point(destructor:tensorflow.TensorSpecProto)
2483 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
2484 (void)arena;
2485 return;
2486 }
2487 SharedDtor();
2488 }
2489
2490 inline void TensorSpecProto::SharedDtor() {
2491 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
2492 _impl_.name_.Destroy();
2493 if (this != internal_default_instance()) delete _impl_.shape_;
2494 }
2495
2496 void TensorSpecProto::SetCachedSize(int size) const {
2497 _impl_._cached_size_.Set(size);
2498 }
2499
2500 void TensorSpecProto::Clear() {
2501 // @@protoc_insertion_point(message_clear_start:tensorflow.TensorSpecProto)
2502 ::uint32_t cached_has_bits = 0;
2503 // Prevent compiler warnings about cached_has_bits being unused
2504 (void) cached_has_bits;
2505
2506 _impl_.name_.ClearToEmpty();
2507 if (GetArenaForAllocation() == nullptr && _impl_.shape_ != nullptr) {
2508 delete _impl_.shape_;
2509 }
2510 _impl_.shape_ = nullptr;
2511 _impl_.dtype_ = 0;
2512 _internal_metadata_.Clear<std::string>();
2513 }
2514
2515 const char* TensorSpecProto::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
2516 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
2517 while (!ctx->Done(&ptr)) {
2518 ::uint32_t tag;
2519 ptr = ::_pbi::ReadTag(ptr, &tag);
2520 switch (tag >> 3) {
2521 // string name = 1;
2522 case 1:
2523 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
2524 auto str = _internal_mutable_name();
2525 ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
2526 CHK_(ptr);
2527 CHK_(::_pbi::VerifyUTF8(str, nullptr));
2528 } else {
2529 goto handle_unusual;
2530 }
2531 continue;
2532 // .tensorflow.TensorShapeProto shape = 2;
2533 case 2:
2534 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
2535 ptr = ctx->ParseMessage(_internal_mutable_shape(), ptr);
2536 CHK_(ptr);
2537 } else {
2538 goto handle_unusual;
2539 }
2540 continue;
2541 // .tensorflow.DataType dtype = 3;
2542 case 3:
2543 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
2544 ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
2545 CHK_(ptr);
2546 _internal_set_dtype(static_cast<::tensorflow::DataType>(val));
2547 } else {
2548 goto handle_unusual;
2549 }
2550 continue;
2551 default:
2552 goto handle_unusual;
2553 } // switch
2554 handle_unusual:
2555 if ((tag == 0) || ((tag & 7) == 4)) {
2556 CHK_(ptr);
2557 ctx->SetLastTag(tag);
2558 goto message_done;
2559 }
2560 ptr = UnknownFieldParse(
2561 tag,
2562 _internal_metadata_.mutable_unknown_fields<std::string>(),
2563 ptr, ctx);
2564 CHK_(ptr != nullptr);
2565 } // while
2566 message_done:
2567 return ptr;
2568 failure:
2569 ptr = nullptr;
2570 goto message_done;
2571 #undef CHK_
2572 }
2573
2574 ::uint8_t* TensorSpecProto::_InternalSerialize(
2575 ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
2576 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.TensorSpecProto)
2577 ::uint32_t cached_has_bits = 0;
2578 (void) cached_has_bits;
2579
2580 // string name = 1;
2581 if (!this->_internal_name().empty()) {
2582 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2583 this->_internal_name().data(), static_cast<int>(this->_internal_name().length()),
2584 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2585 "tensorflow.TensorSpecProto.name");
2586 target = stream->WriteStringMaybeAliased(
2587 1, this->_internal_name(), target);
2588 }
2589
2590 // .tensorflow.TensorShapeProto shape = 2;
2591 if (this->_internal_has_shape()) {
2592 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
2593 InternalWriteMessage(2, _Internal::shape(this),
2594 _Internal::shape(this).GetCachedSize(), target, stream);
2595 }
2596
2597 // .tensorflow.DataType dtype = 3;
2598 if (this->_internal_dtype() != 0) {
2599 target = stream->EnsureSpace(target);
2600 target = ::_pbi::WireFormatLite::WriteEnumToArray(
2601 3, this->_internal_dtype(), target);
2602 }
2603
2604 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2605 target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
2606 static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
2607 }
2608 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.TensorSpecProto)
2609 return target;
2610 }
2611
2612 size_t TensorSpecProto::ByteSizeLong() const {
2613 // @@protoc_insertion_point(message_byte_size_start:tensorflow.TensorSpecProto)
2614 size_t total_size = 0;
2615
2616 ::uint32_t cached_has_bits = 0;
2617 // Prevent compiler warnings about cached_has_bits being unused
2618 (void) cached_has_bits;
2619
2620 // string name = 1;
2621 if (!this->_internal_name().empty()) {
2622 total_size += 1 +
2623 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
2624 this->_internal_name());
2625 }
2626
2627 // .tensorflow.TensorShapeProto shape = 2;
2628 if (this->_internal_has_shape()) {
2629 total_size += 1 +
2630 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
2631 *_impl_.shape_);
2632 }
2633
2634 // .tensorflow.DataType dtype = 3;
2635 if (this->_internal_dtype() != 0) {
2636 total_size += 1 +
2637 ::_pbi::WireFormatLite::EnumSize(this->_internal_dtype());
2638 }
2639
2640 if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
2641 total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
2642 }
2643 int cached_size = ::_pbi::ToCachedSize(total_size);
2644 SetCachedSize(cached_size);
2645 return total_size;
2646 }
2647
2648 void TensorSpecProto::CheckTypeAndMergeFrom(
2649 const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
2650 MergeFrom(*::_pbi::DownCast<const TensorSpecProto*>(
2651 &from));
2652 }
2653
2654 void TensorSpecProto::MergeFrom(const TensorSpecProto& from) {
2655 TensorSpecProto* const _this = this;
2656 // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.TensorSpecProto)
2657 GOOGLE_DCHECK_NE(&from, _this);
2658 ::uint32_t cached_has_bits = 0;
2659 (void) cached_has_bits;
2660
2661 if (!from._internal_name().empty()) {
2662 _this->_internal_set_name(from._internal_name());
2663 }
2664 if (from._internal_has_shape()) {
2665 _this->_internal_mutable_shape()->::tensorflow::TensorShapeProto::MergeFrom(
2666 from._internal_shape());
2667 }
2668 if (from._internal_dtype() != 0) {
2669 _this->_internal_set_dtype(from._internal_dtype());
2670 }
2671 _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
2672 }
2673
2674 void TensorSpecProto::CopyFrom(const TensorSpecProto& from) {
2675 // @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.TensorSpecProto)
2676 if (&from == this) return;
2677 Clear();
2678 MergeFrom(from);
2679 }
2680
2681 bool TensorSpecProto::IsInitialized() const {
2682 return true;
2683 }
2684
2685 void TensorSpecProto::InternalSwap(TensorSpecProto* other) {
2686 using std::swap;
2687 auto* lhs_arena = GetArenaForAllocation();
2688 auto* rhs_arena = other->GetArenaForAllocation();
2689 _internal_metadata_.InternalSwap(&other->_internal_metadata_);
2690 ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
2691 &_impl_.name_, lhs_arena,
2692 &other->_impl_.name_, rhs_arena
2693 );
2694 ::PROTOBUF_NAMESPACE_ID::internal::memswap<
2695 PROTOBUF_FIELD_OFFSET(TensorSpecProto, _impl_.dtype_)
2696 + sizeof(TensorSpecProto::_impl_.dtype_) // NOLINT
2697 - PROTOBUF_FIELD_OFFSET(TensorSpecProto, _impl_.shape_)>(
2698 reinterpret_cast<char*>(&_impl_.shape_),
2699 reinterpret_cast<char*>(&other->_impl_.shape_));
2700 }
2701
2702 std::string TensorSpecProto::GetTypeName() const {
2703 return "tensorflow.TensorSpecProto";
2704 }
2705
2706
2707 // ===================================================================
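// Lite-runtime implementation of tensorflow.BoundedTensorSpecProto: the
// TensorSpecProto fields plus ".tensorflow.TensorProto minimum = 4" and
// ".tensorflow.TensorProto maximum = 5".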
2708
2709 class BoundedTensorSpecProto::_Internal {
2710 public:
2711 static const ::tensorflow::TensorShapeProto& shape(const BoundedTensorSpecProto* msg);
2712 static const ::tensorflow::TensorProto& minimum(const BoundedTensorSpecProto* msg);
2713 static const ::tensorflow::TensorProto& maximum(const BoundedTensorSpecProto* msg);
2714 };
2715
2716 const ::tensorflow::TensorShapeProto&
2717 BoundedTensorSpecProto::_Internal::shape(const BoundedTensorSpecProto* msg) {
2718 return *msg->_impl_.shape_;
2719 }
2720 const ::tensorflow::TensorProto&
2721 BoundedTensorSpecProto::_Internal::minimum(const BoundedTensorSpecProto* msg) {
2722 return *msg->_impl_.minimum_;
2723 }
2724 const ::tensorflow::TensorProto&
2725 BoundedTensorSpecProto::_Internal::maximum(const BoundedTensorSpecProto* msg) {
2726 return *msg->_impl_.maximum_;
2727 }
2728 void BoundedTensorSpecProto::clear_shape() {
2729 if (GetArenaForAllocation() == nullptr && _impl_.shape_ != nullptr) {
2730 delete _impl_.shape_;
2731 }
2732 _impl_.shape_ = nullptr;
2733 }
2734 void BoundedTensorSpecProto::clear_minimum() {
2735 if (GetArenaForAllocation() == nullptr && _impl_.minimum_ != nullptr) {
2736 delete _impl_.minimum_;
2737 }
2738 _impl_.minimum_ = nullptr;
2739 }
2740 void BoundedTensorSpecProto::clear_maximum() {
2741 if (GetArenaForAllocation() == nullptr && _impl_.maximum_ != nullptr) {
2742 delete _impl_.maximum_;
2743 }
2744 _impl_.maximum_ = nullptr;
2745 }
2746 BoundedTensorSpecProto::BoundedTensorSpecProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
2747 bool is_message_owned)
2748 : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
2749 SharedCtor(arena, is_message_owned);
2750 // @@protoc_insertion_point(arena_constructor:tensorflow.BoundedTensorSpecProto)
2751 }
2752 BoundedTensorSpecProto::BoundedTensorSpecProto(const BoundedTensorSpecProto& from)
2753 : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
2754 BoundedTensorSpecProto* const _this = this; (void)_this;
2755 new (&_impl_) Impl_{
2756 decltype(_impl_.name_){}
2757 , decltype(_impl_.shape_){nullptr}
2758 , decltype(_impl_.minimum_){nullptr}
2759 , decltype(_impl_.maximum_){nullptr}
2760 , decltype(_impl_.dtype_){}
2761 , /*decltype(_impl_._cached_size_)*/{}};
2762
2763 _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
2764 _impl_.name_.InitDefault();
2765 #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
2766 _impl_.name_.Set("", GetArenaForAllocation());
2767 #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
2768 if (!from._internal_name().empty()) {
2769 _this->_impl_.name_.Set(from._internal_name(),
2770 _this->GetArenaForAllocation());
2771 }
2772 if (from._internal_has_shape()) {
2773 _this->_impl_.shape_ = new ::tensorflow::TensorShapeProto(*from._impl_.shape_);
2774 }
2775 if (from._internal_has_minimum()) {
2776 _this->_impl_.minimum_ = new ::tensorflow::TensorProto(*from._impl_.minimum_);
2777 }
2778 if (from._internal_has_maximum()) {
2779 _this->_impl_.maximum_ = new ::tensorflow::TensorProto(*from._impl_.maximum_);
2780 }
2781 _this->_impl_.dtype_ = from._impl_.dtype_;
2782 // @@protoc_insertion_point(copy_constructor:tensorflow.BoundedTensorSpecProto)
2783 }
2784
2785 inline void BoundedTensorSpecProto::SharedCtor(
2786 ::_pb::Arena* arena, bool is_message_owned) {
2787 (void)arena;
2788 (void)is_message_owned;
2789 new (&_impl_) Impl_{
2790 decltype(_impl_.name_){}
2791 , decltype(_impl_.shape_){nullptr}
2792 , decltype(_impl_.minimum_){nullptr}
2793 , decltype(_impl_.maximum_){nullptr}
2794 , decltype(_impl_.dtype_){0}
2795 , /*decltype(_impl_._cached_size_)*/{}
2796 };
2797 _impl_.name_.InitDefault();
2798 #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
2799 _impl_.name_.Set("", GetArenaForAllocation());
2800 #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
2801 }
2802
2803 BoundedTensorSpecProto::~BoundedTensorSpecProto() {
2804 // @@protoc_insertion_point(destructor:tensorflow.BoundedTensorSpecProto)
2805 if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
2806 (void)arena;
2807 return;
2808 }
2809 SharedDtor();
2810 }
2811
2812 inline void BoundedTensorSpecProto::SharedDtor() {
2813 GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
2814 _impl_.name_.Destroy();
2815 if (this != internal_default_instance()) delete _impl_.shape_;
2816 if (this != internal_default_instance()) delete _impl_.minimum_;
2817 if (this != internal_default_instance()) delete _impl_.maximum_;
2818 }
2819
2820 void BoundedTensorSpecProto::SetCachedSize(int size) const {
2821 _impl_._cached_size_.Set(size);
2822 }
2823
2824 void BoundedTensorSpecProto::Clear() {
2825 // @@protoc_insertion_point(message_clear_start:tensorflow.BoundedTensorSpecProto)
2826 ::uint32_t cached_has_bits = 0;
2827 // Prevent compiler warnings about cached_has_bits being unused
2828 (void) cached_has_bits;
2829
2830 _impl_.name_.ClearToEmpty();
2831 if (GetArenaForAllocation() == nullptr && _impl_.shape_ != nullptr) {
2832 delete _impl_.shape_;
2833 }
2834 _impl_.shape_ = nullptr;
2835 if (GetArenaForAllocation() == nullptr && _impl_.minimum_ != nullptr) {
2836 delete _impl_.minimum_;
2837 }
2838 _impl_.minimum_ = nullptr;
2839 if (GetArenaForAllocation() == nullptr && _impl_.maximum_ != nullptr) {
2840 delete _impl_.maximum_;
2841 }
2842 _impl_.maximum_ = nullptr;
2843 _impl_.dtype_ = 0;
2844 _internal_metadata_.Clear<std::string>();
2845 }
2846
2847 const char* BoundedTensorSpecProto::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
2848 #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
2849 while (!ctx->Done(&ptr)) {
2850 ::uint32_t tag;
2851 ptr = ::_pbi::ReadTag(ptr, &tag);
2852 switch (tag >> 3) {
2853 // string name = 1;
2854 case 1:
2855 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) {
2856 auto str = _internal_mutable_name();
2857 ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
2858 CHK_(ptr);
2859 CHK_(::_pbi::VerifyUTF8(str, nullptr));
2860 } else {
2861 goto handle_unusual;
2862 }
2863 continue;
2864 // .tensorflow.TensorShapeProto shape = 2;
2865 case 2:
2866 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
2867 ptr = ctx->ParseMessage(_internal_mutable_shape(), ptr);
2868 CHK_(ptr);
2869 } else {
2870 goto handle_unusual;
2871 }
2872 continue;
2873 // .tensorflow.DataType dtype = 3;
2874 case 3:
2875 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) {
2876 ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
2877 CHK_(ptr);
2878 _internal_set_dtype(static_cast<::tensorflow::DataType>(val));
2879 } else {
2880 goto handle_unusual;
2881 }
2882 continue;
2883 // .tensorflow.TensorProto minimum = 4;
2884 case 4:
2885 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) {
2886 ptr = ctx->ParseMessage(_internal_mutable_minimum(), ptr);
2887 CHK_(ptr);
2888 } else {
2889 goto handle_unusual;
2890 }
2891 continue;
2892 // .tensorflow.TensorProto maximum = 5;
2893 case 5:
2894 if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) {
2895 ptr = ctx->ParseMessage(_internal_mutable_maximum(), ptr);
2896 CHK_(ptr);
2897 } else {
2898 goto handle_unusual;
2899 }
2900 continue;
2901 default:
2902 goto handle_unusual;
2903 } // switch
2904 handle_unusual:
2905 if ((tag == 0) || ((tag & 7) == 4)) {
2906 CHK_(ptr);
2907 ctx->SetLastTag(tag);
2908 goto message_done;
2909 }
2910 ptr = UnknownFieldParse(
2911 tag,
2912 _internal_metadata_.mutable_unknown_fields<std::string>(),
2913 ptr, ctx);
2914 CHK_(ptr != nullptr);
2915 } // while
2916 message_done:
2917 return ptr;
2918 failure:
2919 ptr = nullptr;
2920 goto message_done;
2921 #undef CHK_
2922 }
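// Tag layout used by _InternalParse above: each wire tag packs the field
// number into the high bits and the wire type into the low 3 bits
// (tag = field_number << 3 | wire_type). For example, tag 10 is field 1 with
// wire type 2 (length-delimited, the `name` string), tag 24 is field 3 with
// wire type 0 (varint, the `dtype` enum), and tags 18/34/42 are the
// length-delimited sub-messages `shape`, `minimum`, and `maximum`.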

::uint8_t* BoundedTensorSpecProto::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.BoundedTensorSpecProto)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // string name = 1;
  if (!this->_internal_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_name().data(), static_cast<int>(this->_internal_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.BoundedTensorSpecProto.name");
    target = stream->WriteStringMaybeAliased(
        1, this->_internal_name(), target);
  }

  // .tensorflow.TensorShapeProto shape = 2;
  if (this->_internal_has_shape()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(2, _Internal::shape(this),
        _Internal::shape(this).GetCachedSize(), target, stream);
  }

  // .tensorflow.DataType dtype = 3;
  if (this->_internal_dtype() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
        3, this->_internal_dtype(), target);
  }

  // .tensorflow.TensorProto minimum = 4;
  if (this->_internal_has_minimum()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(4, _Internal::minimum(this),
        _Internal::minimum(this).GetCachedSize(), target, stream);
  }

  // .tensorflow.TensorProto maximum = 5;
  if (this->_internal_has_maximum()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(5, _Internal::maximum(this),
        _Internal::maximum(this).GetCachedSize(), target, stream);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.BoundedTensorSpecProto)
  return target;
}

size_t BoundedTensorSpecProto::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.BoundedTensorSpecProto)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string name = 1;
  if (!this->_internal_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_name());
  }

  // .tensorflow.TensorShapeProto shape = 2;
  if (this->_internal_has_shape()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.shape_);
  }

  // .tensorflow.TensorProto minimum = 4;
  if (this->_internal_has_minimum()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.minimum_);
  }

  // .tensorflow.TensorProto maximum = 5;
  if (this->_internal_has_maximum()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.maximum_);
  }

  // .tensorflow.DataType dtype = 3;
  if (this->_internal_dtype() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_dtype());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}

void BoundedTensorSpecProto::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const BoundedTensorSpecProto*>(
      &from));
}

void BoundedTensorSpecProto::MergeFrom(const BoundedTensorSpecProto& from) {
  BoundedTensorSpecProto* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.BoundedTensorSpecProto)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_name().empty()) {
    _this->_internal_set_name(from._internal_name());
  }
  if (from._internal_has_shape()) {
    _this->_internal_mutable_shape()->::tensorflow::TensorShapeProto::MergeFrom(
        from._internal_shape());
  }
  if (from._internal_has_minimum()) {
    _this->_internal_mutable_minimum()->::tensorflow::TensorProto::MergeFrom(
        from._internal_minimum());
  }
  if (from._internal_has_maximum()) {
    _this->_internal_mutable_maximum()->::tensorflow::TensorProto::MergeFrom(
        from._internal_maximum());
  }
  if (from._internal_dtype() != 0) {
    _this->_internal_set_dtype(from._internal_dtype());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void BoundedTensorSpecProto::CopyFrom(const BoundedTensorSpecProto& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.BoundedTensorSpecProto)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool BoundedTensorSpecProto::IsInitialized() const {
  return true;
}

void BoundedTensorSpecProto::InternalSwap(BoundedTensorSpecProto* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.name_, lhs_arena,
      &other->_impl_.name_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(BoundedTensorSpecProto, _impl_.dtype_)
      + sizeof(BoundedTensorSpecProto::_impl_.dtype_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(BoundedTensorSpecProto, _impl_.shape_)>(
          reinterpret_cast<char*>(&_impl_.shape_),
          reinterpret_cast<char*>(&other->_impl_.shape_));
}

std::string BoundedTensorSpecProto::GetTypeName() const {
  return "tensorflow.BoundedTensorSpecProto";
}
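// Illustrative usage only (not emitted by protoc): building and serializing a
// BoundedTensorSpecProto through the generated accessors.
//
//   tensorflow::BoundedTensorSpecProto spec;
//   spec.set_name("observation");
//   spec.set_dtype(tensorflow::DT_FLOAT);
//   spec.mutable_shape()->add_dim()->set_size(4);
//   std::string bytes;
//   spec.SerializeToString(&bytes);  // runs ByteSizeLong() then _InternalSerialize()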


// ===================================================================

class TypeSpecProto::_Internal {
 public:
  static const ::tensorflow::StructuredValue& type_state(const TypeSpecProto* msg);
};

const ::tensorflow::StructuredValue&
TypeSpecProto::_Internal::type_state(const TypeSpecProto* msg) {
  return *msg->_impl_.type_state_;
}
TypeSpecProto::TypeSpecProto(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                             bool is_message_owned)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite(arena, is_message_owned) {
  SharedCtor(arena, is_message_owned);
  // @@protoc_insertion_point(arena_constructor:tensorflow.TypeSpecProto)
}
TypeSpecProto::TypeSpecProto(const TypeSpecProto& from)
  : ::PROTOBUF_NAMESPACE_ID::MessageLite() {
  TypeSpecProto* const _this = this; (void)_this;
  new (&_impl_) Impl_{
      decltype(_impl_.type_spec_class_name_){}
    , decltype(_impl_.type_state_){nullptr}
    , decltype(_impl_.type_spec_class_){}
    , decltype(_impl_.num_flat_components_){}
    , /*decltype(_impl_._cached_size_)*/{}};

  _internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
  _impl_.type_spec_class_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.type_spec_class_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
  if (!from._internal_type_spec_class_name().empty()) {
    _this->_impl_.type_spec_class_name_.Set(from._internal_type_spec_class_name(),
      _this->GetArenaForAllocation());
  }
  if (from._internal_has_type_state()) {
    _this->_impl_.type_state_ = new ::tensorflow::StructuredValue(*from._impl_.type_state_);
  }
  ::memcpy(&_impl_.type_spec_class_, &from._impl_.type_spec_class_,
    static_cast<size_t>(reinterpret_cast<char*>(&_impl_.num_flat_components_) -
    reinterpret_cast<char*>(&_impl_.type_spec_class_)) + sizeof(_impl_.num_flat_components_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.TypeSpecProto)
}

inline void TypeSpecProto::SharedCtor(
    ::_pb::Arena* arena, bool is_message_owned) {
  (void)arena;
  (void)is_message_owned;
  new (&_impl_) Impl_{
      decltype(_impl_.type_spec_class_name_){}
    , decltype(_impl_.type_state_){nullptr}
    , decltype(_impl_.type_spec_class_){0}
    , decltype(_impl_.num_flat_components_){0}
    , /*decltype(_impl_._cached_size_)*/{}
  };
  _impl_.type_spec_class_name_.InitDefault();
  #ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
    _impl_.type_spec_class_name_.Set("", GetArenaForAllocation());
  #endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
}

TypeSpecProto::~TypeSpecProto() {
  // @@protoc_insertion_point(destructor:tensorflow.TypeSpecProto)
  if (auto *arena = _internal_metadata_.DeleteReturnArena<std::string>()) {
  (void)arena;
    return;
  }
  SharedDtor();
}

inline void TypeSpecProto::SharedDtor() {
  GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
  _impl_.type_spec_class_name_.Destroy();
  if (this != internal_default_instance()) delete _impl_.type_state_;
}

void TypeSpecProto::SetCachedSize(int size) const {
  _impl_._cached_size_.Set(size);
}

void TypeSpecProto::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.TypeSpecProto)
  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  _impl_.type_spec_class_name_.ClearToEmpty();
  if (GetArenaForAllocation() == nullptr && _impl_.type_state_ != nullptr) {
    delete _impl_.type_state_;
  }
  _impl_.type_state_ = nullptr;
  ::memset(&_impl_.type_spec_class_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&_impl_.num_flat_components_) -
      reinterpret_cast<char*>(&_impl_.type_spec_class_)) + sizeof(_impl_.num_flat_components_));
  _internal_metadata_.Clear<std::string>();
}

const char* TypeSpecProto::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  while (!ctx->Done(&ptr)) {
    ::uint32_t tag;
    ptr = ::_pbi::ReadTag(ptr, &tag);
    switch (tag >> 3) {
      // .tensorflow.TypeSpecProto.TypeSpecClass type_spec_class = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) {
          ::uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
          CHK_(ptr);
          _internal_set_type_spec_class(static_cast<::tensorflow::TypeSpecProto_TypeSpecClass>(val));
        } else {
          goto handle_unusual;
        }
        continue;
      // .tensorflow.StructuredValue type_state = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) {
          ptr = ctx->ParseMessage(_internal_mutable_type_state(), ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      // string type_spec_class_name = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) {
          auto str = _internal_mutable_type_spec_class_name();
          ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
          CHK_(ptr);
          CHK_(::_pbi::VerifyUTF8(str, nullptr));
        } else {
          goto handle_unusual;
        }
        continue;
      // int32 num_flat_components = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) {
          _impl_.num_flat_components_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr);
          CHK_(ptr);
        } else {
          goto handle_unusual;
        }
        continue;
      default:
        goto handle_unusual;
    }  // switch
  handle_unusual:
    if ((tag == 0) || ((tag & 7) == 4)) {
      CHK_(ptr);
      ctx->SetLastTag(tag);
      goto message_done;
    }
    ptr = UnknownFieldParse(
        tag,
        _internal_metadata_.mutable_unknown_fields<std::string>(),
        ptr, ctx);
    CHK_(ptr != nullptr);
  }  // while
message_done:
  return ptr;
failure:
  ptr = nullptr;
  goto message_done;
#undef CHK_
}

::uint8_t* TypeSpecProto::_InternalSerialize(
    ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.TypeSpecProto)
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  // .tensorflow.TypeSpecProto.TypeSpecClass type_spec_class = 1;
  if (this->_internal_type_spec_class() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteEnumToArray(
        1, this->_internal_type_spec_class(), target);
  }

  // .tensorflow.StructuredValue type_state = 2;
  if (this->_internal_has_type_state()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessage(2, _Internal::type_state(this),
        _Internal::type_state(this).GetCachedSize(), target, stream);
  }

  // string type_spec_class_name = 3;
  if (!this->_internal_type_spec_class_name().empty()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->_internal_type_spec_class_name().data(), static_cast<int>(this->_internal_type_spec_class_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.TypeSpecProto.type_spec_class_name");
    target = stream->WriteStringMaybeAliased(
        3, this->_internal_type_spec_class_name(), target);
  }

  // int32 num_flat_components = 4;
  if (this->_internal_num_flat_components() != 0) {
    target = stream->EnsureSpace(target);
    target = ::_pbi::WireFormatLite::WriteInt32ToArray(4, this->_internal_num_flat_components(), target);
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    target = stream->WriteRaw(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).data(),
        static_cast<int>(_internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size()), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.TypeSpecProto)
  return target;
}

size_t TypeSpecProto::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.TypeSpecProto)
  size_t total_size = 0;

  ::uint32_t cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // string type_spec_class_name = 3;
  if (!this->_internal_type_spec_class_name().empty()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->_internal_type_spec_class_name());
  }

  // .tensorflow.StructuredValue type_state = 2;
  if (this->_internal_has_type_state()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *_impl_.type_state_);
  }

  // .tensorflow.TypeSpecProto.TypeSpecClass type_spec_class = 1;
  if (this->_internal_type_spec_class() != 0) {
    total_size += 1 +
      ::_pbi::WireFormatLite::EnumSize(this->_internal_type_spec_class());
  }

  // int32 num_flat_components = 4;
  if (this->_internal_num_flat_components() != 0) {
    total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_num_flat_components());
  }

  if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
    total_size += _internal_metadata_.unknown_fields<std::string>(::PROTOBUF_NAMESPACE_ID::internal::GetEmptyString).size();
  }
  int cached_size = ::_pbi::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
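// Note: ByteSizeLong() stores its result via SetCachedSize() before returning;
// _InternalSerialize() then reads that value back through GetCachedSize() when
// writing length-delimited sub-messages, so a size pass always precedes
// serialization.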

void TypeSpecProto::CheckTypeAndMergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::MessageLite& from) {
  MergeFrom(*::_pbi::DownCast<const TypeSpecProto*>(
      &from));
}

void TypeSpecProto::MergeFrom(const TypeSpecProto& from) {
  TypeSpecProto* const _this = this;
  // @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.TypeSpecProto)
  GOOGLE_DCHECK_NE(&from, _this);
  ::uint32_t cached_has_bits = 0;
  (void) cached_has_bits;

  if (!from._internal_type_spec_class_name().empty()) {
    _this->_internal_set_type_spec_class_name(from._internal_type_spec_class_name());
  }
  if (from._internal_has_type_state()) {
    _this->_internal_mutable_type_state()->::tensorflow::StructuredValue::MergeFrom(
        from._internal_type_state());
  }
  if (from._internal_type_spec_class() != 0) {
    _this->_internal_set_type_spec_class(from._internal_type_spec_class());
  }
  if (from._internal_num_flat_components() != 0) {
    _this->_internal_set_num_flat_components(from._internal_num_flat_components());
  }
  _this->_internal_metadata_.MergeFrom<std::string>(from._internal_metadata_);
}

void TypeSpecProto::CopyFrom(const TypeSpecProto& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.TypeSpecProto)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

bool TypeSpecProto::IsInitialized() const {
  return true;
}

void TypeSpecProto::InternalSwap(TypeSpecProto* other) {
  using std::swap;
  auto* lhs_arena = GetArenaForAllocation();
  auto* rhs_arena = other->GetArenaForAllocation();
  _internal_metadata_.InternalSwap(&other->_internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
      &_impl_.type_spec_class_name_, lhs_arena,
      &other->_impl_.type_spec_class_name_, rhs_arena
  );
  ::PROTOBUF_NAMESPACE_ID::internal::memswap<
      PROTOBUF_FIELD_OFFSET(TypeSpecProto, _impl_.num_flat_components_)
      + sizeof(TypeSpecProto::_impl_.num_flat_components_)  // NOLINT
      - PROTOBUF_FIELD_OFFSET(TypeSpecProto, _impl_.type_state_)>(
          reinterpret_cast<char*>(&_impl_.type_state_),
          reinterpret_cast<char*>(&other->_impl_.type_state_));
}

std::string TypeSpecProto::GetTypeName() const {
  return "tensorflow.TypeSpecProto";
}
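// Illustrative round-trip only (not emitted by protoc): these are lite
// messages (MessageLite), so they support binary (de)serialization but not
// reflection or text format.
//
//   tensorflow::TypeSpecProto spec;
//   spec.set_type_spec_class_name("MyCustomSpec");
//   spec.set_num_flat_components(2);
//   std::string bytes;
//   spec.SerializeToString(&bytes);
//   tensorflow::TypeSpecProto parsed;
//   bool ok = parsed.ParseFromString(bytes);  // drives _InternalParse()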


// @@protoc_insertion_point(namespace_scope)
}  // namespace tensorflow
PROTOBUF_NAMESPACE_OPEN
template<> PROTOBUF_NOINLINE ::tensorflow::StructuredValue*
Arena::CreateMaybeMessage< ::tensorflow::StructuredValue >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::StructuredValue >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::NoneValue*
Arena::CreateMaybeMessage< ::tensorflow::NoneValue >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::NoneValue >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::ListValue*
Arena::CreateMaybeMessage< ::tensorflow::ListValue >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::ListValue >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::TupleValue*
Arena::CreateMaybeMessage< ::tensorflow::TupleValue >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::TupleValue >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::DictValue_FieldsEntry_DoNotUse*
Arena::CreateMaybeMessage< ::tensorflow::DictValue_FieldsEntry_DoNotUse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::DictValue_FieldsEntry_DoNotUse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::DictValue*
Arena::CreateMaybeMessage< ::tensorflow::DictValue >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::DictValue >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::PairValue*
Arena::CreateMaybeMessage< ::tensorflow::PairValue >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::PairValue >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::NamedTupleValue*
Arena::CreateMaybeMessage< ::tensorflow::NamedTupleValue >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::NamedTupleValue >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::TensorSpecProto*
Arena::CreateMaybeMessage< ::tensorflow::TensorSpecProto >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::TensorSpecProto >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::BoundedTensorSpecProto*
Arena::CreateMaybeMessage< ::tensorflow::BoundedTensorSpecProto >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::BoundedTensorSpecProto >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::TypeSpecProto*
Arena::CreateMaybeMessage< ::tensorflow::TypeSpecProto >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::TypeSpecProto >(arena);
}
PROTOBUF_NAMESPACE_CLOSE
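// The specializations above route each generated type through arena
// allocation when Arena::CreateMaybeMessage<T>() is used internally.
// Illustrative usage only (not part of the generated output):
//
//   google::protobuf::Arena arena;
//   auto* value =
//       google::protobuf::Arena::CreateMessage<tensorflow::StructuredValue>(&arena);
//   value->mutable_none_value();  // owned by the arena, no manual delete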

// @@protoc_insertion_point(global_scope)
#include <google/protobuf/port_undef.inc>