1 /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 // automatically generated by the FlatBuffers compiler, do not modify
16
17
18 #ifndef FLATBUFFERS_GENERATED_SERIALIZATION_TFLITE_GPU_CL_DATA_H_
19 #define FLATBUFFERS_GENERATED_SERIALIZATION_TFLITE_GPU_CL_DATA_H_
20
21 #include "flatbuffers/flatbuffers.h"
22
23 #include "serialization_base_generated.h"
24 #include "gpu_model_generated.h"
25
26 namespace tflite {
27 namespace gpu {
28 namespace cl {
29 namespace data {
30
31 struct BinaryProgram;
32 struct BinaryProgramBuilder;
33
34 struct InferenceContext;
35 struct InferenceContextBuilder;
36
// Read-only accessor for a serialized compiled GPU program: a 64-bit
// fingerprint paired with the raw program binary bytes. Generated table
// view over a FlatBuffer; never constructed directly (see
// BinaryProgramBuilder / CreateBinaryProgram).
struct BinaryProgram FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef BinaryProgramBuilder Builder;
  // VTable byte offsets of each field within the table.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_FINGERPRINT = 4,
    VT_BINARY = 6
  };
  // Fingerprint key for this binary; returns 0 if the field is absent.
  uint64_t fingerprint() const {
    return GetField<uint64_t>(VT_FINGERPRINT, 0);
  }
  // Raw binary payload; returns nullptr if the field is absent.
  const flatbuffers::Vector<uint8_t> *binary() const {
    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_BINARY);
  }
  // Structural verification: checks field offsets, scalar alignment (8 for
  // uint64_t) and vector bounds against the enclosing buffer.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint64_t>(verifier, VT_FINGERPRINT, 8) &&
           VerifyOffset(verifier, VT_BINARY) &&
           verifier.VerifyVector(binary()) &&
           verifier.EndTable();
  }
};
57
// Incremental builder for a BinaryProgram table. Typical use is via the
// CreateBinaryProgram() helper rather than driving this directly.
struct BinaryProgramBuilder {
  typedef BinaryProgram Table;
  flatbuffers::FlatBufferBuilder &fbb_;  // underlying buffer being written
  flatbuffers::uoffset_t start_;         // offset of StartTable() for this table
  // Writes the fingerprint scalar; elided from the buffer when equal to the
  // schema default (0).
  void add_fingerprint(uint64_t fingerprint) {
    fbb_.AddElement<uint64_t>(BinaryProgram::VT_FINGERPRINT, fingerprint, 0);
  }
  // Records the offset of a previously serialized byte vector.
  void add_binary(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> binary) {
    fbb_.AddOffset(BinaryProgram::VT_BINARY, binary);
  }
  explicit BinaryProgramBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Ends the table and returns its offset within the buffer.
  flatbuffers::Offset<BinaryProgram> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BinaryProgram>(end);
    return o;
  }
};
78
// Convenience helper: builds a complete BinaryProgram table in one call.
// `binary` must already be serialized into `_fbb` (see
// CreateBinaryProgramDirect for the std::vector convenience overload).
inline flatbuffers::Offset<BinaryProgram> CreateBinaryProgram(
    flatbuffers::FlatBufferBuilder &_fbb,
    uint64_t fingerprint = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> binary = 0) {
  BinaryProgramBuilder builder_(_fbb);
  // Fields are added largest-first, as emitted by flatc.
  builder_.add_fingerprint(fingerprint);
  builder_.add_binary(binary);
  return builder_.Finish();
}
88
// "Direct" variant of CreateBinaryProgram: accepts a plain std::vector and
// serializes it into `_fbb` before building the table. A null `binary`
// yields an absent field (offset 0).
inline flatbuffers::Offset<BinaryProgram> CreateBinaryProgramDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    uint64_t fingerprint = 0,
    const std::vector<uint8_t> *binary = nullptr) {
  auto binary__ = binary ? _fbb.CreateVector<uint8_t>(*binary) : 0;
  return tflite::gpu::cl::data::CreateBinaryProgram(
      _fbb,
      fingerprint,
      binary__);
}
99
// Root table of this schema: a serialized GPU inference context. Bundles
// the GPU model, the driver version string it was built against, the
// compiled program binaries, and per-node tuning results (work-group sizes
// and fingerprints). The three per-node vectors are presumably parallel,
// indexed by node — TODO(review): confirm against the writer.
struct InferenceContext FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef InferenceContextBuilder Builder;
  // VTable byte offsets of each field within the table.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_GPU_MODEL = 4,
    VT_DRIVER_VERSION = 6,
    VT_BINARY_PROGRAMS = 8,
    VT_TUNED_WORK_GROUP_SIZES_PER_NODE = 10,
    VT_FINGERPRINTS_PER_NODE = 12
  };
  // The serialized GPU model (defined in gpu_model_generated.h); nullptr if
  // absent.
  const tflite::gpu::data::GpuModel *gpu_model() const {
    return GetPointer<const tflite::gpu::data::GpuModel *>(VT_GPU_MODEL);
  }
  // Driver version string captured when the context was serialized; nullptr
  // if absent.
  const flatbuffers::String *driver_version() const {
    return GetPointer<const flatbuffers::String *>(VT_DRIVER_VERSION);
  }
  // Compiled program binaries; nullptr if absent.
  const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::BinaryProgram>> *binary_programs() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::BinaryProgram>> *>(VT_BINARY_PROGRAMS);
  }
  // Tuned work-group size (Int3) per node; nullptr if absent.
  const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::data::Int3>> *tuned_work_group_sizes_per_node() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::data::Int3>> *>(VT_TUNED_WORK_GROUP_SIZES_PER_NODE);
  }
  // Fingerprint per node; nullptr if absent.
  const flatbuffers::Vector<uint64_t> *fingerprints_per_node() const {
    return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_FINGERPRINTS_PER_NODE);
  }
  // Structural verification: recursively verifies the nested table, string,
  // and every vector (including each table element) against buffer bounds.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_GPU_MODEL) &&
           verifier.VerifyTable(gpu_model()) &&
           VerifyOffset(verifier, VT_DRIVER_VERSION) &&
           verifier.VerifyString(driver_version()) &&
           VerifyOffset(verifier, VT_BINARY_PROGRAMS) &&
           verifier.VerifyVector(binary_programs()) &&
           verifier.VerifyVectorOfTables(binary_programs()) &&
           VerifyOffset(verifier, VT_TUNED_WORK_GROUP_SIZES_PER_NODE) &&
           verifier.VerifyVector(tuned_work_group_sizes_per_node()) &&
           verifier.VerifyVectorOfTables(tuned_work_group_sizes_per_node()) &&
           VerifyOffset(verifier, VT_FINGERPRINTS_PER_NODE) &&
           verifier.VerifyVector(fingerprints_per_node()) &&
           verifier.EndTable();
  }
};
141
// Incremental builder for an InferenceContext table. All offset-valued
// fields must be serialized into the same FlatBufferBuilder before being
// added here. Typical use is via CreateInferenceContext().
struct InferenceContextBuilder {
  typedef InferenceContext Table;
  flatbuffers::FlatBufferBuilder &fbb_;  // underlying buffer being written
  flatbuffers::uoffset_t start_;         // offset of StartTable() for this table
  void add_gpu_model(flatbuffers::Offset<tflite::gpu::data::GpuModel> gpu_model) {
    fbb_.AddOffset(InferenceContext::VT_GPU_MODEL, gpu_model);
  }
  void add_driver_version(flatbuffers::Offset<flatbuffers::String> driver_version) {
    fbb_.AddOffset(InferenceContext::VT_DRIVER_VERSION, driver_version);
  }
  void add_binary_programs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::BinaryProgram>>> binary_programs) {
    fbb_.AddOffset(InferenceContext::VT_BINARY_PROGRAMS, binary_programs);
  }
  void add_tuned_work_group_sizes_per_node(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::data::Int3>>> tuned_work_group_sizes_per_node) {
    fbb_.AddOffset(InferenceContext::VT_TUNED_WORK_GROUP_SIZES_PER_NODE, tuned_work_group_sizes_per_node);
  }
  void add_fingerprints_per_node(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> fingerprints_per_node) {
    fbb_.AddOffset(InferenceContext::VT_FINGERPRINTS_PER_NODE, fingerprints_per_node);
  }
  explicit InferenceContextBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Ends the table and returns its offset within the buffer.
  flatbuffers::Offset<InferenceContext> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<InferenceContext>(end);
    return o;
  }
};
171
// Convenience helper: builds a complete InferenceContext in one call. All
// offset parameters must already be serialized into `_fbb`; a 0 offset
// leaves the corresponding field absent.
inline flatbuffers::Offset<InferenceContext> CreateInferenceContext(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<tflite::gpu::data::GpuModel> gpu_model = 0,
    flatbuffers::Offset<flatbuffers::String> driver_version = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::BinaryProgram>>> binary_programs = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::data::Int3>>> tuned_work_group_sizes_per_node = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint64_t>> fingerprints_per_node = 0) {
  InferenceContextBuilder builder_(_fbb);
  // Fields are added in reverse schema order, as emitted by flatc.
  builder_.add_fingerprints_per_node(fingerprints_per_node);
  builder_.add_tuned_work_group_sizes_per_node(tuned_work_group_sizes_per_node);
  builder_.add_binary_programs(binary_programs);
  builder_.add_driver_version(driver_version);
  builder_.add_gpu_model(gpu_model);
  return builder_.Finish();
}
187
// "Direct" variant of CreateInferenceContext: accepts C-string /
// std::vector arguments and serializes them into `_fbb` first. Null
// pointers yield absent fields. Note the element offsets inside the two
// offset-vectors must still refer to tables already written into `_fbb`.
inline flatbuffers::Offset<InferenceContext> CreateInferenceContextDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<tflite::gpu::data::GpuModel> gpu_model = 0,
    const char *driver_version = nullptr,
    const std::vector<flatbuffers::Offset<tflite::gpu::cl::data::BinaryProgram>> *binary_programs = nullptr,
    const std::vector<flatbuffers::Offset<tflite::gpu::data::Int3>> *tuned_work_group_sizes_per_node = nullptr,
    const std::vector<uint64_t> *fingerprints_per_node = nullptr) {
  auto driver_version__ = driver_version ? _fbb.CreateString(driver_version) : 0;
  auto binary_programs__ = binary_programs ? _fbb.CreateVector<flatbuffers::Offset<tflite::gpu::cl::data::BinaryProgram>>(*binary_programs) : 0;
  auto tuned_work_group_sizes_per_node__ = tuned_work_group_sizes_per_node ? _fbb.CreateVector<flatbuffers::Offset<tflite::gpu::data::Int3>>(*tuned_work_group_sizes_per_node) : 0;
  auto fingerprints_per_node__ = fingerprints_per_node ? _fbb.CreateVector<uint64_t>(*fingerprints_per_node) : 0;
  return tflite::gpu::cl::data::CreateInferenceContext(
      _fbb,
      gpu_model,
      driver_version__,
      binary_programs__,
      tuned_work_group_sizes_per_node__,
      fingerprints_per_node__);
}
207
GetInferenceContext(const void * buf)208 inline const tflite::gpu::cl::data::InferenceContext *GetInferenceContext(const void *buf) {
209 return flatbuffers::GetRoot<tflite::gpu::cl::data::InferenceContext>(buf);
210 }
211
GetSizePrefixedInferenceContext(const void * buf)212 inline const tflite::gpu::cl::data::InferenceContext *GetSizePrefixedInferenceContext(const void *buf) {
213 return flatbuffers::GetSizePrefixedRoot<tflite::gpu::cl::data::InferenceContext>(buf);
214 }
215
VerifyInferenceContextBuffer(flatbuffers::Verifier & verifier)216 inline bool VerifyInferenceContextBuffer(
217 flatbuffers::Verifier &verifier) {
218 return verifier.VerifyBuffer<tflite::gpu::cl::data::InferenceContext>(nullptr);
219 }
220
VerifySizePrefixedInferenceContextBuffer(flatbuffers::Verifier & verifier)221 inline bool VerifySizePrefixedInferenceContextBuffer(
222 flatbuffers::Verifier &verifier) {
223 return verifier.VerifySizePrefixedBuffer<tflite::gpu::cl::data::InferenceContext>(nullptr);
224 }
225
FinishInferenceContextBuffer(flatbuffers::FlatBufferBuilder & fbb,flatbuffers::Offset<tflite::gpu::cl::data::InferenceContext> root)226 inline void FinishInferenceContextBuffer(
227 flatbuffers::FlatBufferBuilder &fbb,
228 flatbuffers::Offset<tflite::gpu::cl::data::InferenceContext> root) {
229 fbb.Finish(root);
230 }
231
FinishSizePrefixedInferenceContextBuffer(flatbuffers::FlatBufferBuilder & fbb,flatbuffers::Offset<tflite::gpu::cl::data::InferenceContext> root)232 inline void FinishSizePrefixedInferenceContextBuffer(
233 flatbuffers::FlatBufferBuilder &fbb,
234 flatbuffers::Offset<tflite::gpu::cl::data::InferenceContext> root) {
235 fbb.FinishSizePrefixed(root);
236 }
237
238 } // namespace data
239 } // namespace cl
240 } // namespace gpu
241 } // namespace tflite
242
243 #endif // FLATBUFFERS_GENERATED_SERIALIZATION_TFLITE_GPU_CL_DATA_H_
244