//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <schema_generated.h>

#include <doctest/doctest.h>

namespace
{

std::vector<char> CreateConcatTfLiteModel(tflite::BuiltinOperator controlOperatorCode,
                                          tflite::TensorType tensorType,
                                          std::vector<int32_t>& inputTensorShape,
                                          const std::vector<int32_t>& outputTensorShape,
                                          const int32_t inputTensorNum,
                                          int32_t axis = 0,
                                          float quantScale = 1.0f,
                                          int quantOffset  = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    auto quantizationParameters =
            CreateQuantizationParameters(flatBufferBuilder,
                                         0,
                                         0,
                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::vector<int32_t> operatorInputs{};
    const std::vector<int32_t> operatorOutputs{inputTensorNum};
    std::vector<int> subgraphInputs{};
    const std::vector<int> subgraphOutputs{inputTensorNum};

    std::vector<flatbuffers::Offset<Tensor>> tensors(inputTensorNum + 1);
    for (int i = 0; i < inputTensorNum; ++i)
    {
        tensors[i] = CreateTensor(flatBufferBuilder,
                                  flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                          inputTensorShape.size()),
                                  tensorType,
                                  1,
                                  flatBufferBuilder.CreateString("input" + std::to_string(i)),
                                  quantizationParameters);

        // Register the input tensor index with the operator and subgraph inputs.
        operatorInputs.push_back(i);
        subgraphInputs.push_back(i);
    }

    // Create output tensor
    tensors[inputTensorNum] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // Create operator
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_ConcatenationOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateConcatenationOptions(flatBufferBuilder, axis).Union();

    flatbuffers::Offset<Operator> controlOperator =
            CreateOperator(flatBufferBuilder,
                           0,
                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                           operatorBuiltinOptionsType,
                           operatorBuiltinOptions);

    flatbuffers::Offset<SubGraph> subgraph =
            CreateSubGraph(flatBufferBuilder,
                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                           flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
            flatBufferBuilder.CreateString("ArmnnDelegate: Concatenation Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, controlOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
            CreateModel(flatBufferBuilder,
                        TFLITE_SCHEMA_VERSION,
                        flatBufferBuilder.CreateVector(&operatorCode, 1),
                        flatBufferBuilder.CreateVector(&subgraph, 1),
                        modelDescription,
                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
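
// The returned buffer is a complete, self-contained TFLite flatbuffer. As a
// minimal sketch (illustrative only, not part of the original helpers), it can
// be parsed back and inspected through the generated schema API:
//
//     std::vector<char> buffer = CreateConcatTfLiteModel(tflite::BuiltinOperator_CONCATENATION,
//                                                        tflite::TensorType_FLOAT32,
//                                                        inputShape, outputShape, 2);
//     const tflite::Model* model = tflite::GetModel(buffer.data());
//     // One subgraph holding the two input tensors plus the output tensor:
//     CHECK(model->subgraphs()->Get(0)->tensors()->size() == 3);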

std::vector<char> CreateMeanTfLiteModel(tflite::BuiltinOperator controlOperatorCode,
                                        tflite::TensorType tensorType,
                                        std::vector<int32_t>& input0TensorShape,
                                        std::vector<int32_t>& input1TensorShape,
                                        const std::vector<int32_t>& outputTensorShape,
                                        std::vector<int32_t>& axisData,
                                        const bool keepDims,
                                        float quantScale = 1.0f,
                                        int quantOffset  = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                                             sizeof(int32_t) * axisData.size()));

    auto quantizationParameters =
            CreateQuantizationParameters(flatBufferBuilder,
                                         0,
                                         0,
                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
                                                                      input0TensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);

    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
                                                                      input1TensorShape.size()),
                              ::tflite::TensorType_INT32,
                              1,
                              flatBufferBuilder.CreateString("axis"),
                              quantizationParameters);

    // Create output tensor
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // Create operator. Mean uses ReducerOptions.
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_ReducerOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateReducerOptions(flatBufferBuilder, keepDims).Union();

    const std::vector<int> operatorInputs{ 0, 1 };
    const std::vector<int> operatorOutputs{ 2 };
    flatbuffers::Offset<Operator> controlOperator =
            CreateOperator(flatBufferBuilder,
                           0,
                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                           operatorBuiltinOptionsType,
                           operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{ 0, 1 };
    const std::vector<int> subgraphOutputs{ 2 };
    flatbuffers::Offset<SubGraph> subgraph =
            CreateSubGraph(flatBufferBuilder,
                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                           flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
            flatBufferBuilder.CreateString("ArmnnDelegate: Mean Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, controlOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
            CreateModel(flatBufferBuilder,
                        TFLITE_SCHEMA_VERSION,
                        flatBufferBuilder.CreateVector(&operatorCode, 1),
                        flatBufferBuilder.CreateVector(&subgraph, 1),
                        modelDescription,
                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
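
// Unlike the Concatenation model above, the axis input here is baked into the
// model as a constant buffer (buffers[1]). As an illustrative-only sketch, it
// can be read back from the parsed model:
//
//     const tflite::Model* model = tflite::GetModel(buffer.data());
//     const auto* axisBytes = model->buffers()->Get(1)->data();
//     // axisBytes holds axisData as raw little-endian int32 bytes.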

template <typename T>
void ConcatenationTest(tflite::BuiltinOperator controlOperatorCode,
                       tflite::TensorType tensorType,
                       std::vector<armnn::BackendId>& backends,
                       std::vector<int32_t>& inputShapes,
                       std::vector<int32_t>& expectedOutputShape,
                       std::vector<std::vector<T>>& inputValues,
                       std::vector<T>& expectedOutputValues,
                       int32_t axis = 0,
                       float quantScale = 1.0f,
                       int quantOffset  = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateConcatTfLiteModel(controlOperatorCode,
                                                            tensorType,
                                                            inputShapes,
                                                            expectedOutputShape,
                                                            inputValues.size(),
                                                            axis,
                                                            quantScale,
                                                            quantOffset);

    // Set up the interpreter with just the TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);

    // Set up the interpreter with the Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);

    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues[i], i) == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<T>(inputValues[i], i) == kTfLiteOk);
    }

    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
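
// A minimal usage sketch for ConcatenationTest (illustrative only; kept as a
// comment so this shared header does not register extra doctest cases). The
// CpuRef backend and the concrete values are assumptions, not taken from the
// real test suites: two 2x2 float tensors concatenated along axis 0.
//
//     TEST_CASE ("Concatenation_Float32_Sketch")
//     {
//         std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//         std::vector<int32_t> inputShape { 2, 2 };
//         std::vector<int32_t> expectedOutputShape { 4, 2 };
//         std::vector<std::vector<float>> inputValues { { 1, 2, 3, 4 },
//                                                       { 5, 6, 7, 8 } };
//         std::vector<float> expectedOutputValues { 1, 2, 3, 4, 5, 6, 7, 8 };
//         ConcatenationTest<float>(tflite::BuiltinOperator_CONCATENATION,
//                                  tflite::TensorType_FLOAT32,
//                                  backends,
//                                  inputShape,
//                                  expectedOutputShape,
//                                  inputValues,
//                                  expectedOutputValues);
//     }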

template <typename T>
void MeanTest(tflite::BuiltinOperator controlOperatorCode,
              tflite::TensorType tensorType,
              std::vector<armnn::BackendId>& backends,
              std::vector<int32_t>& input0Shape,
              std::vector<int32_t>& input1Shape,
              std::vector<int32_t>& expectedOutputShape,
              std::vector<T>& input0Values,
              std::vector<int32_t>& input1Values,
              std::vector<T>& expectedOutputValues,
              const bool keepDims,
              float quantScale = 1.0f,
              int quantOffset  = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateMeanTfLiteModel(controlOperatorCode,
                                                          tensorType,
                                                          input0Shape,
                                                          input1Shape,
                                                          expectedOutputShape,
                                                          input1Values,
                                                          keepDims,
                                                          quantScale,
                                                          quantOffset);

    // Set up the interpreter with just the TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

    // Set up the interpreter with the Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
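
// A matching sketch for MeanTest (illustrative only, same caveats as above):
// reduce a 1x2x2 float tensor over axis 2, keeping the reduced dimension.
//
//     TEST_CASE ("Mean_Float32_Sketch")
//     {
//         std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//         std::vector<int32_t> input0Shape { 1, 2, 2 };
//         std::vector<int32_t> input1Shape { 1 };
//         std::vector<int32_t> expectedOutputShape { 1, 2, 1 };
//         std::vector<float>   input0Values { 1.0f, 2.0f, 3.0f, 4.0f };
//         std::vector<int32_t> input1Values { 2 };  // axis to reduce over
//         std::vector<float>   expectedOutputValues { 1.5f, 3.5f };
//         MeanTest<float>(tflite::BuiltinOperator_MEAN,
//                         tflite::TensorType_FLOAT32,
//                         backends,
//                         input0Shape,
//                         input1Shape,
//                         expectedOutputShape,
//                         input0Values,
//                         input1Values,
//                         expectedOutputValues,
//                         true);  // keepDims
//     }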

} // anonymous namespace