//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <flatbuffers/flexbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/kernels/custom_ops_register.h>
#include <tensorflow/lite/version.h>

#include <schema_generated.h>

#include <doctest/doctest.h>

#include <array>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>

namespace
{
#if defined(ARMNN_POST_TFLITE_2_5)

std::vector<uint8_t> CreateCustomOptions(int, int, int, int, int, int, TfLitePadding);

std::vector<char> CreatePooling3dTfLiteModel(
    std::string poolType,
    tflite::TensorType tensorType,
    const std::vector<int32_t>& inputTensorShape,
    const std::vector<int32_t>& outputTensorShape,
    TfLitePadding padding = kTfLitePaddingSame,
    int32_t strideWidth = 0,
    int32_t strideHeight = 0,
    int32_t strideDepth = 0,
    int32_t filterWidth = 0,
    int32_t filterHeight = 0,
    int32_t filterDepth = 0,
    tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
    float quantScale = 1.0f,
    int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // Empty buffers: neither the input nor the output tensor holds constant data.
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    // Quantization parameters shared by the input and output tensors.
    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    // Create the input and output tensors
    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);

    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // Create the flexbuffer custom options using CreateCustomOptions() defined below.
    std::vector<uint8_t> customOperatorOptions = CreateCustomOptions(strideHeight, strideWidth, strideDepth,
                                                                     filterHeight, filterWidth, filterDepth, padding);
    // opCodeIndex is created as a uint8_t to avoid a map lookup
    uint8_t opCodeIndex = 0;
    // Set the operator name based on the poolType passed in from the test case
    std::string opName = "";
    if (poolType == "kMax")
    {
        opName = "MaxPool3D";
    }
    else
    {
        opName = "AveragePool3D";
    }
    // A custom operator code is built from the CUSTOM builtin operator code and the name of the custom op
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCodeDirect(flatBufferBuilder,
                                                                              tflite::BuiltinOperator_CUSTOM,
                                                                              opName.c_str());

    // Create the Operator using the opCodeIndex and custom options. Also sets builtin options to none.
    const std::vector<int32_t> operatorInputs{ 0 };
    const std::vector<int32_t> operatorOutputs{ 1 };
    flatbuffers::Offset<Operator> poolingOperator =
        CreateOperator(flatBufferBuilder,
                       opCodeIndex,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       tflite::BuiltinOptions_NONE,
                       0,
                       flatBufferBuilder.CreateVector<uint8_t>(customOperatorOptions),
                       tflite::CustomOptionsFormat_FLEXBUFFERS);

    // Create the subgraph using the operator created above.
    const std::vector<int> subgraphInputs{ 0 };
    const std::vector<int> subgraphOutputs{ 1 };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&poolingOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Pooling3d Operator Model");

    // Create the model using operatorCode and the subgraph.
    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

template<typename T>
void Pooling3dTest(std::string poolType,
                   tflite::TensorType tensorType,
                   std::vector<armnn::BackendId>& backends,
                   std::vector<int32_t>& inputShape,
                   std::vector<int32_t>& outputShape,
                   std::vector<T>& inputValues,
                   std::vector<T>& expectedOutputValues,
                   TfLitePadding padding = kTfLitePaddingSame,
                   int32_t strideWidth = 0,
                   int32_t strideHeight = 0,
                   int32_t strideDepth = 0,
                   int32_t filterWidth = 0,
                   int32_t filterHeight = 0,
                   int32_t filterDepth = 0,
                   tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
                   float quantScale = 1.0f,
                   int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    // Create the single op model buffer
    std::vector<char> modelBuffer = CreatePooling3dTfLiteModel(poolType,
                                                               tensorType,
                                                               inputShape,
                                                               outputShape,
                                                               padding,
                                                               strideWidth,
                                                               strideHeight,
                                                               strideDepth,
                                                               filterWidth,
                                                               filterHeight,
                                                               filterDepth,
                                                               fusedActivation,
                                                               quantScale,
                                                               quantOffset);

    std::string opType = "";
    if (poolType == "kMax")
    {
        opType = "MaxPool3D";
    }
    else
    {
        opType = "AveragePool3D";
    }

    // Setup interpreter with just TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer, opType);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

    // Setup interpreter with Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends, opType);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
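
// Illustrative sketch only (an assumption, not part of the original helpers): a doctest case
// could drive Pooling3dTest as shown here. It is kept disabled with #if 0 so that this shared
// header does not register tests itself; the backend, shapes and values are hypothetical and
// chosen purely for demonstration.
#if 0
TEST_CASE("MaxPool3D_Fp32_UsageSketch")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };

    // NDHWC input of 1x2x3x4x1; a 2x2x2 filter with stride 1 and VALID padding
    // yields a 1x1x2x3x1 output. With an all-ones input, the max-pooled output is all ones.
    std::vector<int32_t> inputShape  = { 1, 2, 3, 4, 1 };
    std::vector<int32_t> outputShape = { 1, 1, 2, 3, 1 };
    std::vector<float> inputValues(2 * 3 * 4, 1.0f);
    std::vector<float> expectedOutputValues(1 * 2 * 3, 1.0f);

    Pooling3dTest<float>("kMax",
                         tflite::TensorType_FLOAT32,
                         backends,
                         inputShape,
                         outputShape,
                         inputValues,
                         expectedOutputValues,
                         kTfLitePaddingValid,
                         1, 1, 1,  // strideWidth, strideHeight, strideDepth
                         2, 2, 2); // filterWidth, filterHeight, filterDepth
}
#endif // disabled usage sketch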

// Function to create the flexbuffer custom options for the custom pooling3d operator.
std::vector<uint8_t> CreateCustomOptions(int strideHeight, int strideWidth, int strideDepth,
                                         int filterHeight, int filterWidth, int filterDepth, TfLitePadding padding)
{
    auto flex_builder = std::make_unique<flexbuffers::Builder>();
    size_t map_start = flex_builder->StartMap();
    flex_builder->String("data_format", "NDHWC");
    // Padding is added as a key with the padding type as its value. Only VALID and SAME are supported.
    if (padding == kTfLitePaddingValid)
    {
        flex_builder->String("padding", "VALID");
    }
    else
    {
        flex_builder->String("padding", "SAME");
    }

    // Vector of filter dimensions in order ( 1, Depth, Height, Width, 1 )
    auto start = flex_builder->StartVector("ksize");
    flex_builder->Add(1);
    flex_builder->Add(filterDepth);
    flex_builder->Add(filterHeight);
    flex_builder->Add(filterWidth);
    flex_builder->Add(1);
    // EndVector( start, bool typed, bool fixed)
    flex_builder->EndVector(start, true, false);

    // Vector of stride dimensions in order ( 1, Depth, Height, Width, 1 )
    auto stridesStart = flex_builder->StartVector("strides");
    flex_builder->Add(1);
    flex_builder->Add(strideDepth);
    flex_builder->Add(strideHeight);
    flex_builder->Add(strideWidth);
    flex_builder->Add(1);
    // EndVector( stridesStart, bool typed, bool fixed)
    flex_builder->EndVector(stridesStart, true, false);

    flex_builder->EndMap(map_start);
    flex_builder->Finish();

    return flex_builder->GetBuffer();
}
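
// Illustrative sketch only (an assumption, not part of the original helpers): the buffer
// returned by CreateCustomOptions() can be decoded with the flexbuffers reader API, for
// example to check the encoded padding string. Kept disabled as it is unused by the tests.
#if 0
inline std::string GetEncodedPadding(const std::vector<uint8_t>& customOptions)
{
    // GetRoot() parses the finished buffer; the root is the map built above.
    auto optionsMap = flexbuffers::GetRoot(customOptions.data(), customOptions.size()).AsMap();
    return optionsMap["padding"].AsString().str();
}
#endif // disabled usage sketch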
#endif // defined(ARMNN_POST_TFLITE_2_5)
} // anonymous namespace