//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <schema_generated.h>

#include <doctest/doctest.h>

namespace
{

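// Builds a FlatBuffers TFLite model containing a single PAD, MIRROR_PAD or
// PADV2 operator, so all three variants can be exercised through one helper.
// paddingDim is serialised as the constant "padding" tensor; paddingValue and
// paddingMode are only consumed by PADV2 and MIRROR_PAD respectively.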
template <typename T>
std::vector<char> CreatePadTfLiteModel(
    tflite::BuiltinOperator padOperatorCode,
    tflite::TensorType tensorType,
    tflite::MirrorPadMode paddingMode,
    const std::vector<int32_t>& inputTensorShape,
    const std::vector<int32_t>& paddingTensorShape,
    const std::vector<int32_t>& outputTensorShape,
    const std::vector<int32_t>& paddingDim,
    const std::vector<T> paddingValue,
    float quantScale = 1.0f,
    int quantOffset  = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

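    // Quantization parameters shared by the input, output and (for PADV2) the
    // pad-value tensor; the defaults of scale 1.0f / offset 0 leave float data unchanged.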
    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    auto inputTensor = CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                            inputTensorShape.size()),
                                    tensorType,
                                    0,
                                    flatBufferBuilder.CreateString("input"),
                                    quantizationParameters);

    auto paddingTensor = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(paddingTensorShape.data(),
                                                                              paddingTensorShape.size()),
                                      tflite::TensorType_INT32,
                                      1,
                                      flatBufferBuilder.CreateString("padding"));

    auto outputTensor = CreateTensor(flatBufferBuilder,
                                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                             outputTensorShape.size()),
                                     tensorType,
                                     2,
                                     flatBufferBuilder.CreateString("output"),
                                     quantizationParameters);

    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, paddingTensor, outputTensor };

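    // Buffer 0 stays empty and backs the runtime-filled input tensor; buffer 1
    // carries the padding sizes as constant data; buffer 2 backs the output.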
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(
        CreateBuffer(flatBufferBuilder,
                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingDim.data()),
                                                    sizeof(int32_t) * paddingDim.size())));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    std::vector<int32_t> operatorInputs;
    std::vector<int> subgraphInputs;

    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_PadOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions;

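    // PAD and MIRROR_PAD take two inputs (data and paddings); PADV2 adds a
    // third scalar input carrying the value to pad with.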
    if (padOperatorCode == tflite::BuiltinOperator_PAD)
    {
        operatorInputs = {{ 0, 1 }};
        subgraphInputs = {{ 0, 1 }};
        operatorBuiltinOptions = CreatePadOptions(flatBufferBuilder).Union();
    }
    else if (padOperatorCode == tflite::BuiltinOperator_MIRROR_PAD)
    {
        operatorInputs = {{ 0, 1 }};
        subgraphInputs = {{ 0, 1 }};

        operatorBuiltinOptionsType = BuiltinOptions_MirrorPadOptions;
        operatorBuiltinOptions = CreateMirrorPadOptions(flatBufferBuilder, paddingMode).Union();
    }
    else if (padOperatorCode == tflite::BuiltinOperator_PADV2)
    {
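        // PADV2 supplies its fill value through an extra scalar tensor
        // (tensor index 3) backed by a new constant buffer (buffer index 3).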
        buffers.push_back(
            CreateBuffer(flatBufferBuilder,
                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingValue.data()),
                                                        sizeof(T))));

        const std::vector<int32_t> shape = { 1 };
        auto padValueTensor = CreateTensor(flatBufferBuilder,
                                           flatBufferBuilder.CreateVector<int32_t>(shape.data(),
                                                                                   shape.size()),
                                           tensorType,
                                           3,
                                           flatBufferBuilder.CreateString("paddingValue"),
                                           quantizationParameters);

        tensors.push_back(padValueTensor);

        operatorInputs = {{ 0, 1, 3 }};
        subgraphInputs = {{ 0, 1, 3 }};

        operatorBuiltinOptionsType = BuiltinOptions_PadV2Options;
        operatorBuiltinOptions = CreatePadV2Options(flatBufferBuilder).Union();
    }

    // create operator
    const std::vector<int32_t> operatorOutputs{ 2 };
    flatbuffers::Offset<Operator> paddingOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphOutputs{ 2 };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&paddingOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Pad Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
                                                                        padOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

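// Serialises the pad model above, runs it once on the plain TFLite runtime and
// once with the Arm NN delegate applied to the given backends, then checks that
// both runs match the expected output values and shape.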
template <typename T>
void PadTest(tflite::BuiltinOperator padOperatorCode,
             tflite::TensorType tensorType,
             const std::vector<armnn::BackendId>& backends,
             const std::vector<int32_t>& inputShape,
             const std::vector<int32_t>& paddingShape,
             std::vector<int32_t>& outputShape,
             std::vector<T>& inputValues,
             std::vector<int32_t>& paddingDim,
             std::vector<T>& expectedOutputValues,
             T paddingValue,
             float quantScale = 1.0f,
             int quantOffset  = 0,
             tflite::MirrorPadMode paddingMode = tflite::MirrorPadMode_SYMMETRIC)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreatePadTfLiteModel<T>(padOperatorCode,
                                                            tensorType,
                                                            paddingMode,
                                                            inputShape,
                                                            paddingShape,
                                                            outputShape,
                                                            paddingDim,
                                                            {paddingValue},
                                                            quantScale,
                                                            quantOffset);

    // Setup interpreter with just TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

    // Setup interpreter with Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

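    // The delegate run must agree with both the reference run and the expected data.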
    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}

} // anonymous namespace