//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <schema_generated.h>

#include <doctest/doctest.h>

namespace
{

template <typename T>
std::vector<char> CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType,
                                                  tflite::ActivationFunctionType activationType,
                                                  const std::vector<int32_t>& inputTensorShape,
                                                  const std::vector<int32_t>& weightsTensorShape,
                                                  const std::vector<int32_t>& biasTensorShape,
                                                  std::vector<int32_t>& outputTensorShape,
                                                  std::vector<T>& weightsData,
                                                  bool constantWeights = true,
                                                  float quantScale = 1.0f,
                                                  int quantOffset = 0,
                                                  float outputQuantScale = 2.0f,
                                                  int outputQuantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;
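    // Buffer 0 is the empty sentinel buffer required by the TFLite schema;
    // buffers 1-4 back the input, weights, bias and output tensors. Weight
    // and bias data are only embedded when constantWeights is true.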
    std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);
    buffers[1] = CreateBuffer(flatBufferBuilder);

    auto biasTensorType = ::tflite::TensorType_FLOAT32;
    if (tensorType == ::tflite::TensorType_INT8)
    {
        biasTensorType = ::tflite::TensorType_INT32;
    }
    if (constantWeights)
    {
        buffers[2] = CreateBuffer(flatBufferBuilder,
                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(weightsData.data()),
                                                    sizeof(T) * weightsData.size()));

        if (tensorType == ::tflite::TensorType_INT8)
        {
            std::vector<int32_t> biasData = { 10 };
            buffers[3] = CreateBuffer(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
                                                                     sizeof(int32_t) * biasData.size()));
        }
        else
        {
            std::vector<float> biasData = { 10 };
            buffers[3] = CreateBuffer(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
                                                                     sizeof(float) * biasData.size()));
        }
    }
    else
    {
        buffers[2] = CreateBuffer(flatBufferBuilder);
        buffers[3] = CreateBuffer(flatBufferBuilder);
    }
    buffers[4] = CreateBuffer(flatBufferBuilder);

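    // The input, weights and bias tensors share one set of quantization
    // parameters; the output tensor gets its own scale and zero point.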
    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    auto outputQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));

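    // Tensors 0-3 are input, weights, bias and output; each tensor n
    // references buffer n + 1 created above.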
    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input_0"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(weightsTensorShape.data(),
                                                                      weightsTensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("weights"),
                              quantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(),
                                                                      biasTensorShape.size()),
                              biasTensorType,
                              3,
                              flatBufferBuilder.CreateString("bias"),
                              quantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              4,
                              flatBufferBuilder.CreateString("output"),
                              outputQuantizationParameters);

    // Create the operator.
    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_FullyConnectedOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions =
        CreateFullyConnectedOptions(flatBufferBuilder,
                                    activationType,
                                    FullyConnectedOptionsWeightsFormat_DEFAULT, false).Union();

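    // Wire the operator: it consumes the input (0), weights (1) and bias (2)
    // tensors and produces the output tensor (3).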
    const std::vector<int> operatorInputs{0, 1, 2};
    const std::vector<int> operatorOutputs{3};
    flatbuffers::Offset<Operator> fullyConnectedOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType, operatorBuiltinOptions);

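    // Weights and bias are also listed as subgraph inputs so that they can be
    // supplied at runtime when constantWeights is false.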
    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{3};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&fullyConnectedOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: FullyConnected Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
                                                                        tflite::BuiltinOperator_FULLY_CONNECTED);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

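    // Finish the buffer with the TFLite file identifier so the bytes can be
    // parsed back as a TFLite model.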
    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

template <typename T>
void FullyConnectedTest(std::vector<armnn::BackendId>& backends,
                        tflite::TensorType tensorType,
                        tflite::ActivationFunctionType activationType,
                        const std::vector<int32_t>& inputTensorShape,
                        const std::vector<int32_t>& weightsTensorShape,
                        const std::vector<int32_t>& biasTensorShape,
                        std::vector<int32_t>& outputTensorShape,
                        std::vector<T>& inputValues,
                        std::vector<T>& expectedOutputValues,
                        std::vector<T>& weightsData,
                        bool constantWeights = true,
                        float quantScale = 1.0f,
                        int quantOffset = 0)
{
    using namespace delegateTestInterpreter;

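    // Build the FlatBuffer model once; both interpreters below consume the
    // same buffer.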
    std::vector<char> modelBuffer = CreateFullyConnectedTfLiteModel(tensorType,
                                                                    activationType,
                                                                    inputTensorShape,
                                                                    weightsTensorShape,
                                                                    biasTensorShape,
                                                                    outputTensorShape,
                                                                    weightsData,
                                                                    constantWeights,
                                                                    quantScale,
                                                                    quantOffset);

    // Setup interpreter with just TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);

    // Setup interpreter with Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);

    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);

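    // When the weights are not constant they are subgraph inputs, so the
    // weights and bias values have to be supplied at runtime. The bias value
    // matches the constant case: a single element of 10.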
    if (!constantWeights)
    {
        CHECK(tfLiteInterpreter.FillInputTensor<T>(weightsData, 1) == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<T>(weightsData, 1) == kTfLiteOk);

        if (tensorType == ::tflite::TensorType_INT8)
        {
            std::vector<int32_t> biasData = { 10 };
            CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(biasData, 2) == kTfLiteOk);
            CHECK(armnnInterpreter.FillInputTensor<int32_t>(biasData, 2) == kTfLiteOk);
        }
        else
        {
            std::vector<float> biasData = { 10 };
            CHECK(tfLiteInterpreter.FillInputTensor<float>(biasData, 2) == kTfLiteOk);
            CHECK(armnnInterpreter.FillInputTensor<float>(biasData, 2) == kTfLiteOk);
        }
    }

    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

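    // Check that both interpreters agree with each other and with the
    // expected values and output shape.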
    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}

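// A minimal usage sketch (illustrative only; the function name, shapes and
// values here are assumptions for demonstration, not taken from the actual
// test files). With the bias fixed to 10 inside the model, the expected
// FLOAT32 output is dot(input, weights) + 10 = (10*2 + 20*3 + 30*4 + 40*5)
// + 10 = 410.
void FullyConnectedFp32ExampleSketch(std::vector<armnn::BackendId>& backends)
{
    std::vector<int32_t> inputTensorShape   { 1, 4, 1, 1 };
    std::vector<int32_t> weightsTensorShape { 1, 4 };
    std::vector<int32_t> biasTensorShape    { 1 };
    std::vector<int32_t> outputTensorShape  { 1, 1 };

    std::vector<float> inputValues          { 10, 20, 30, 40 };
    std::vector<float> weightsData          { 2, 3, 4, 5 };
    std::vector<float> expectedOutputValues { 410 }; // 400 from the dot product, 10 from the bias.

    FullyConnectedTest<float>(backends,
                              ::tflite::TensorType_FLOAT32,
                              tflite::ActivationFunctionType_NONE,
                              inputTensorShape,
                              weightsTensorShape,
                              biasTensorShape,
                              outputTensorShape,
                              inputValues,
                              expectedOutputValues,
                              weightsData);
}
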
} // anonymous namespace