//
// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <schema_generated.h>

#include <doctest/doctest.h>

namespace
{

std::vector<char> CreateBatchSpaceTfLiteModel(tflite::BuiltinOperator batchSpaceOperatorCode,
                                              tflite::TensorType tensorType,
                                              std::vector<int32_t>& inputTensorShape,
                                              std::vector<int32_t>& outputTensorShape,
                                              std::vector<unsigned int>& blockData,
                                              std::vector<std::pair<unsigned int, unsigned int>>& cropsPadData,
                                              float quantScale = 1.0f,
                                              int quantOffset  = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

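    // Buffers: index 0 is the TFLite empty-buffer sentinel, 1 backs the input tensor,
    // 2 and 3 hold the constant block-shape and crops/padding data, and 4 backs the output tensor.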
    std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);
    buffers[1] = CreateBuffer(flatBufferBuilder);
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(blockData.data()),
                                                             sizeof(int32_t) * blockData.size()));
    buffers[3] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cropsPadData.data()),
                                                             sizeof(int64_t) * cropsPadData.size()));
    buffers[4] = CreateBuffer(flatBufferBuilder);

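    // A single (scale, zero point) quantization block is shared by every tensor in the model;
    // the defaults of 1.0f / 0 leave float data effectively unquantized.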
    auto quantizationParameters =
            CreateQuantizationParameters(flatBufferBuilder,
                                         0,
                                         0,
                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::string cropsOrPadding =
            batchSpaceOperatorCode == tflite::BuiltinOperator_BATCH_TO_SPACE_ND ? "crops" : "padding";

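    // The block-shape tensor is 1-D with one entry per spatial dimension; the crops (BATCH_TO_SPACE_ND)
    // or padding (SPACE_TO_BATCH_ND) tensor is 2 x 2: one (before, after) pair per spatial dimension.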
    std::vector<int32_t> blockShape { 2 };
    std::vector<int32_t> cropsOrPaddingShape { 2, 2 };

    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
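    // Create input tensor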
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);

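    // Create block shape tensor (constant, INT32)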
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(blockShape.data(),
                                                                      blockShape.size()),
                              ::tflite::TensorType_INT32,
                              2,
                              flatBufferBuilder.CreateString("block"),
                              quantizationParameters);

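    // Create crops/padding tensor (constant, INT32)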
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(cropsOrPaddingShape.data(),
                                                                      cropsOrPaddingShape.size()),
                              ::tflite::TensorType_INT32,
                              3,
                              flatBufferBuilder.CreateString(cropsOrPadding),
                              quantizationParameters);

    // Create output tensor
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              4,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // Create operator
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
    flatbuffers::Offset<void> operatorBuiltinOptions = 0;
    switch (batchSpaceOperatorCode)
    {
        case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
        {
            operatorBuiltinOptionsType = tflite::BuiltinOptions_BatchToSpaceNDOptions;
            operatorBuiltinOptions = CreateBatchToSpaceNDOptions(flatBufferBuilder).Union();
            break;
        }
        case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
        {
            operatorBuiltinOptionsType = tflite::BuiltinOptions_SpaceToBatchNDOptions;
            operatorBuiltinOptions = CreateSpaceToBatchNDOptions(flatBufferBuilder).Union();
            break;
        }
        default:
            break;
    }

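    // The single operator consumes the input, block-shape and crops/padding tensors and produces the output tensor.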
    const std::vector<int> operatorInputs{ {0, 1, 2} };
    const std::vector<int> operatorOutputs{ 3 };
    flatbuffers::Offset<Operator> batchSpaceOperator =
            CreateOperator(flatBufferBuilder,
                           0,
                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                           operatorBuiltinOptionsType,
                           operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{ {0, 1, 2} };
    const std::vector<int> subgraphOutputs{ 3 };
    flatbuffers::Offset<SubGraph> subgraph =
            CreateSubGraph(flatBufferBuilder,
                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                           flatBufferBuilder.CreateVector(&batchSpaceOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
            flatBufferBuilder.CreateString("ArmnnDelegate: BatchSpace Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, batchSpaceOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
            CreateModel(flatBufferBuilder,
                        TFLITE_SCHEMA_VERSION,
                        flatBufferBuilder.CreateVector(&operatorCode, 1),
                        flatBufferBuilder.CreateVector(&subgraph, 1),
                        modelDescription,
                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

template <typename T>
void BatchSpaceTest(tflite::BuiltinOperator controlOperatorCode,
                    tflite::TensorType tensorType,
                    std::vector<armnn::BackendId>& backends,
                    std::vector<int32_t>& inputShape,
                    std::vector<int32_t>& expectedOutputShape,
                    std::vector<T>& inputValues,
                    std::vector<unsigned int>& blockShapeValues,
                    std::vector<std::pair<unsigned int, unsigned int>>& cropsPaddingValues,
                    std::vector<T>& expectedOutputValues,
                    float quantScale = 1.0f,
                    int quantOffset  = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateBatchSpaceTfLiteModel(controlOperatorCode,
                                                                tensorType,
                                                                inputShape,
                                                                expectedOutputShape,
                                                                blockShapeValues,
                                                                cropsPaddingValues,
                                                                quantScale,
                                                                quantOffset);

    // Set up interpreter with just the TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

    // Set up interpreter with the Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

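    // Both runs must agree with each other and with the expected reference values and shape.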
    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}

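// Illustrative usage sketch only; the concrete test cases live in the delegate's BatchSpace test
// sources, and the values below are hypothetical (BATCH_TO_SPACE_ND with a 2x2 block and no crops):
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> inputShape  { 4, 1, 1, 1 };
//     std::vector<int32_t> outputShape { 1, 2, 2, 1 };
//     std::vector<float> inputValues   { 1.0f, 2.0f, 3.0f, 4.0f };
//     std::vector<unsigned int> blockShape { 2, 2 };
//     std::vector<std::pair<unsigned int, unsigned int>> crops = { {0, 0}, {0, 0} };
//     std::vector<float> expectedOutputValues { 1.0f, 2.0f, 3.0f, 4.0f };
//
//     BatchSpaceTest<float>(tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
//                           ::tflite::TensorType_FLOAT32,
//                           backends,
//                           inputShape,
//                           outputShape,
//                           inputValues,
//                           blockShape,
//                           crops,
//                           expectedOutputValues);
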
} // anonymous namespace