//
// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <schema_generated.h>

#include <doctest/doctest.h>

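// Helpers for testing the TfLite UNPACK operator against the Arm NN delegate:
// CreateUnpackTfLiteModel() builds a single-operator UNPACK flatbuffer model,
// and UnpackTest() runs that model on both the reference TfLite runtime and
// the Arm NN delegate, comparing output values and shapes.
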
namespace
{

std::vector<char> CreateUnpackTfLiteModel(tflite::BuiltinOperator unpackOperatorCode,
                                          tflite::TensorType tensorType,
                                          std::vector<int32_t>& inputTensorShape,
                                          const std::vector<int32_t>& outputTensorShape,
                                          const int32_t outputTensorNum,
                                          unsigned int axis = 0,
                                          float quantScale = 1.0f,
                                          int quantOffset  = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // Buffer 0 is the empty sentinel buffer required by the TfLite schema;
    // buffer 1 backs the input tensor. Output buffers are appended below.
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    const std::vector<int32_t> operatorInputs{ 0 };
    std::vector<int32_t> operatorOutputs{};
    const std::vector<int> subgraphInputs{ 0 };
    std::vector<int> subgraphOutputs{};

    // One input tensor plus outputTensorNum output tensors.
    std::vector<flatbuffers::Offset<Tensor>> tensors(outputTensorNum + 1);

    // Create input tensor
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);

    // Create one output tensor per unpacked slice, each backed by its own buffer.
    for (int i = 0; i < outputTensorNum; ++i)
    {
        tensors[i + 1] = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                              outputTensorShape.size()),
                                      tensorType,
                                      (i + 2),
                                      flatBufferBuilder.CreateString("output" + std::to_string(i)),
                                      quantizationParameters);

        buffers.push_back(CreateBuffer(flatBufferBuilder));
        operatorOutputs.push_back(i + 1);
        subgraphOutputs.push_back(i + 1);
    }

    // Create the UNPACK operator and its builtin options (num and axis)
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_UnpackOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions =
        CreateUnpackOptions(flatBufferBuilder, outputTensorNum, axis).Union();

    flatbuffers::Offset<Operator> unpackOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&unpackOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Unpack Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, unpackOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

template <typename T>
void UnpackTest(tflite::BuiltinOperator unpackOperatorCode,
                tflite::TensorType tensorType,
                std::vector<armnn::BackendId>& backends,
                std::vector<int32_t>& inputShape,
                std::vector<int32_t>& expectedOutputShape,
                std::vector<T>& inputValues,
                std::vector<std::vector<T>>& expectedOutputValues,
                unsigned int axis = 0,
                float quantScale = 1.0f,
                int quantOffset  = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateUnpackTfLiteModel(unpackOperatorCode,
                                                            tensorType,
                                                            inputShape,
                                                            expectedOutputShape,
                                                            expectedOutputValues.size(),
                                                            axis,
                                                            quantScale,
                                                            quantOffset);

    // Set up an interpreter with just the TfLite runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);

    // Compare output data
    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
    {
        std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
        std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(i);

        std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(i);

        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
    }

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}

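// A minimal usage sketch (illustrative only; the shapes, values and backend
// list below are hypothetical, not taken from an existing Arm NN test case).
// Unpacking a 2x2 FLOAT32 tensor along axis 0 yields two tensors of shape {2}:
//
//     TEST_CASE("UnpackExample")
//     {
//         std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//         std::vector<int32_t> inputShape  { 2, 2 };
//         std::vector<int32_t> outputShape { 2 };
//         std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f };
//         std::vector<std::vector<float>> expectedOutputValues { { 1.0f, 2.0f },
//                                                                { 3.0f, 4.0f } };
//         UnpackTest<float>(tflite::BuiltinOperator_UNPACK,
//                           tflite::TensorType_FLOAT32,
//                           backends,
//                           inputShape,
//                           outputShape,
//                           inputValues,
//                           expectedOutputValues,
//                           0);
//     }
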
} // anonymous namespace