xref: /aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/xnnpack/concatenation_tester.cc (revision b6fb3261f9314811a0f4371741dbb8839866f948)
1 /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #include "tensorflow/lite/delegates/xnnpack/concatenation_tester.h"
17 
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <random>
#include <vector>
25 
26 #include <gtest/gtest.h>
27 #include "flatbuffers/flatbuffers.h"  // from @flatbuffers
28 #include "tensorflow/lite/interpreter.h"
29 #include "tensorflow/lite/kernels/register.h"
30 #include "tensorflow/lite/model.h"
31 #include "tensorflow/lite/schema/schema_conversion_utils.h"
32 #include "tensorflow/lite/schema/schema_generated.h"
33 #include "tensorflow/lite/version.h"
34 
35 namespace tflite {
36 namespace xnnpack {
37 
// Returns a copy of `shape` in which the extent along `axis` is replaced by
// `size`. A negative `axis` counts from the back, NumPy-style.
std::vector<int32_t> SameShapeDifferentAxis(std::vector<int32_t> shape,
                                            int axis, int32_t size) {
  std::vector<int32_t> result = shape;
  const int resolved_axis =
      axis < 0 ? axis + static_cast<int>(shape.size()) : axis;
  result[resolved_axis] = size;
  return result;
}
44 
45 template <class T>
Test(Interpreter * delegate_interpreter,Interpreter * default_interpreter) const46 void ConcatenationTester::Test(Interpreter *delegate_interpreter,
47                                Interpreter *default_interpreter) const {
48   std::random_device random_device;
49   auto rng = std::mt19937(random_device());
50   std::uniform_int_distribution<int32_t> input_distribution(
51       std::numeric_limits<T>::min(), std::numeric_limits<T>::max());
52   auto input_rng = std::bind(input_distribution, std::ref(rng));
53 
54   for (size_t i = 0; i < NumInputs(); i++) {
55     T *default_input_data = default_interpreter->typed_input_tensor<T>(i);
56     std::generate(default_input_data,
57                   default_input_data + ComputeSize(InputShape(i)),
58                   std::ref(input_rng));
59 
60     T *xnnpack_input_data = delegate_interpreter->typed_input_tensor<T>(i);
61     std::copy(default_input_data,
62               default_input_data + ComputeSize(InputShape(i)),
63               xnnpack_input_data);
64   }
65 
66   ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
67   ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);
68 
69   T *default_output_data = default_interpreter->typed_output_tensor<T>(0);
70   T *xnnpack_output_data = delegate_interpreter->typed_output_tensor<T>(0);
71 
72   for (size_t i = 0; i < ComputeSize(OutputShape()); i++) {
73     ASSERT_EQ(static_cast<int32_t>(default_output_data[i]),
74               static_cast<int32_t>(xnnpack_output_data[i]));
75   }
76 }
77 
78 template <>
Test(Interpreter * delegate_interpreter,Interpreter * default_interpreter) const79 void ConcatenationTester::Test<float>(Interpreter *delegate_interpreter,
80                                       Interpreter *default_interpreter) const {
81   std::random_device random_device;
82   auto rng = std::mt19937(random_device());
83   std::uniform_real_distribution<float> input_distribution(-25.0f, 25.0f);
84   auto input_rng = std::bind(input_distribution, std::ref(rng));
85 
86   for (size_t i = 0; i < NumInputs(); i++) {
87     float *default_input_data =
88         default_interpreter->typed_input_tensor<float>(i);
89     std::generate(default_input_data,
90                   default_input_data + ComputeSize(InputShape(i)),
91                   std::ref(input_rng));
92 
93     float *xnnpack_input_data =
94         delegate_interpreter->typed_input_tensor<float>(i);
95     std::copy(default_input_data,
96               default_input_data + ComputeSize(InputShape(i)),
97               xnnpack_input_data);
98   }
99 
100   ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
101   ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);
102 
103   float *default_output_data =
104       default_interpreter->typed_output_tensor<float>(0);
105   float *xnnpack_output_data =
106       delegate_interpreter->typed_output_tensor<float>(0);
107 
108   for (size_t i = 0; i < ComputeSize(OutputShape()); i++) {
109     ASSERT_EQ(default_output_data[i], xnnpack_output_data[i]);
110   }
111 }
112 
// Entry point of the tester: builds a single-op CONCATENATION model, runs it
// under the given delegate and under the builtin reference kernels, and
// verifies both produce identical outputs for `tensor_type` data.
void ConcatenationTester::Test(TensorType tensor_type,
                               TfLiteDelegate *delegate) const {
  // Serialize the model; `buffer` must outlive `model`, which points into it.
  std::vector<char> buffer = CreateTfLiteModel(tensor_type);
  const Model *model = GetModel(buffer.data());

  // Build two interpreters over the same model: one that will receive the
  // delegate, and an undelegated one used as the reference.
  std::unique_ptr<Interpreter> delegate_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &delegate_interpreter),
      kTfLiteOk);
  std::unique_ptr<Interpreter> default_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &default_interpreter),
      kTfLiteOk);

  // Sanity-check the constructed graphs before running anything.
  ASSERT_TRUE(delegate_interpreter);
  ASSERT_TRUE(default_interpreter);
  ASSERT_EQ(delegate_interpreter->inputs().size(), NumInputs());
  ASSERT_EQ(default_interpreter->inputs().size(), NumInputs());
  ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
  ASSERT_EQ(default_interpreter->outputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);

  // Only the first interpreter is delegated; the second keeps builtin kernels.
  ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);

  // Dispatch to the typed test body that fills inputs and compares outputs.
  switch (tensor_type) {
    case TensorType_FLOAT32:
      Test<float>(delegate_interpreter.get(), default_interpreter.get());
      break;
    case TensorType_INT8:
      Test<int8_t>(delegate_interpreter.get(), default_interpreter.get());
      break;
    case TensorType_UINT8:
      Test<uint8_t>(delegate_interpreter.get(), default_interpreter.get());
      break;
    default:
      // Any other tensor type is unsupported by this tester.
      GTEST_FAIL();
  }
}
159 
CreateTfLiteModel(TensorType tensor_type) const160 std::vector<char> ConcatenationTester::CreateTfLiteModel(
161     TensorType tensor_type) const {
162   flatbuffers::FlatBufferBuilder builder;
163   flatbuffers::Offset<OperatorCode> operator_code =
164       CreateOperatorCode(builder, BuiltinOperator_CONCATENATION, 0);
165 
166   std::vector<flatbuffers::Offset<Buffer>> buffers{{
167       CreateBuffer(builder, builder.CreateVector({})),
168   }};
169 
170   std::vector<flatbuffers::Offset<Tensor>> tensors;
171   for (size_t i = 0; i < NumInputs(); i++) {
172     tensors.push_back(
173         CreateTensor(builder,
174                      builder.CreateVector<int32_t>(InputShape(i).data(),
175                                                    InputShape(i).size()),
176                      tensor_type,
177                      /*buffer=*/0, /*name=*/0,
178                      CreateQuantizationParameters(
179                          builder, /*min=*/0, /*max=*/0,
180                          builder.CreateVector<float>({/*scale=*/1.0f}),
181                          builder.CreateVector<int64_t>({/*zero_point=*/0}))));
182   }
183 
184   tensors.push_back(CreateTensor(
185       builder,
186       builder.CreateVector<int32_t>(OutputShape().data(), OutputShape().size()),
187       tensor_type,
188       /*buffer=*/0, /*name=*/0,
189       CreateQuantizationParameters(
190           builder, /*min=*/0, /*max=*/0,
191           builder.CreateVector<float>({/*scale=*/1.0f}),
192           builder.CreateVector<int64_t>({/*zero_point=*/0}))));
193 
194   std::vector<int32_t> op_inputs;
195   for (size_t i = 0; i < NumInputs(); i++) {
196     op_inputs.push_back(static_cast<int32_t>(i));
197   }
198 
199   const std::array<int32_t, 1> op_outputs{static_cast<int32_t>(NumInputs())};
200   BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE;
201   flatbuffers::Offset<void> builtin_options = 0;
202   builtin_options_type = tflite::BuiltinOptions_ConcatenationOptions;
203   builtin_options = CreateConcatenationOptions(builder, Axis()).Union();
204   const flatbuffers::Offset<Operator> op = CreateOperator(
205       builder, /*opcode_index=*/0,
206       builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
207       builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()),
208       builtin_options_type, builtin_options);
209 
210   const std::vector<int32_t> subgraph_inputs = op_inputs;
211   const std::array<int32_t, 1> subgraph_outputs = op_outputs;
212   flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
213       builder, builder.CreateVector(tensors.data(), tensors.size()),
214       builder.CreateVector<int32_t>(subgraph_inputs.data(),
215                                     subgraph_inputs.size()),
216       builder.CreateVector<int32_t>(subgraph_outputs.data(),
217                                     subgraph_outputs.size()),
218       builder.CreateVector(&op, 1));
219 
220   const flatbuffers::Offset<Model> model_buffer = CreateModel(
221       builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
222       builder.CreateVector(&subgraph, 1),
223       builder.CreateString("Concatenation model"),
224       builder.CreateVector(buffers.data(), buffers.size()));
225 
226   builder.Finish(model_buffer);
227 
228   return std::vector<char>(builder.GetBufferPointer(),
229                            builder.GetBufferPointer() + builder.GetSize());
230 }
231 
ComputeSize(const std::vector<int32_t> & shape)232 int32_t ConcatenationTester::ComputeSize(const std::vector<int32_t> &shape) {
233   return std::accumulate(shape.cbegin(), shape.cend(), 1,
234                          std::multiplies<int32_t>());
235 }
236 
237 }  // namespace xnnpack
238 }  // namespace tflite
239