/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <algorithm>
#include <cmath>
#include <cstring>
#include <memory>
#include <random>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/hexagon/hexagon_delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/interpreter_builder.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include "tensorflow/lite/tools/logging.h"

ABSL_FLAG(std::string, model_file_path, "", "Path to the test model file.");
ABSL_FLAG(std::string, model_input_shapes, "",
          "List of input shapes to test. The model input is resized to each "
          "shape in order and tested. Shapes should be separated by ':' and "
          "the dimensions within each shape separated by ','.");
ABSL_FLAG(int, max_batch_size, -1,
          "Maximum batch size for a single run by hexagon.");
ABSL_FLAG(double, error_epsilon, 0.2,
          "Maximum error allowed while diffing the output.");

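// Example invocation (binary name, model path, and shapes below are
// illustrative only):
//   hexagon_dynamic_batch_test \
//     --model_file_path=/data/local/tmp/model.tflite \
//     --model_input_shapes=1,224,224,3:2,224,224,3 \
//     --max_batch_size=4 \
//     --error_epsilon=0.2
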
namespace tflite {
namespace {
// Returns randomly generated data of size 'num_elements'.
std::vector<uint8_t> GetData(int num_elements) {
  std::vector<uint8_t> result(num_elements);
  std::random_device random_engine;
  std::uniform_int_distribution<uint32_t> distribution(0, 254);
  std::generate_n(result.data(), num_elements, [&]() {
    return static_cast<uint8_t>(distribution(random_engine));
  });
  return result;
}

// Returns the total number of elements.
int NumElements(const std::vector<int>& shape) {
  int num_elements = 1;
  for (int dim : shape) num_elements *= dim;
  return num_elements;
}

// Returns true if 'control' and 'exp' values match up to 'epsilon'.
bool DiffOutput(const std::vector<float>& control,
                const std::vector<float>& exp, double epsilon) {
  if (control.size() != exp.size()) {
    TFLITE_LOG(ERROR) << "Mismatched size. Expected " << control.size()
                      << " got " << exp.size();
    return false;
  }
  bool has_diff = false;
  for (int i = 0; i < control.size(); ++i) {
    if (std::abs(control[i] - exp[i]) > epsilon) {
      TFLITE_LOG(ERROR) << control[i] << " " << exp[i];
      has_diff = true;
    }
  }
  return !has_diff;
}

bool DiffOutput(const std::vector<float>& control,
                const std::vector<float>& exp) {
  return DiffOutput(control, exp, absl::GetFlag(FLAGS_error_epsilon));
}
}  // namespace

class TestModel {
 public:
  TestModel() : delegate_(nullptr, [](TfLiteDelegate* delegate) {}) {}

  // Initializes the model by reading it from file and building the
  // interpreter.
  void Init() {
    model_ = tflite::FlatBufferModel::BuildFromFile(
        absl::GetFlag(FLAGS_model_file_path).c_str());
    ASSERT_TRUE(model_ != nullptr);

    resolver_ = std::make_unique<ops::builtin::BuiltinOpResolver>();
    InterpreterBuilder(*model_, *resolver_)(&interpreter_);
    ASSERT_TRUE(interpreter_ != nullptr);
  }

  // Add Hexagon delegate to the graph.
  void ApplyDelegate(int max_batch_size,
                     const std::vector<int>& input_batch_dimensions,
                     const std::vector<int>& output_batch_dimensions) {
    TfLiteIntArray* input_batch_dim =
        TfLiteIntArrayCreate(input_batch_dimensions.size());
    TfLiteIntArray* output_batch_dim =
        TfLiteIntArrayCreate(output_batch_dimensions.size());
    for (int i = 0; i < input_batch_dimensions.size(); ++i)
      input_batch_dim->data[i] = input_batch_dimensions[i];
    for (int i = 0; i < output_batch_dimensions.size(); ++i)
      output_batch_dim->data[i] = output_batch_dimensions[i];
    ::TfLiteHexagonDelegateOptions options = {0};
    options.enable_dynamic_batch_size = true;
    options.max_batch_size = max_batch_size;
    options.input_batch_dimensions = input_batch_dim;
    options.output_batch_dimensions = output_batch_dim;
    TfLiteDelegate* delegate = TfLiteHexagonDelegateCreate(&options);
    ASSERT_TRUE(delegate != nullptr);
    delegate_ = std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>(
        delegate, [](TfLiteDelegate* delegate) {
          TfLiteHexagonDelegateDelete(delegate);
        });
    ASSERT_TRUE(interpreter_->ModifyGraphWithDelegate(delegate_.get()) ==
                kTfLiteOk);
  }

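  // Resizes the model input to 'input_shape', copies 'input_data' into it,
  // and runs inference.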
  void Run(const std::vector<int>& input_shape,
           const std::vector<uint8_t>& input_data) {
    // Resize Inputs.
    auto interpreter_inputs = interpreter_->inputs();
    interpreter_->ResizeInputTensor(interpreter_inputs[0], input_shape);
    ASSERT_EQ(kTfLiteOk, interpreter_->AllocateTensors());

    TfLiteTensor* input_tensor =
        interpreter_->tensor(interpreter_->inputs()[0]);
    memcpy(input_tensor->data.raw, input_data.data(),
           input_data.size() * sizeof(uint8_t));

    ASSERT_EQ(kTfLiteOk, interpreter_->Invoke());
  }

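  // Returns the dequantized (float) values of the output tensor at
  // 'output_index'.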
  std::vector<float> GetOutput(int output_index) {
    auto* tensor = interpreter_->output_tensor(output_index);
    uint8_t* data = interpreter_->typed_output_tensor<uint8_t>(output_index);
    std::vector<float> result;
    result.resize(NumElements(tensor));
    const auto scale =
        reinterpret_cast<TfLiteAffineQuantization*>(tensor->quantization.params)
            ->scale->data[0];
    const auto zero_point =
        reinterpret_cast<TfLiteAffineQuantization*>(tensor->quantization.params)
            ->zero_point->data[0];
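    // Dequantize each element: real_value = scale * (quantized - zero_point).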
    for (int i = 0; i < result.size(); ++i) {
      result[i] = scale * (data[i] - zero_point);
    }
    return result;
  }

 private:
  std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)> delegate_;
  std::unique_ptr<FlatBufferModel> model_;
  std::unique_ptr<tflite::OpResolver> resolver_;
  std::unique_ptr<Interpreter> interpreter_;
};

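// Parses --model_input_shapes into a list of shapes: shapes are separated by
// ':' and the dimensions within each shape by ','.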
std::vector<std::vector<int>> ParseInputShapes() {
  std::vector<string> str_input_shapes;
  benchmark::util::SplitAndParse(absl::GetFlag(FLAGS_model_input_shapes), ':',
                                 &str_input_shapes);
  std::vector<std::vector<int>> input_shapes(str_input_shapes.size());
  for (int i = 0; i < str_input_shapes.size(); ++i) {
    benchmark::util::SplitAndParse(str_input_shapes[i], ',', &input_shapes[i]);
  }
  return input_shapes;
}

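// Runs the same model with and without the Hexagon delegate on each of the
// requested input shapes and compares the dequantized outputs.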
TEST(HexagonDynamicBatch, MultipleResizes) {
  int num_failed_tests = 0;
  int num_test = 0;
  auto test_input_shapes = ParseInputShapes();
  auto default_model = std::make_unique<TestModel>();
  auto delegated_model = std::make_unique<TestModel>();
  default_model->Init();
  delegated_model->Init();
  delegated_model->ApplyDelegate(absl::GetFlag(FLAGS_max_batch_size), {0}, {0});
  for (const auto& input_shape : test_input_shapes) {
    const auto input = GetData(NumElements(input_shape));
    default_model->Run(input_shape, input);
    delegated_model->Run(input_shape, input);
    const auto default_output = default_model->GetOutput(0);
    const auto delegated_output = delegated_model->GetOutput(0);
    if (!DiffOutput(default_output, delegated_output)) {
      TFLITE_LOG(ERROR) << "Failed for input " << num_test;
      num_failed_tests++;
    }
    num_test++;
  }
  if (num_failed_tests == 0) {
    TFLITE_LOG(INFO) << "All Tests PASSED";
  } else {
    TFLITE_LOG(INFO) << "Failed " << num_failed_tests << " out of " << num_test;
  }
}
}  // namespace tflite

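// Initializes logging, parses command-line flags, brings up the Hexagon
// library, runs all tests, then tears the Hexagon library down.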
int main(int argc, char** argv) {
  ::tflite::LogToStderr();
  absl::ParseCommandLine(argc, argv);
  testing::InitGoogleTest();

  TfLiteHexagonInit();
  int return_val = RUN_ALL_TESTS();
  TfLiteHexagonTearDown();
  return return_val;
}