xref: /aosp_15_r20/external/tensorflow/tensorflow/lite/testing/kernel_test/input_generator.cc (revision b6fb3261f9314811a0f4371741dbb8839866f948)
1 /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
#include "tensorflow/lite/testing/kernel_test/input_generator.h"

#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <functional>
#include <limits>
#include <random>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/testing/join.h"
#include "tensorflow/lite/testing/split.h"
30 
31 namespace tflite {
32 namespace testing {
33 
34 namespace {
35 static constexpr char kDefaultServingSignatureDefKey[] = "serving_default";
36 
37 template <typename T>
GenerateRandomTensor(TfLiteIntArray * dims,const std::function<T (int)> & random_func)38 std::vector<T> GenerateRandomTensor(TfLiteIntArray* dims,
39                                     const std::function<T(int)>& random_func) {
40   int64_t num_elements = 1;
41   for (int i = 0; i < dims->size; i++) {
42     num_elements *= dims->data[i];
43   }
44 
45   std::vector<T> result(num_elements);
46   for (int i = 0; i < num_elements; i++) {
47     result[i] = random_func(i);
48   }
49   return result;
50 }
51 
52 template <typename T>
GenerateUniform(TfLiteIntArray * dims,float min,float max)53 std::vector<T> GenerateUniform(TfLiteIntArray* dims, float min, float max) {
54   auto random_float = [](float min, float max) {
55     // TODO(yunluli): Change seed for each invocation if needed.
56     // Used rand() instead of rand_r() here to make it runnable on android.
57     return min + (max - min) * static_cast<float>(rand()) / RAND_MAX;
58   };
59 
60   std::function<T(int)> random_t = [&](int) {
61     return static_cast<T>(random_float(min, max));
62   };
63   std::vector<T> data = GenerateRandomTensor(dims, random_t);
64   return data;
65 }
66 
67 template <typename T>
GenerateGaussian(TfLiteIntArray * dims,float min,float max)68 std::vector<T> GenerateGaussian(TfLiteIntArray* dims, float min, float max) {
69   auto random_float = [](float min, float max) {
70     static std::default_random_engine generator;
71     // We generate a float number within [0, 1) following a mormal distribution
72     // with mean = 0.5 and stddev = 1/3, and use it to scale the final random
73     // number into the desired range.
74     static std::normal_distribution<double> distribution(0.5, 1.0 / 3);
75     auto rand_n = distribution(generator);
76     while (rand_n < 0 || rand_n >= 1) {
77       rand_n = distribution(generator);
78     }
79 
80     return min + (max - min) * static_cast<float>(rand_n);
81   };
82 
83   std::function<T(int)> random_t = [&](int) {
84     return static_cast<T>(random_float(min, max));
85   };
86   std::vector<T> data = GenerateRandomTensor(dims, random_t);
87   return data;
88 }
89 
90 }  // namespace
91 
// Loads the model from `model_dir` using the default "serving_default"
// signature key. See the two-argument overload for failure modes.
TfLiteStatus InputGenerator::LoadModel(const string& model_dir) {
  return LoadModel(model_dir, kDefaultServingSignatureDefKey);
}
95 
LoadModel(const string & model_dir,const string & signature)96 TfLiteStatus InputGenerator::LoadModel(const string& model_dir,
97                                        const string& signature) {
98   model_ = FlatBufferModel::BuildFromFile(model_dir.c_str());
99   if (!model_) {
100     fprintf(stderr, "Cannot load model %s", model_dir.c_str());
101     return kTfLiteError;
102   }
103 
104   ::tflite::ops::builtin::BuiltinOpResolver builtin_ops;
105   InterpreterBuilder(*model_, builtin_ops)(&interpreter_);
106   if (!interpreter_) {
107     fprintf(stderr, "Failed to build interpreter.");
108     return kTfLiteError;
109   }
110   signature_runner_ = interpreter_->GetSignatureRunner(signature.c_str());
111   if (!signature_runner_) {
112     fprintf(stderr, "Failed to get SignatureRunner.\n");
113     return kTfLiteError;
114   }
115 
116   return kTfLiteOk;
117 }
118 
ReadInputsFromFile(const string & filename)119 TfLiteStatus InputGenerator::ReadInputsFromFile(const string& filename) {
120   if (filename.empty()) {
121     fprintf(stderr, "Empty input file name.");
122     return kTfLiteError;
123   }
124 
125   std::ifstream input_file(filename);
126   string input;
127   while (std::getline(input_file, input, '\n')) {
128     std::vector<string> parts = Split<string>(input, ":");
129     if (parts.size() != 2) {
130       fprintf(stderr, "Expected <name>:<value>, got %s", input.c_str());
131       return kTfLiteError;
132     }
133     inputs_.push_back(std::make_pair(parts[0], parts[1]));
134   }
135   input_file.close();
136   return kTfLiteOk;
137 }
138 
WriteInputsToFile(const string & filename)139 TfLiteStatus InputGenerator::WriteInputsToFile(const string& filename) {
140   if (filename.empty()) {
141     fprintf(stderr, "Empty input file name.");
142     return kTfLiteError;
143   }
144 
145   std::ofstream output_file;
146   output_file.open(filename, std::fstream::out | std::fstream::trunc);
147   if (!output_file) {
148     fprintf(stderr, "Failed to open output file %s.", filename.c_str());
149     return kTfLiteError;
150   }
151 
152   for (const auto& input : inputs_) {
153     output_file << input.first << ":" << input.second << "\n";
154   }
155   output_file.close();
156 
157   return kTfLiteOk;
158 }
159 
160 // TODO(yunluli): Support more tensor types when needed.
GenerateInput(const string & distribution)161 TfLiteStatus InputGenerator::GenerateInput(const string& distribution) {
162   auto input_tensor_names = signature_runner_->input_names();
163   for (const char* name : input_tensor_names) {
164     auto* tensor = signature_runner_->input_tensor(name);
165     if (distribution == "UNIFORM") {
166       switch (tensor->type) {
167         case kTfLiteInt8: {
168           auto data = GenerateUniform<int8_t>(
169               tensor->dims, std::numeric_limits<int8_t>::min(),
170               std::numeric_limits<int8_t>::max());
171           inputs_.push_back(
172               std::make_pair(name, Join(data.data(), data.size(), ",")));
173           break;
174         }
175         case kTfLiteUInt8: {
176           auto data = GenerateUniform<uint8_t>(
177               tensor->dims, std::numeric_limits<uint8_t>::min(),
178               std::numeric_limits<uint8_t>::max());
179           inputs_.push_back(
180               std::make_pair(name, Join(data.data(), data.size(), ",")));
181           break;
182         }
183         case kTfLiteFloat32: {
184           auto data = GenerateUniform<float>(tensor->dims, -1, 1);
185           inputs_.push_back(
186               std::make_pair(name, Join(data.data(), data.size(), ",")));
187           break;
188         }
189         default:
190           fprintf(stderr, "Unsupported input tensor type %s.",
191                   TfLiteTypeGetName(tensor->type));
192           break;
193       }
194     } else if (distribution == "GAUSSIAN") {
195       switch (tensor->type) {
196         case kTfLiteInt8: {
197           auto data = GenerateGaussian<int8_t>(
198               tensor->dims, std::numeric_limits<int8_t>::min(),
199               std::numeric_limits<int8_t>::max());
200           inputs_.push_back(
201               std::make_pair(name, Join(data.data(), data.size(), ",")));
202           break;
203         }
204         case kTfLiteUInt8: {
205           auto data = GenerateGaussian<uint8_t>(
206               tensor->dims, std::numeric_limits<uint8_t>::min(),
207               std::numeric_limits<uint8_t>::max());
208           inputs_.push_back(
209               std::make_pair(name, Join(data.data(), data.size(), ",")));
210           break;
211         }
212         case kTfLiteFloat32: {
213           auto data = GenerateGaussian<float>(tensor->dims, -1, 1);
214           inputs_.push_back(
215               std::make_pair(name, Join(data.data(), data.size(), ",")));
216           break;
217         }
218         default:
219           fprintf(stderr, "Unsupported input tensor type %s.",
220                   TfLiteTypeGetName(tensor->type));
221           break;
222       }
223     } else {
224       fprintf(stderr, "Unsupported distribution %s.", distribution.c_str());
225       return kTfLiteError;
226     }
227   }
228 
229   return kTfLiteOk;
230 }
231 
232 }  // namespace testing
233 }  // namespace tflite
234