/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/flex/buffer_map.h"

#include <sys/types.h>

#include <cstring>
#include <functional>
#include <memory>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/flex/buffer_map_util.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/util.h"

namespace tflite {
namespace flex {
namespace {

using ::testing::ElementsAre;

// A bit of RAII to simplify handling of TfLiteTensors in the tests.
using UniqueTfLiteTensor =
    std::unique_ptr<TfLiteTensor, std::function<void(TfLiteTensor*)>>;
template <typename T>
UniqueTfLiteTensor MakeLiteTensor(const std::vector<int>& shape,
                                  const std::vector<T>& data) {
  auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
    TfLiteTensorDataFree(t);
    TfLiteIntArrayFree(t->dims);
    delete t;
  });
  tensor->allocation_type = kTfLiteDynamic;
  tensor->type = typeToTfLiteType<T>();
  tensor->dims = ConvertVectorToTfLiteIntArray(shape);
  TfLiteTensorRealloc(data.size() * sizeof(T), tensor.get());
  memcpy(tensor->data.raw, data.data(), data.size() * sizeof(T));
  return tensor;
}

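// Specialization for string tensors: the strings are serialized into the
// tensor using TF Lite's DynamicBuffer string format.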
template <>
UniqueTfLiteTensor MakeLiteTensor<string>(const std::vector<int>& shape,
                                          const std::vector<string>& data) {
  auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
    TfLiteTensorDataFree(t);
    TfLiteIntArrayFree(t->dims);
    delete t;
  });
  tensor->allocation_type = kTfLiteDynamic;
  tensor->type = typeToTfLiteType<string>();
  tensor->dims = ConvertVectorToTfLiteIntArray(shape);
  TfLiteTensorRealloc(data.size() * sizeof(string), tensor.get());

  DynamicBuffer b;
  for (const string& s : data) {
    b.AddString(s.data(), s.size());
  }
  b.WriteToTensor(tensor.get(), ConvertVectorToTfLiteIntArray(shape));
  return tensor;
}

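// Creates a tensorflow::Tensor with the given dtype and shape, copying `data`
// into its buffer.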
template <typename T>
tensorflow::Tensor MakeTensor(const std::vector<int64_t>& shape,
                              const std::vector<T>& data,
                              tensorflow::DataType dtype) {
  tensorflow::Tensor tensor(dtype, tensorflow::TensorShape(shape));
  memcpy(tensor.data(), data.data(), data.size() * sizeof(T));
  return tensor;
}

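// Returns the dimension sizes of `t` as a vector.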
std::vector<int64_t> GetTensorShape(const tensorflow::Tensor& t) {
  std::vector<int64_t> shape(t.dims());
  for (int i = 0; i < t.dims(); ++i) {
    shape[i] = t.dim_size(i);
  }
  return shape;
}

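// Copies the contents of `t` into a std::vector<T>.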
template <typename T>
std::vector<T> GetTensorData(const tensorflow::Tensor& t) {
  const T* data = t.flat<T>().data();
  return std::vector<T>(data, data + t.NumElements());
}

TEST(BufferMapTest, EmptyBuffer) {
  BufferMap buffer_map;
  EXPECT_FALSE(buffer_map.HasTensor(0));
}

TEST(BufferMapTest, SetFromTfLite) {
  BufferMap buffer_map;

  UniqueTfLiteTensor t =
      MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
  buffer_map.SetFromTfLite(0, t.get());
  ASSERT_TRUE(buffer_map.HasTensor(0));

  EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
              ElementsAre(0, 0, 0, 0.123f, 0, 0));

  // Also check details of the tensor.
  tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
  ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_FLOAT);
  ASSERT_EQ(out_tensor.NumElements(), 6);
  ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
}

TEST(BufferMapTest, SetFromTfLiteString) {
  BufferMap buffer_map;

  UniqueTfLiteTensor t =
      MakeLiteTensor<string>({1, 2, 1, 3}, {"", "", "", "str1", "", ""});
  buffer_map.SetFromTfLite(0, t.get());
  ASSERT_TRUE(buffer_map.HasTensor(0));

  EXPECT_THAT(GetTensorData<tensorflow::tstring>(buffer_map.GetTensor(0)),
              ElementsAre("", "", "", "str1", "", ""));

  // Also check details of the tensor.
  tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
  ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_STRING);
  ASSERT_EQ(out_tensor.NumElements(), 6);
  ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
}

TEST(BufferMapTest, SetFromTfLiteTwice) {
  UniqueTfLiteTensor t1 =
      MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
  UniqueTfLiteTensor t2 =
      MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});

  BufferMap buffer_map;
  buffer_map.SetFromTfLite(0, t1.get());
  buffer_map.SetFromTfLite(0, t2.get());

  EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
              ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
}

TEST(BufferMapTest, SetFromTfLiteStringTwice) {
  UniqueTfLiteTensor t1 =
      MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
  UniqueTfLiteTensor t2 =
      MakeLiteTensor<string>({1, 2, 4}, {"", "", "", "s3", "", "", "s1", "s2"});

  BufferMap buffer_map;
  buffer_map.SetFromTfLite(0, t1.get());
  buffer_map.SetFromTfLite(0, t2.get());

  EXPECT_THAT(GetTensorData<tensorflow::tstring>(buffer_map.GetTensor(0)),
              ElementsAre("", "", "", "s3", "", "", "s1", "s2"));
}

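// A TF Lite resource tensor should surface as a DT_RESOURCE tensor whose
// handle name encodes the resource id stored in the tensor.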
TEST(BufferMapTest, SetFromTfLiteBuiltinResource) {
  BufferMap buffer_map;

  // Constructs a fake resource tensor.
  auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
    TfLiteTensorDataFree(t);
    TfLiteIntArrayFree(t->dims);
    delete t;
  });
  tensor->allocation_type = kTfLiteDynamic;
  tensor->type = kTfLiteResource;
  tensor->dims = ConvertVectorToTfLiteIntArray({1});
  TfLiteTensorRealloc(sizeof(int32_t), tensor.get());
  tensor->delegate = nullptr;
  tensor->data.i32[0] = 1;

  buffer_map.SetFromTfLite(0, tensor.get());
  // Also check details of the tensor.
  tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
  ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_RESOURCE);
  ASSERT_EQ(out_tensor.NumElements(), 1);
  tensorflow::ResourceHandle handle =
      out_tensor.flat<tensorflow::ResourceHandle>()(0);
  EXPECT_EQ(handle.name(), "tflite_resource_variable:1");
}

TEST(BufferMapTest, SetFromTensorFlow) {
  tensorflow::Tensor t1 = MakeTensor<float>(
      {1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);

  BufferMap buffer_map;
  buffer_map.SetFromTensorFlow(0, t1);

  EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
              ElementsAre(0, 0, 0, 0.123f, 0, 0));

  // Also check details of the tensor.
  tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
  ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_FLOAT);
  ASSERT_EQ(out_tensor.NumElements(), 6);
  ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
}

TEST(BufferMapTest, SetFromTensorFlowTwice) {
  tensorflow::Tensor t1 = MakeTensor<float>(
      {1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
  tensorflow::Tensor t2 = MakeTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2},
                                          tensorflow::DT_INT32);
  BufferMap buffer_map;
  buffer_map.SetFromTensorFlow(0, t1);
  buffer_map.SetFromTensorFlow(0, t2);

  EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
              ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
}

TEST(BufferMapTest, TfLiteOverwritesTensorFlow) {
  tensorflow::Tensor t1 = MakeTensor<float>(
      {1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
  UniqueTfLiteTensor t2 =
      MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});

  BufferMap buffer_map;
  buffer_map.SetFromTensorFlow(0, t1);
  buffer_map.SetFromTfLite(0, t2.get());

  EXPECT_FALSE(buffer_map.IsTensorFlowTensor(0));
  EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
              ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
}

TEST(BufferMapTest, TensorFlowOverwritesTfLite) {
  tensorflow::Tensor t1 = MakeTensor<float>(
      {1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
  UniqueTfLiteTensor t2 =
      MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
  BufferMap buffer_map;
  buffer_map.SetFromTfLite(0, t2.get());
  buffer_map.SetFromTensorFlow(0, t1);

  EXPECT_TRUE(buffer_map.IsTensorFlowTensor(0));
  EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
              ElementsAre(0, 0, 0, 0.123f, 0, 0));
}

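// Checks that TfLiteTensorBuffer reuses the heap buffer of a dynamically
// allocated TF Lite tensor rather than copying it.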
TEST(BufferMapTest, TensorflowBufferReuse) {
  TfLiteTensor tensor;
  tensor.allocation_type = kTfLiteDynamic;
  tensor.data.raw = nullptr;
  TfLiteTensorRealloc(10, &tensor);
  CHECK(tensor.data.raw);
  EXPECT_EQ(tensor.bytes, 10);

  TfLiteTensorBuffer* tensor_buffer_reused = new TfLiteTensorBuffer(&tensor);
  // Checks that the underlying buffer is reused.
  EXPECT_TRUE(tensor_buffer_reused->BufferReusedFromTfLiteTensor());
  EXPECT_EQ(tensor_buffer_reused->data(), tensor.data.raw);
  tensor_buffer_reused->Unref();

  TfLiteTensorDataFree(&tensor);
}

TEST(BufferMapTest, ExplicitlyDisableBufferReuse) {
  TfLiteTensor tensor;
  tensor.allocation_type = kTfLiteDynamic;
  tensor.data.raw = nullptr;
  TfLiteTensorRealloc(10, &tensor);
  CHECK(tensor.data.raw);
  EXPECT_EQ(tensor.bytes, 10);

  TfLiteTensorBuffer* tensor_buffer =
      new TfLiteTensorBuffer(&tensor, /*allow_reusing=*/false);
  // Checks that the underlying buffer is not reused.
  EXPECT_FALSE(tensor_buffer->BufferReusedFromTfLiteTensor());
  EXPECT_NE(tensor_buffer->data(), tensor.data.raw);
  tensor_buffer->Unref();

  TfLiteTensorDataFree(&tensor);
}

}  // namespace
}  // namespace flex
}  // namespace tflite