/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/kernels/subgraph_test_util.h"

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#include <random>
#include <string>
#include <vector>

#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"

namespace tflite {

// A simple custom op kernel ("RANDOM_INT") used by the tests below: it takes
// no inputs and produces a single random int32 output.
namespace ops {
namespace custom {
namespace random_int {

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 0);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  TfLiteTensor* output = GetOutput(context, node, 0);
  TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
  outputSize->data[0] = 1;
  return context->ResizeTensor(context, output, outputSize);
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor& output = context->tensors[node->outputs->data[0]];

  std::random_device rd;
  std::uniform_int_distribution<int> dist(1, 32768);
  output.data.i32[0] = dist(rd);
  return kTfLiteOk;
}

}  // namespace random_int

TfLiteRegistration* Register_RANDOM_INT() {
  static TfLiteRegistration r = {nullptr, nullptr, random_int::Prepare,
                                 random_int::Eval};
  return &r;
}
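
// A minimal usage sketch for the custom op above (an assumption, not code in
// this file: a test that resolves ops by name could register it on a
// tflite::MutableOpResolver):
//
//   tflite::MutableOpResolver resolver;
//   resolver.AddCustom("RANDOM_INT",
//                      tflite::ops::custom::Register_RANDOM_INT());
//
// The builders below bypass the resolver and pass the registration directly
// to Subgraph::AddNodeWithParameters.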

}  // namespace custom
}  // namespace ops

namespace subgraph_test_util {

namespace {

void SetupTensor(Subgraph* subgraph, int tensor_index, TfLiteType type) {
  ASSERT_EQ(subgraph->SetTensorParametersReadWrite(tensor_index, type, "", 0,
                                                   nullptr, {}, false),
            kTfLiteOk);
}

}  // namespace

SubgraphBuilder::~SubgraphBuilder() {
  for (auto buffer : buffers_) {
    free(buffer);
  }
}

void SubgraphBuilder::BuildAddSubgraph(Subgraph* subgraph) {
  const int kInput1 = 0;
  const int kInput2 = 1;
  const int kOutput = 2;
  const int kTensorCount = 3;
  // kInput1(0) --> +---+
  //                |ADD| --> kOutput(2)
  // kInput2(1) --> +---+

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kInput1, kTfLiteInt32);
  SetupTensor(subgraph, kInput2, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteInt32);

  TfLiteAddParams* params =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  params->activation = kTfLiteActNone;
  auto* add_reg = ops::builtin::Register_ADD();
  add_reg->builtin_code = kTfLiteBuiltinAdd;
  int node_index;
  subgraph->AddNodeWithParameters({kInput1, kInput2}, {kOutput}, {}, nullptr, 0,
                                  params, add_reg, &node_index);
}
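
// How a test typically drives a builder like the one above (a sketch modeled
// on the companion unit tests; `interpreter_` is assumed to be a
// tflite::Interpreter whose primary subgraph was passed to BuildAddSubgraph):
//
//   interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2});
//   interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2});
//   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
//   FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {5, 7});
//   FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2});
//   ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
//   CheckIntTensor(interpreter_->tensor(interpreter_->outputs()[0]), {2},
//                  {6, 9});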

// Build a subgraph with a mul op. Helper function for testing.
void SubgraphBuilder::BuildMulSubgraph(Subgraph* subgraph) {
  const int kInput1 = 0;
  const int kInput2 = 1;
  const int kOutput = 2;
  const int kTensorCount = 3;
  // kInput1(0) --> +---+
  //                |MUL| --> kOutput(2)
  // kInput2(1) --> +---+

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kInput1, kTfLiteInt32);
  SetupTensor(subgraph, kInput2, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteInt32);

  TfLiteMulParams* params =
      reinterpret_cast<TfLiteMulParams*>(malloc(sizeof(TfLiteMulParams)));
  params->activation = kTfLiteActNone;
  auto* mul_reg = ops::builtin::Register_MUL();
  mul_reg->builtin_code = kTfLiteBuiltinMul;
  int node_index;
  subgraph->AddNodeWithParameters({kInput1, kInput2}, {kOutput}, {}, nullptr, 0,
                                  params, mul_reg, &node_index);
}

// Build a subgraph with a pad op. Helper function for testing.
void SubgraphBuilder::BuildPadSubgraph(Subgraph* subgraph) {
  const int kInput1 = 0;
  const int kInput2 = 1;
  const int kOutput = 2;
  const int kTensorCount = 3;
  // kInput1(0) --> +---+
  //                |PAD| --> kOutput(2)
  // kInput2(1) --> +---+

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kInput1, kTfLiteInt32);
  SetupTensor(subgraph, kInput2, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteInt32);

  TfLitePadParams* params =
      reinterpret_cast<TfLitePadParams*>(malloc(sizeof(TfLitePadParams)));
  auto* pad_reg = ops::builtin::Register_PAD();
  pad_reg->builtin_code = kTfLiteBuiltinPad;
  int node_index;
  subgraph->AddNodeWithParameters({kInput1, kInput2}, {kOutput}, {}, nullptr, 0,
                                  params, pad_reg, &node_index);
}

void SubgraphBuilder::BuildIfSubgraph(Subgraph* subgraph) {
  const int kCondInput = 0;
  const int kInput1 = 1;
  const int kInput2 = 2;
  const int kOutput = 3;
  const int kTensorCount = 4;

  // kCondInput(0) --> +----+
  // kInput1(1) -----> | IF | --> kOutput(3)
  // kInput2(2) -----> +----+

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kCondInput, kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kCondInput, kTfLiteBool);
  SetupTensor(subgraph, kInput1, kTfLiteInt32);
  SetupTensor(subgraph, kInput2, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteInt32);

  TfLiteIfParams* params =
      reinterpret_cast<TfLiteIfParams*>(malloc(sizeof(TfLiteIfParams)));
  params->then_subgraph_index = 1;
  params->else_subgraph_index = 2;
  auto* if_reg = ops::builtin::Register_IF();
  if_reg->builtin_code = kTfLiteBuiltinIf;

  int node_index;
  subgraph->AddNodeWithParameters({kCondInput, kInput1, kInput2}, {kOutput}, {},
                                  nullptr, 0, params, if_reg, &node_index);
}
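
// The IF node above hard-codes then_subgraph_index = 1 and
// else_subgraph_index = 2, so the caller must populate those subgraphs.
// A sketch of the expected wiring (assuming a tflite::Interpreter
// `interpreter_`, as in the control-flow kernel tests):
//
//   interpreter_->AddSubgraphs(2);
//   builder_->BuildAddSubgraph(interpreter_->subgraph(1));  // "then" branch
//   builder_->BuildMulSubgraph(interpreter_->subgraph(2));  // "else" branch
//   builder_->BuildIfSubgraph(&interpreter_->primary_subgraph());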

void SubgraphBuilder::BuildLessEqualCondSubgraph(Subgraph* subgraph, int rhs) {
  const int kInput1 = 0;
  const int kInput2 = 1;
  const int kOutput = 2;
  const int kConstRhs = 3;
  const int kTensorCount = 4;

  // kInput1(0) ----> +------------+
  //                  | LESS_EQUAL | --> kOutput(2)
  // kConstRhs(3) --> +------------+
  //
  // kInput2(1) --> (unused)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kInput1, kTfLiteInt32);
  SetupTensor(subgraph, kInput2, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteBool);

  auto* le_reg = ops::builtin::Register_LESS_EQUAL();
  le_reg->builtin_code = kTfLiteBuiltinLessEqual;

  CreateConstantInt32Tensor(subgraph, kConstRhs, {1}, {rhs});
  int node_index;
  subgraph->AddNodeWithParameters({kInput1, kConstRhs}, {kOutput}, {}, nullptr,
                                  0, nullptr, le_reg, &node_index);
}

void SubgraphBuilder::BuildAccumulateLoopBodySubgraph(Subgraph* subgraph) {
  const int kInputCounter = 0;
  const int kInputValue = 1;
  const int kOutputCounter = 2;
  const int kOutputValue = 3;
  const int kConstStep = 4;
  const int kTensorCount = 5;

  // kInputCounter(0) --> +-----+
  //                      | ADD | --> kOutputCounter(2)
  // kConstStep(4) -----> +-----+            |
  //                                         |
  //                                         v
  //                                      +-----+
  //                                      | ADD | --> kOutputValue(3)
  // kInputValue(1) ----------------------+-----+

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue}), kTfLiteOk);

  SetupTensor(subgraph, kInputCounter, kTfLiteInt32);
  SetupTensor(subgraph, kInputValue, kTfLiteInt32);
  SetupTensor(subgraph, kOutputCounter, kTfLiteInt32);
  SetupTensor(subgraph, kOutputValue, kTfLiteInt32);
  CreateConstantInt32Tensor(subgraph, kConstStep, {1}, {1});

  int node_index;
  TfLiteAddParams* params =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  params->activation = kTfLiteActNone;
  params->pot_scale_int16 = false;
  auto* add_reg = ops::builtin::Register_ADD();
  add_reg->builtin_code = kTfLiteBuiltinAdd;
  subgraph->AddNodeWithParameters({0, 4}, {2}, {}, nullptr, 0, params, add_reg,
                                  &node_index);
  params = reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  params->activation = kTfLiteActNone;
  params->pot_scale_int16 = false;
  subgraph->AddNodeWithParameters({2, 1}, {3}, {}, nullptr, 0, params, add_reg,
                                  &node_index);
}

void SubgraphBuilder::BuildPadLoopBodySubgraph(Subgraph* subgraph,
                                               const std::vector<int> padding) {
  const int kInputCounter = 0;
  const int kInputValue = 1;
  const int kOutputCounter = 2;
  const int kOutputValue = 3;
  const int kConstStep = 4;
  const int kConstPadding = 5;
  const int kTensorCount = 6;

  // kInputCounter(0) --> +-----+
  //                      | ADD | --> kOutputCounter(2)
  // kConstStep(4) -----> +-----+
  //
  // kInputValue(1) ----> +-----+
  //                      | PAD | --> kOutputValue(3)
  // kConstPadding(5) --> +-----+

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue}), kTfLiteOk);

  SetupTensor(subgraph, kInputCounter, kTfLiteInt32);
  SetupTensor(subgraph, kInputValue, kTfLiteInt32);
  SetupTensor(subgraph, kOutputCounter, kTfLiteInt32);
  SetupTensor(subgraph, kOutputValue, kTfLiteInt32);

  CreateConstantInt32Tensor(subgraph, kConstStep, {1}, {1});
  ASSERT_EQ(padding.size() % 2, 0);
  int padding_dims = padding.size();
  CreateConstantInt32Tensor(subgraph, kConstPadding, {1, padding_dims},
                            padding);

  int node_index;
  TfLiteAddParams* add_params =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  add_params->activation = kTfLiteActNone;
  auto* add_reg = ops::builtin::Register_ADD();
  add_reg->builtin_code = kTfLiteBuiltinAdd;
  subgraph->AddNodeWithParameters({kInputCounter, kConstStep}, {kOutputCounter},
                                  {}, nullptr, 0, add_params, add_reg,
                                  &node_index);
  TfLitePadParams* pad_params =
      reinterpret_cast<TfLitePadParams*>(malloc(sizeof(TfLitePadParams)));
  auto* pad_reg = ops::builtin::Register_PAD();
  pad_reg->builtin_code = kTfLiteBuiltinPad;
  subgraph->AddNodeWithParameters({kInputValue, kConstPadding}, {kOutputValue},
                                  {}, nullptr, 0, pad_params, pad_reg,
                                  &node_index);
}

void SubgraphBuilder::BuildWhileSubgraph(Subgraph* subgraph) {
  const int kInput1 = 0;
  const int kInput2 = 1;
  const int kOutput1 = 2;
  const int kOutput2 = 3;
  const int kTensorCount = 4;

  // kInput1(0) --> +-------+ --> kOutput1(2)
  //                | WHILE |
  // kInput2(1) --> +-------+ --> kOutput2(3)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput1, kOutput2}), kTfLiteOk);

  SetupTensor(subgraph, kInput1, kTfLiteInt32);
  SetupTensor(subgraph, kInput2, kTfLiteInt32);
  SetupTensor(subgraph, kOutput1, kTfLiteInt32);
  SetupTensor(subgraph, kOutput2, kTfLiteInt32);

  TfLiteWhileParams* params =
      reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
  params->cond_subgraph_index = 1;
  params->body_subgraph_index = 2;
  auto* while_reg = ops::builtin::Register_WHILE();
  while_reg->builtin_code = kTfLiteBuiltinWhile;

  int node_index;
  subgraph->AddNodeWithParameters({0, 1}, {2, 3}, {}, nullptr, 0, params,
                                  while_reg, &node_index);
}
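
// As with IF, the WHILE node hard-codes cond_subgraph_index = 1 and
// body_subgraph_index = 2. A sketch of the expected wiring (assuming a
// tflite::Interpreter `interpreter_`, as in the while kernel tests):
//
//   interpreter_->AddSubgraphs(2);
//   builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1),
//                                        /*rhs=*/3);
//   builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
//   builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());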

void SubgraphBuilder::BuildAssignRandomValueToVariableSubgraph(
    Subgraph* subgraph) {
  const int kConstResourceId = 0;
  const int kRandomValue = 1;
  const int kTensorCount = 3;

  // Construct a graph like this:
  //   %1 = random_int()
  //   variable_assign(%0, %1)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(subgraph->SetInputs({}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({}), kTfLiteOk);

  SetupTensor(subgraph, kRandomValue, kTfLiteInt32);
  CreateConstantInt32Tensor(subgraph, kConstResourceId, {1}, {1024});

  int node_index;
  subgraph->AddNodeWithParameters({}, {kRandomValue}, {}, nullptr, 0, nullptr,
                                  ::tflite::ops::custom::Register_RANDOM_INT(),
                                  &node_index);
  subgraph->AddNodeWithParameters(
      {kConstResourceId, kRandomValue}, {}, {}, nullptr, 0, nullptr,
      ::tflite::ops::builtin::Register_ASSIGN_VARIABLE(), &node_index);
}

void SubgraphBuilder::BuildCallOnceAndReadVariableSubgraph(Subgraph* subgraph) {
  const int kConstResourceId = 0;
  const int kOutput = 1;
  const int kTensorCount = 2;

  // Construct a graph like this:
  //   Output: %1
  //   %1 = read_variable(%0)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(subgraph->SetInputs({}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kOutput, kTfLiteInt32);
  CreateConstantInt32Tensor(subgraph, kConstResourceId, {1}, {1024});

  TfLiteCallOnceParams* params = reinterpret_cast<TfLiteCallOnceParams*>(
      malloc(sizeof(TfLiteCallOnceParams)));
  params->init_subgraph_index = 1;

  int node_index;
  subgraph->AddNodeWithParameters({}, {}, {}, nullptr, 0, params,
                                  ::tflite::ops::builtin::Register_CALL_ONCE(),
                                  &node_index);
  subgraph->AddNodeWithParameters(
      {kConstResourceId}, {kOutput}, {}, nullptr, 0, nullptr,
      ::tflite::ops::builtin::Register_READ_VARIABLE(), &node_index);
}
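
// The CALL_ONCE node above points at init_subgraph_index = 1, which is
// expected to hold an initialization graph such as the one produced by
// BuildAssignRandomValueToVariableSubgraph. A sketch of the pairing (assuming
// a tflite::Interpreter `interpreter_`):
//
//   interpreter_->AddSubgraphs(1);
//   builder_->BuildAssignRandomValueToVariableSubgraph(
//       interpreter_->subgraph(1));
//   builder_->BuildCallOnceAndReadVariableSubgraph(
//       &interpreter_->primary_subgraph());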

void SubgraphBuilder::BuildCallOnceAndReadVariablePlusOneSubgraph(
    Subgraph* subgraph) {
  const int kConstResourceId = 0;
  const int kConstOne = 1;
  const int kReadVariableResult = 2;
  const int kOutput = 3;
  const int kTensorCount = 4;

  // Construct a graph like this:
  //   Output: %3
  //   %2 = read_variable(%0)
  //   %3 = add(%2, %1)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(subgraph->SetInputs({}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kReadVariableResult, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteInt32);
  CreateConstantInt32Tensor(subgraph, kConstResourceId, {1}, {1024});
  CreateConstantInt32Tensor(subgraph, kConstOne, {1}, {1});

  TfLiteCallOnceParams* params = reinterpret_cast<TfLiteCallOnceParams*>(
      malloc(sizeof(TfLiteCallOnceParams)));
  params->init_subgraph_index = 1;

  int node_index;
  subgraph->AddNodeWithParameters({}, {}, {}, nullptr, 0, params,
                                  ::tflite::ops::builtin::Register_CALL_ONCE(),
                                  &node_index);
  subgraph->AddNodeWithParameters(
      {kConstResourceId}, {kReadVariableResult}, {}, nullptr, 0, nullptr,
      ::tflite::ops::builtin::Register_READ_VARIABLE(), &node_index);

  TfLiteAddParams* add_params =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  add_params->activation = kTfLiteActNone;
  subgraph->AddNodeWithParameters(
      {kReadVariableResult, kConstOne}, {kOutput}, {}, nullptr, 0, add_params,
      ::tflite::ops::builtin::Register_ADD(), &node_index);
}

void SubgraphBuilder::BuildLessEqualCondSubgraphWithDynamicTensor(
    Subgraph* subgraph, int rhs) {
  const int kStringInput1 = 0;
  const int kStringInput2 = 1;
  const int kIntegerInput = 2;
  const int kOutput = 3;
  const int kConstRhs = 4;
  const int kTensorCount = 5;

  // kIntegerInput(2) --> +------------+
  //                      | LESS_EQUAL | --> kOutput(3)
  //     kConstRhs(4) --> +------------+
  //
  // kStringInput1(0) --> (unused)
  // kStringInput2(1) --> (unused)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kStringInput1, kStringInput2, kIntegerInput}),
            kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kStringInput1, kTfLiteString);
  SetupTensor(subgraph, kStringInput2, kTfLiteString);
  SetupTensor(subgraph, kIntegerInput, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteBool);

  auto* le_reg = ops::builtin::Register_LESS_EQUAL();
  le_reg->builtin_code = kTfLiteBuiltinLessEqual;

  CreateConstantInt32Tensor(subgraph, kConstRhs, {1}, {rhs});
  int node_index;
  subgraph->AddNodeWithParameters({kIntegerInput, kConstRhs}, {kOutput}, {},
                                  nullptr, 0, nullptr, le_reg, &node_index);
}

void SubgraphBuilder::BuildBodySubgraphWithDynamicTensor(Subgraph* subgraph) {
  const int kStringInput1 = 0;
  const int kStringInput2 = 1;
  const int kIntegerInput = 2;
  const int kStringOutput1 = 0;  // Forwards the `kStringInput1` tensor.
  const int kStringOutput2 = 4;
  const int kIntegerOutput = 5;
  const int kConst = 6;
  const int kTensorCount = 7;

  // Construct a graph like this:
  //   %5 = tf.Add(%2, 1)
  //   %4 = tf.Fill(%0, %5)
  //   yield(%0, %4, %5)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kStringInput1, kStringInput2, kIntegerInput}),
            kTfLiteOk);
  ASSERT_EQ(
      subgraph->SetOutputs({kStringOutput1, kStringOutput2, kIntegerOutput}),
      kTfLiteOk);

  SetupTensor(subgraph, kStringInput1, kTfLiteString);
  SetupTensor(subgraph, kStringInput2, kTfLiteString);
  SetupTensor(subgraph, kIntegerInput, kTfLiteInt32);
  SetupTensor(subgraph, kStringOutput1, kTfLiteString);
  SetupTensor(subgraph, kStringOutput2, kTfLiteString);
  SetupTensor(subgraph, kIntegerOutput, kTfLiteInt32);
  SetupTensor(subgraph, kConst, kTfLiteInt32);

  TfLiteAddParams* add_params =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  add_params->activation = kTfLiteActNone;

  auto* add_reg = ops::builtin::Register_ADD();
  add_reg->builtin_code = kTfLiteBuiltinAdd;

  CreateConstantInt32Tensor(subgraph, kConst, {1}, {1});
  int node_index;
  subgraph->AddNodeWithParameters({kIntegerInput, kConst}, {kIntegerOutput}, {},
                                  nullptr, 0, add_params, add_reg, &node_index);

  auto* fill_reg = ops::builtin::Register_FILL();
  fill_reg->builtin_code = kTfLiteBuiltinFill;
  subgraph->AddNodeWithParameters({kIntegerOutput, kStringInput1},
                                  {kStringOutput2}, {}, nullptr, 0, nullptr,
                                  fill_reg, &node_index);
}

void SubgraphBuilder::BuildWhileSubgraphWithDynamicTensor(Subgraph* subgraph) {
  const int kStringInput1 = 0;
  const int kStringInput2 = 1;
  const int kIntegerInput = 2;
  const int kStringOutput1 = 3;
  const int kStringOutput2 = 4;
  const int kIntegerOutput = 5;
  const int kTensorCount = 6;

  // Create a while op with 2 string tensors and 1 integer tensor.
  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kStringInput1, kStringInput2, kIntegerInput}),
            kTfLiteOk);
  ASSERT_EQ(
      subgraph->SetOutputs({kStringOutput1, kStringOutput2, kIntegerOutput}),
      kTfLiteOk);

  SetupTensor(subgraph, kStringInput1, kTfLiteString);
  SetupTensor(subgraph, kStringInput2, kTfLiteString);
  SetupTensor(subgraph, kIntegerInput, kTfLiteInt32);
  SetupTensor(subgraph, kStringOutput1, kTfLiteString);
  SetupTensor(subgraph, kStringOutput2, kTfLiteString);
  SetupTensor(subgraph, kIntegerOutput, kTfLiteInt32);

  TfLiteWhileParams* params =
      reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
  params->cond_subgraph_index = 1;
  params->body_subgraph_index = 2;
  auto* while_reg = ops::builtin::Register_WHILE();
  while_reg->builtin_code = kTfLiteBuiltinWhile;

  int node_index;
  subgraph->AddNodeWithParameters(
      {kStringInput1, kStringInput2, kIntegerInput},
      {kStringOutput1, kStringOutput2, kIntegerOutput}, {}, nullptr, 0, params,
      while_reg, &node_index);
}

void SubgraphBuilder::CreateConstantInt32Tensor(Subgraph* subgraph,
                                                int tensor_index,
                                                const std::vector<int>& shape,
                                                const std::vector<int>& data) {
  ASSERT_GT(shape.size(), 0);
  int num_elements = 1;
  for (int dim : shape) {
    num_elements *= dim;
  }
  ASSERT_EQ(data.size(), num_elements);
  size_t size_in_bytes = sizeof(int32_t) * num_elements;
  // The malloc'ed buffer may or may not be suitably aligned; that is
  // acceptable for test-only constant data.
  int32_t* buffer = reinterpret_cast<int32_t*>(malloc(size_in_bytes));
  for (int i = 0; i < num_elements; ++i) {
    buffer[i] = data[i];
  }
  buffers_.push_back(buffer);
  ASSERT_EQ(subgraph->SetTensorParametersReadOnly(
                tensor_index, kTfLiteInt32, "", shape, {},
                reinterpret_cast<const char*>(buffer), size_in_bytes),
            kTfLiteOk);
}
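
// Note on ownership: the read-only tensor created above references the
// malloc'ed buffer rather than copying it, which is why the pointer is stored
// in `buffers_` and released in ~SubgraphBuilder(). Typical call, as used by
// the loop-body builders above:
//
//   // A scalar constant 1 used as the loop step.
//   CreateConstantInt32Tensor(subgraph, kConstStep, {1}, {1});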

void FillIntTensor(TfLiteTensor* tensor, const std::vector<int32_t>& data) {
  int count = NumElements(tensor);
  ASSERT_EQ(count, data.size());
  for (int i = 0; i < count; ++i) {
    tensor->data.i32[i] = data[i];
  }
}

void FillScalarStringTensor(TfLiteTensor* tensor, const std::string& data) {
  StringRef str_ref;
  str_ref.str = data.c_str();
  str_ref.len = data.size();
  DynamicBuffer buf;
  buf.AddString(str_ref);
  buf.WriteToTensor(tensor, /*new_shape=*/TfLiteIntArrayCreate(0));
}

void CheckScalarStringTensor(const TfLiteTensor* tensor,
                             const std::string& data) {
  ASSERT_EQ(tensor->dims->size, 0);
  ASSERT_EQ(tensor->type, kTfLiteString);
  StringRef str_ref = GetString(tensor, 0);
  EXPECT_EQ(std::string(str_ref.str, str_ref.len), data);
}

void CheckStringTensor(const TfLiteTensor* tensor,
                       const std::vector<int>& shape,
                       const std::vector<std::string>& data) {
  ASSERT_EQ(tensor->dims->size, shape.size());
  for (int i = 0; i < tensor->dims->size; ++i) {
    ASSERT_EQ(tensor->dims->data[i], shape[i]);
  }
  ASSERT_EQ(tensor->type, kTfLiteString);
  int count = GetStringCount(tensor);
  ASSERT_EQ(count, data.size());
  for (int i = 0; i < count; ++i) {
    StringRef str_ref = GetString(tensor, i);
    EXPECT_EQ(std::string(str_ref.str, str_ref.len), data[i]);
  }
}

void CheckIntTensor(const TfLiteTensor* tensor, const std::vector<int>& shape,
                    const std::vector<int32_t>& data) {
  ASSERT_EQ(tensor->dims->size, shape.size());
  for (int i = 0; i < tensor->dims->size; ++i) {
    ASSERT_EQ(tensor->dims->data[i], shape[i]);
  }
  ASSERT_EQ(tensor->type, kTfLiteInt32);
  int count = NumElements(tensor);
  ASSERT_EQ(count, data.size());
  for (int i = 0; i < count; ++i) {
    EXPECT_EQ(tensor->data.i32[i], data[i]);
  }
}

void CheckBoolTensor(const TfLiteTensor* tensor, const std::vector<int>& shape,
                     const std::vector<bool>& data) {
  ASSERT_EQ(tensor->dims->size, shape.size());
  for (int i = 0; i < tensor->dims->size; ++i) {
    ASSERT_EQ(tensor->dims->data[i], shape[i]);
  }
  ASSERT_EQ(tensor->type, kTfLiteBool);
  int count = NumElements(tensor);
  ASSERT_EQ(count, data.size());
  for (int i = 0; i < count; ++i) {
    EXPECT_EQ(tensor->data.b[i], data[i]);
  }
}

}  // namespace subgraph_test_util
}  // namespace tflite