//
// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <schema_generated.h>

#include <doctest/doctest.h>

namespace
{
std::vector<char> CreateCastTfLiteModel(tflite::TensorType inputTensorType,
                                        tflite::TensorType outputTensorType,
                                        const std::vector<int32_t>& tensorShape,
                                        float quantScale = 1.0f,
                                        int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

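    // Per the TFLite schema, buffer 0 is the reserved empty sentinel; buffers 1 and 2
    // back the input and output tensors created below.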
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

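    // A single scale/zero-point pair is shared by the input and output tensors;
    // it only takes effect when a quantized tensor type is used.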
    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

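    // Tensor 0 is the input (backed by buffer 1) and tensor 1 is the output (buffer 2).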
    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              inputTensorType,
                              1,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              outputTensorType,
                              2,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

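    // The CAST operator consumes tensor 0 and produces tensor 1.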
    const std::vector<int32_t> operatorInputs({0});
    const std::vector<int32_t> operatorOutputs({1});

    flatbuffers::Offset<Operator> castOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       BuiltinOptions_CastOptions,
                       CreateCastOptions(flatBufferBuilder).Union());

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: CAST Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_CAST);

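    // The single subgraph exposes the operator's input and output directly as
    // the model's inputs and outputs.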
    const std::vector<int32_t> subgraphInputs({0});
    const std::vector<int32_t> subgraphOutputs({1});
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&castOperator, 1));

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

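    // Finalise the buffer with the TFLite file identifier and copy it out as raw bytes.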
    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

template<typename T, typename K>
void CastTest(tflite::TensorType inputTensorType,
              tflite::TensorType outputTensorType,
              std::vector<armnn::BackendId>& backends,
              std::vector<int32_t>& shape,
              std::vector<T>& inputValues,
              std::vector<K>& expectedOutputValues,
              float quantScale = 1.0f,
              int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
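    // Build the model once so both interpreter runs execute identical bytes.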
    std::vector<char> modelBuffer = CreateCastTfLiteModel(inputTensorType,
                                                          outputTensorType,
                                                          shape,
                                                          quantScale,
                                                          quantOffset);

    // Setup interpreter with just TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<K> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<K>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Setup interpreter with Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<K> armnnOutputValues = armnnInterpreter.GetOutputResult<K>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

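    // Both runs must agree with each other and with the expected reference values.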
    armnnDelegate::CompareOutputData<K>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}

} // anonymous namespace
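
// A minimal usage sketch (illustrative only, not part of the original suite), assuming
// the CpuRef backend is available; the shape and values below are hypothetical:
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> shape { 1, 3, 2, 3 };
//     std::vector<int32_t> inputValues(18, 2);
//     std::vector<float> expectedOutputValues(18, 2.0f);
//     CastTest<int32_t, float>(tflite::TensorType_INT32,
//                              tflite::TensorType_FLOAT32,
//                              backends,
//                              shape,
//                              inputValues,
//                              expectedOutputValues);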
139