//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/utility/IgnoreUnused.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
#include <tensorflow/lite/minimal_logging.h>

#include <algorithm>
#include <iterator>
#include <set>
#include <string>
#include <vector>

namespace armnnDelegate
{

void SetupConcatViewOrigin(const armnn::TensorInfo& inputTensorInfo,
                           armnn::OriginsDescriptor& concatDescriptor,
                           const unsigned int concatAxis,
                           unsigned int inputIndex,
                           unsigned int& mergeDimOrigin)
{
    const uint32_t inputRank = concatDescriptor.GetNumDimensions();

    // Double check the dimensions of the input tensors.
    if (inputTensorInfo.GetNumDimensions() != inputRank)
    {
        throw armnn::ParseException("The number of dimensions for input tensors "
                                    "of the concatenation operator should be: " + std::to_string(inputRank));
    }

    for (unsigned int j = 0; j < concatAxis; ++j)
    {
        concatDescriptor.SetViewOriginCoord(inputIndex, j, 0);
    }

    concatDescriptor.SetViewOriginCoord(inputIndex, concatAxis, mergeDimOrigin);
    mergeDimOrigin += inputTensorInfo.GetShape()[concatAxis];

    for (unsigned int j = concatAxis + 1; j < inputRank; ++j)
    {
        concatDescriptor.SetViewOriginCoord(inputIndex, j, 0);
    }
}
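
// Worked example (illustrative only): concatenating two inputs of shape [2, 3]
// along axis 1 into a [2, 6] output yields view origins (0, 0) for the first
// input and (0, 3) for the second, since mergeDimOrigin advances by each
// input's extent along the concatenation axis.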

TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
                                        TfLiteContext* tfLiteContext,
                                        TfLiteNode* tfLiteNode,
                                        int nodeIndex,
                                        int32_t tfLiteConcatOperatorCode)
{
    unsigned int numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not met (given %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;

    std::vector<armnn::TensorInfo> inputTensorInfos;
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[i]];
        if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteConcatOperatorCode, nodeIndex))
        {
            return kTfLiteError;
        }

        armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
        inputTensorInfos.emplace_back(inputTensorInfo);
    }

    // Convert input tensors to const armnn::TensorInfo* type for FORWARD_LAYER_SUPPORT_FUNC.
    std::vector<const armnn::TensorInfo*> inputConstTensorInfos;
    std::transform(inputTensorInfos.begin(),
                   inputTensorInfos.end(),
                   std::back_inserter(inputConstTensorInfos),
                   [](armnn::TensorInfo& t) -> const armnn::TensorInfo* { return &t; });

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteConcatOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Setup OriginsDescriptor, axis and view origins
    unsigned int numConcatView = static_cast<unsigned int>(numInputs);
    uint32_t inputRank = tfLiteTensors[tfLiteNode->inputs->data[0]].dims->size;

    auto* concatenationParameters = reinterpret_cast<TfLiteConcatenationParams*>(tfLiteNode->builtin_data);

    if (!concatenationParameters)
    {
        throw armnn::Exception("TfLiteArmnnDelegate: Concat parameters are null in node #" +
                               std::to_string(nodeIndex));
    }

    // Normalise a potentially negative axis into the range [0, inputRank).
    const unsigned int concatDimInput = static_cast<unsigned int>(
        (static_cast<int>(inputRank) + concatenationParameters->axis) % static_cast<int>(inputRank));
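
    // For example (illustrative only): with inputRank == 4 and axis == -1,
    // concatDimInput becomes (4 - 1) % 4 == 3, i.e. the last dimension,
    // while a non-negative axis such as 1 maps to (4 + 1) % 4 == 1, unchanged.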

    armnn::OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
    concatDescriptor.SetConcatAxis(concatDimInput);

    unsigned int mergeDimOrigin = 0;
    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        // Sets up the concatDescriptor view origin for this input, reusing
        // the TensorInfo gathered during validation above.
        SetupConcatViewOrigin(inputTensorInfos[viewIndex], concatDescriptor, concatDimInput,
                              viewIndex, mergeDimOrigin);
    }

    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    // Verify we support the fused activation before attempting to create a layer
    TfLiteFusedActivation activationType = concatenationParameters->activation;

    TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                    outputTensorInfo, activationType);
    if (activationStatus != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Check if supported
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("CONCATENATION",
                                   tfLiteContext,
                                   IsConcatSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputConstTensorInfos,
                                   outputTensorInfo,
                                   concatDescriptor);
    };

    // If the network is not yet constructed, this call is only checking support.
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Setup layer and connect.
    armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
    ARMNN_ASSERT(concatenationLayer != nullptr);
    concatenationLayer->SetBackendId(setBackend);

    // Connect the Constant Inputs
    auto inputsTensorsProcess = ProcessInputs(concatenationLayer,
                                              delegateData,
                                              tfLiteContext,
                                              tfLiteNode);
    if (inputsTensorsProcess == kTfLiteError)
    {
        return inputsTensorsProcess;
    }

    armnn::IOutputSlot& outputSlot = concatenationLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);
    if (Connect(concatenationLayer, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (activationType == kTfLiteActNone)
    {
        // No Activation
        return kTfLiteOk;
    }

    // Check and create the fused activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData);
}

TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
                               TfLiteContext* tfLiteContext,
                               TfLiteNode* tfLiteNode,
                               int nodeIndex,
                               int32_t tfLiteMeanOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            tfLiteMeanOperatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLiteMeanOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteAxisTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid axis tensor in operator #%d node #%d: ",
            tfLiteMeanOperatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteAxisTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic axis tensors are not supported in operator #%d node #%d: ",
            tfLiteMeanOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            tfLiteMeanOperatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLiteMeanOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAxisTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);

    std::vector<int32_t> axis;
    // Add the axis data to a vector, to be converted to unsigned int and assigned to the descriptor axis.
    for (unsigned int i = 0; i < axisTensorInfo.GetNumElements(); ++i)
    {
        axis.emplace_back(axisTensorData[i]);
    }

    // Convert the axis to unsigned int and remove duplicates.
    unsigned int rank = inputTensorInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(),
                   axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
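
    // For example (illustrative only): with rank == 4, an axis list of
    // {-1, 3} normalises to {3, 3}, and the std::set keeps the single
    // unique value {3}.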

    // Setup MeanDescriptor and assign axis and keepDims
    armnn::MeanDescriptor desc;
    desc.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
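
    // For example (illustrative only): reducing a [2, 3, 4] input over axis 1
    // gives a [2, 1, 4] output when keep_dims is set (ranks match, so the flag
    // is inferred as true here) and a [2, 4] output when it is not.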

    // Check if supported
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("MEAN",
                                   tfLiteContext,
                                   IsMeanSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   desc);
    };

    // If the network is not yet constructed, this call is only checking support.
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Setup layer and connect.
    armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
    ARMNN_ASSERT(meanLayer != nullptr);
    meanLayer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the Constant Inputs if there are any
    if (ProcessInputs(meanLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    return Connect(meanLayer, tfLiteNode, delegateData);
}

TfLiteStatus VisitControlOperator(DelegateData& delegateData,
                                  TfLiteContext* tfLiteContext,
                                  TfLiteNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
    armnn::IgnoreUnused(delegateData,
                        tfLiteContext,
                        tfLiteNode,
                        nodeIndex,
                        operatorCode);

    switch (operatorCode)
    {
        case kTfLiteBuiltinConcatenation:
            return VisitConcatenationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinMean:
            return VisitMeanOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        default:
            return kTfLiteError;
    }
}
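
// A minimal usage sketch (hypothetical caller, for illustration only): the
// delegate would typically dispatch each node it claims along these lines,
// where context, node, nodeIndex and builtinCode come from TfLite's delegate
// partitioning callbacks:
//
//     TfLiteStatus status = armnnDelegate::VisitControlOperator(
//         delegateData, context, node, nodeIndex, builtinCode);
//     if (status != kTfLiteOk)
//     {
//         // Fall back to the default TfLite kernel for this node.
//     }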

} // namespace armnnDelegate