/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H

#include "arm_compute/core/experimental/IPostOp.h"
#include "arm_compute/core/experimental/PostOps.h"
#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/Utils.h"
#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationWithPostOpsFunction.h"
#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "support/Cast.h"

namespace arm_compute
{
namespace graph
{
namespace backends
{
namespace detail
{
/** Returns backing tensor of a given tensor
 *
 * @tparam TargetInfo Target information
 *
 * @param[in] tensor Tensor to extract the backing tensor from
 *
 * @return Backing tensor if present else nullptr
 */
template <typename TargetInfo>
typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
    typename TargetInfo::TensorType *backing_tensor = nullptr;
    if(tensor != nullptr)
    {
        ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
        // Get backing tensor handle
        ITensorHandle *tensor_handle = tensor->handle();
        // Get backing tensor
        backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
    }

    return backing_tensor;
}
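
// Usage sketch (illustrative only, not part of the library): a TargetInfo type
// is expected to expose a TensorType alias and a TargetType constant. Assuming
// a hypothetical backend description:
//
//   struct MyTargetInfo
//   {
//       using TensorType = arm_compute::ITensor;            // backend tensor type
//       static constexpr Target TargetType = Target::NEON;  // assumed target
//   };
//
//   // Resolve the backend tensor behind a graph tensor (nullptr-safe):
//   arm_compute::ITensor *backing = get_backing_tensor<MyTargetInfo>(graph_tensor);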

template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target: " << TargetInfo::TargetType
                                  << " ID: " << node.id()
                                  << " Name: " << node.name()
                                  << std::endl);

    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
    ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
}

/** Creates a backend activation layer function
 *
 * @tparam ActivationLayerFunction Backend activation function
 * @tparam TargetInfo              Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend activation layer function
 */
template <typename ActivationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input    = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output   = get_backing_tensor<TargetInfo>(node.output(0));
    const ActivationLayerInfo        act_info = node.activation_info();

    // Create function
    auto func = std::make_unique<ActivationLayerFunction>();
    func->configure(input, output, act_info);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " a: " << act_info.a()
                               << " b: " << act_info.b()
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return std::move(func);
}
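
// Dispatch sketch (illustrative): backend function factories typically route
// each graph node type to the matching create_* helper in this file. The names
// MyActivationFunction and MyTargetInfo below are placeholders, not library
// symbols:
//
//   std::unique_ptr<IFunction> create(INode *node)
//   {
//       switch(node->type())
//       {
//           case NodeType::ActivationLayer:
//               return detail::create_activation_layer<MyActivationFunction, MyTargetInfo>(
//                   *arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(node));
//           default:
//               return nullptr;
//       }
//   }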

/** Creates a backend argminmax layer function
 *
 * @tparam ArgMinMaxLayerFunction Backend argminmax function
 * @tparam TargetInfo             Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend argminmax layer function
 */
template <typename ArgMinMaxLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_arg_min_max_layer(ArgMinMaxLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const ReductionOperation         op     = node.reduction_operation();
    unsigned int                     axis   = node.axis();

    // Create function
    auto func = std::make_unique<ArgMinMaxLayerFunction>();
    func->configure(input, axis, output, op);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Reduction Operation: " << op
                               << " axis: " << axis
                               << std::endl);

    return std::move(func);
}

/** Create a backend batch normalization layer function
 *
 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend batch normalization layer function
 */
template <typename BatchNormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean  = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *var   = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *beta  = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));

    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const float                      epsilon   = node.epsilon();
    const ActivationLayerInfo        fused_act = node.fused_activation();

    // Create and configure function
    auto func = std::make_unique<BatchNormalizationLayerFunction>();
    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Epsilon: " << epsilon << " "
                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return std::move(func);
}

/** Create a backend fused convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const bool                fast_math  = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act  = node.fused_activation();
    const float               epsilon    = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
                                    std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}

/** Create a backend fused depthwise convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused depthwise convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();
    const float               epsilon          = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
                                    std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}

/** Create a backend bounding box transform layer function
 *
 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend bounding box transform layer function
 */
template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas    = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const BoundingBoxTransformInfo   bbox_info = node.info();

    // Create and configure function
    auto func = std::make_unique<BoundingBoxTransformLayerFunction>();
    func->configure(input, output, deltas, bbox_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " BoundingBox Info img W: " << bbox_info.img_width()
                               << " BoundingBox Info img H: " << bbox_info.img_height()
                               << std::endl);

    return std::move(func);
}

/** Create a backend channel shuffle layer function
 *
 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend channel shuffle layer function
 */
template <typename ChannelShuffleLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const unsigned int               num_groups = node.num_groups();

    // Create function
    auto func = std::make_unique<ChannelShuffleLayerFunction>();
    func->configure(input, output, num_groups);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Num groups: " << num_groups
                               << std::endl);

    return std::move(func);
}

/** Create a backend layer concatenate function
 *
 * @tparam ConcatenateLayerFunction Backend concatenate function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend concatenate layer function
 */
template <typename ConcatenateLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID: " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Return nullptr if the concatenation is switched off
    if(!node.is_enabled())
    {
        return nullptr;
    }

    // Extract IO and info
    std::vector<typename TargetInfo::SrcTensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DataLayout                 data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
    const size_t                     concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());

    // Create and configure function
    auto func = std::make_unique<ConcatenateLayerFunction>();
    func->configure(inputs, output, concat_axis);

    // Log info
    const bool         is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << concat_axis
                               << qss.str()
                               << std::endl);

    return std::move(func);
}
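
// Note (usage): unlike the other helpers in this file, create_concatenate_layer
// may legitimately return nullptr (when the node is disabled), so a caller has
// to tolerate a null function. Illustrative handling, with placeholder names:
//
//   auto func = detail::create_concatenate_layer<MyConcatFunction, MyTargetInfo>(concat_node);
//   if(func != nullptr)
//   {
//       functions.emplace_back(std::move(func));
//   }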

/** Create a backend convolution layer function
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}
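
// Method selection at a glance (restates the branches above):
//   ConvolutionMethod::Winograd -> WinogradConvolutionLayer (memory-managed, no grouping)
//   ConvolutionMethod::Direct   -> DirectConvolutionLayer   (no grouping)
//   ConvolutionMethod::GEMM     -> GEMMConvolutionLayer     (memory-managed)
//   otherwise                   -> GenericConvolutionLayer  (backend picks internally)
// Illustrative: the method usually arrives as a hint stored on the node, e.g.
//   node.set_convolution_method(ConvolutionMethod::Direct);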

/** Create a backend convolution layer function with post operator
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_with_post_op(FusedConvolutionWithPostOpNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 4 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const ActivationLayerInfo fused_act  = node.fused_activation();

    experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;

    auto &post_op_info_list = node.post_op_info_list();
    for(const auto &post_op_info : post_op_info_list)
    {
        switch(post_op_info->type())
        {
            case PostOpType::Activation:
            {
                const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
                post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
                break;
            }
            case PostOpType::Eltwise_Add:
            {
                typename TargetInfo::TensorType *add_input    = get_backing_tensor<TargetInfo>(node.input(3));
                const auto                       eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
                post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Unsupported PostOpType");
            }
        }
    }

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    // Fusing convolution with post ops is only supported for conv1x1, which is only implemented as a GEMM-based convolution
    std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                    std::string("GEMMConvolutionLayer"), mm,
                                    input, weights, biases, output, conv_info,
                                    WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups, post_ops);

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << " Post ops: " << post_ops
                               << std::endl);
    return std::move(func);
}
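
// Post-op construction sketch (mirrors the loop above; ITensor * stands in for
// the backend's TargetInfo::TensorType, and act_info/addend/prev_dst_pos/policy
// are placeholder variables):
//
//   experimental::PostOpList<ITensor *> ops;
//   ops.push_back_op<experimental::PostOpAct<ITensor *>>(act_info);
//   ops.push_back_op<experimental::PostOpEltwiseAdd<ITensor *>>(addend, prev_dst_pos, policy);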

/** Create a backend convolution batch normalization layer function with post operator
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused convolution with batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_with_post_op(FusedConvolutionBatchNormalizationWithPostOpsNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 8 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo conv_info  = node.convolution_info();
    const unsigned int  num_groups = node.num_groups();
    const bool          fast_math  = node.fast_math_hint() == FastMathHint::Enabled;
    const float         epsilon    = node.epsilon();

    experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;

    auto &post_op_info_list = node.post_op_info_list();
    for(const auto &post_op_info : post_op_info_list)
    {
        switch(post_op_info->type())
        {
            case PostOpType::Activation:
            {
                const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
                post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
                break;
            }
            case PostOpType::Eltwise_Add:
            {
                typename TargetInfo::TensorType *add_input    = get_backing_tensor<TargetInfo>(node.input(3));
                const auto                       eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
                post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Unsupported PostOpType");
            }
        }
    }

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedConvolutionBatchNormalizationWithPostOpsFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
                                    std::string("FusedConvolutionBatchNormalizationLayerWithPostOpsLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, post_ops);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Post ops: " << post_ops
                               << std::endl);
    return std::move(func);
}

/** Create a backend deconvolution layer function
 *
 * @tparam DeconvolutionLayerFunction Backend deconvolution function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend deconvolution layer function
 */
template <typename DeconvolutionLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo deconv_info = node.deconvolution_info();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;

    std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
                                      std::string(), mm,
                                      input, weights, biases, output, deconv_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);
    return func;
}
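
// Helper contract (sketch, assuming create_named_memory_managed_function
// returns a (function, name) pair as its use with std::tie suggests). The call
// above keeps only the function and drops the name via std::ignore, which is
// why the log prints node.type() instead of a function name. Equivalent
// unpacking, with placeholder variables:
//
//   auto result = create_named_memory_managed_function<DeconvolutionLayerFunction>(
//       std::string(), mm, input, weights, biases, output, deconv_info);
//   std::unique_ptr<IFunction> fn = std::move(result.first); // configured function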

/** Create a backend depth-wise convolution layer function
 *
 * @tparam DepthwiseConvolutionLayer Backend depthwise convolution function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayer, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;

    std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
                                    std::string("DepthwiseConvolutionLayer"),
                                    input, weights, biases, output, conv_info, depth_multiplier, fused_act);

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Depth multiplier: " << depth_multiplier
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}

/** Create a backend depth to space layer function
 *
 * @tparam DepthToSpaceLayerFunction Backend depth to space function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth to space layer function
 */
template <typename DepthToSpaceLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_depth_to_space_layer(DepthToSpaceLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<DepthToSpaceLayerFunction>();
    func->configure(input, output, node.block_shape());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Block Size: " << node.block_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend dequantize layer function
 *
 * @tparam DequantizationLayerFunction Backend dequantize function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend dequantize layer function
 */
template <typename DequantizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<DequantizationLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Input quantization info: " << input->info()->quantization_info()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend detection output layer function
 *
 * @tparam DetectionOutputLayerFunction Backend detection output function
 * @tparam TargetInfo                   Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection output layer function
 */
template <typename DetectionOutputLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1      = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *input2      = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DetectionOutputLayerInfo   detect_info = node.detection_output_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<DetectionOutputLayerFunction>();
    func->configure(input0, input1, input2, output, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " DetectionOutputLayer info: " << detect_info
                               << std::endl);

    return std::move(func);
}

/** Create a backend detection post process layer function
 *
 * @tparam DetectionPostProcessLayerFunction Backend detection post process function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection post process layer function
 */
template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType    *input0      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType    *input1      = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType    *input2      = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType    *output0     = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType    *output1     = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType    *output2     = get_backing_tensor<TargetInfo>(node.output(2));
    typename TargetInfo::TensorType    *output3     = get_backing_tensor<TargetInfo>(node.output(3));
    const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output0 == nullptr);
    ARM_COMPUTE_ERROR_ON(output1 == nullptr);
    ARM_COMPUTE_ERROR_ON(output2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output3 == nullptr);

    // Create and configure function
    auto func = std::make_unique<DetectionPostProcessLayerFunction>();
    func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output0 shape: " << output0->info()->tensor_shape()
                               << " Output1 shape: " << output1->info()->tensor_shape()
                               << " Output2 shape: " << output2->info()->tensor_shape()
                               << " Output3 shape: " << output3->info()->tensor_shape()
                               << " DetectionPostProcessLayer info: " << detect_info
                               << std::endl);

    return std::move(func);
}
1004 
/** Create a backend element-wise operation layer function
 *
 * @tparam EltwiseFunctions Backend element-wise function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend element-wise operation layer function
 */
template <typename EltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input1         = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input2         = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output         = get_backing_tensor<TargetInfo>(node.output(0));
    const EltwiseOperation           eltwise_op     = node.eltwise_operation();
    const ConvertPolicy              convert_policy = node.convert_policy();
    const ActivationLayerInfo        act_info       = node.fused_activation();
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == EltwiseOperation::Add)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
                                        std::string("ArithmeticAddition"),
                                        input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Sub)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
                                        std::string("ArithmeticSubtraction"),
                                        input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Mul)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
                                        std::string("PixelWiseMultiplication"),
                                        input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
    }
    else if(eltwise_op == EltwiseOperation::Max)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
                                        std::string("ElementwiseMaximum"),
                                        input1, input2, output, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Div)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Division>(
                                        std::string("ArithmeticDivision"),
                                        input1, input2, output, act_info);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input1->info()->data_type()
                               << " Shape: " << input1->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
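
// The EltwiseFunctions collection above is only required to expose one backend function
// type per supported operation. As an illustrative sketch only (names modelled on the
// Neon runtime functions; the real collections are defined in each backend's function
// factory, not here):
//
//   struct NEEltwiseFunctions
//   {
//       using Addition       = NEArithmeticAddition;
//       using Subtraction    = NEArithmeticSubtraction;
//       using Multiplication = NEPixelWiseMultiplication;
//       using Maximum        = NEElementwiseMax;
//       using Division       = NEElementwiseDivision;
//   };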

/** Create a backend unary element-wise operation layer function
 *
 * @tparam UnaryEltwiseFunctions Backend unary element-wise function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend unary element-wise operation layer function
 */
template <typename UnaryEltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const UnaryEltwiseOperation      eltwise_op = node.eltwise_descriptor().op;

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == UnaryEltwiseOperation::Exp)
    {
        std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
                                        std::string("Exp"),
                                        input, output);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
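
// Analogously, UnaryEltwiseFunctions only needs an Exp member for the single operation
// handled above, e.g. (sketch, assuming the CL runtime's exp function):
//
//   struct CLUnaryEltwiseFunctions
//   {
//       using Exp = CLExpLayer;
//   };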

/** Create a backend flatten layer function
 *
 * @tparam FlattenLayerFunction Backend flatten function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend flatten layer function
 */
template <typename FlattenLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<FlattenLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend fully connected layer function
 *
 * @tparam FullyConnectedLayerFunction Backend fully-connected function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fully connected layer function
 */
template <typename FullyConnectedLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    FullyConnectedLayerInfo          fc_info = node.info();
    fc_info.enable_fast_math                 = (node.fast_math_hint() == FastMathHint::Enabled);

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(weights == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto wm   = get_weights_manager(ctx, TargetInfo::TargetType);
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = std::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
    func->configure(input, weights, biases, output, fc_info);

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
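
// Illustrative dispatch (a sketch, not code from this header): a backend function factory
// would typically downcast the generic INode and forward it to the helper above. Names such
// as CLFullyConnectedLayer and CLTargetInfo are assumptions modelled on the CL backend:
//
//   case NodeType::FullyConnectedLayer:
//       return detail::create_fully_connected_layer<CLFullyConnectedLayer, CLTargetInfo>(
//           *arm_compute::utils::cast::polymorphic_downcast<FullyConnectedLayerNode *>(node), ctx);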

/** Create a backend generate proposals layer function
 *
 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
 * @tparam TargetInfo                     Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend generate proposals layer function
 */
template <typename GenerateProposalsLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *scores              = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas              = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *anchors             = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *proposals           = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType *scores_out          = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
    const GenerateProposalsInfo      info                = node.info();

    ARM_COMPUTE_ERROR_ON(scores == nullptr);
    ARM_COMPUTE_ERROR_ON(deltas == nullptr);
    ARM_COMPUTE_ERROR_ON(anchors == nullptr);
    ARM_COMPUTE_ERROR_ON(proposals == nullptr);
    ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
    ARM_COMPUTE_ERROR_ON(num_valid_proposals == nullptr);

    // Create and configure function
    auto func = std::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << scores->info()->data_type()
                               << " Scores shape: " << scores->info()->tensor_shape()
                               << " Deltas shape: " << deltas->info()->tensor_shape()
                               << " Anchors shape: " << anchors->info()->tensor_shape()
                               << " Proposals shape: " << proposals->info()->tensor_shape()
                               << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
                               << " Scores Out shape: " << scores_out->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend L2 normalization layer function
 *
 * @tparam L2NormalizeLayerFunction Backend L2 normalization function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend L2 normalization layer function
 */
template <typename L2NormalizeLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_l2_normalize_layer(L2NormalizeLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    int                              axis    = node.axis();
    float                            epsilon = node.epsilon();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = std::make_unique<L2NormalizeLayerFunction>(mm);
    func->configure(input, output, axis, epsilon);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Axis: " << axis
                               << " Epsilon: " << epsilon
                               << std::endl);

    return std::move(func);
}

/** Create a backend normalization layer function
 *
 * @tparam NormalizationLayerFunction Backend normalization function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend normalization layer function
 */
template <typename NormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);

    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const NormalizationLayerInfo     norm_info = node.normalization_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<NormalizationLayerFunction>();
    func->configure(input, output, norm_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Normalization info: " << norm_info.type()
                               << std::endl);

    return std::move(func);
}

/** Create a backend normalize planar YUV layer function
 *
 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend normalize planar YUV layer function
 */
template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *std    = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(mean == nullptr);
    ARM_COMPUTE_ERROR_ON(std == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<NormalizePlanarYUVLayerFunction>();
    func->configure(input, output, mean, std);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
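
// For reference, the normalize planar YUV operation standardizes each channel of the input,
// i.e. out = (in - mean) / std, with mean and std supplied as the node's second and third inputs.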

/** Create a backend pad layer function
 *
 * @tparam PadLayerFunction Backend pad function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend pad layer function
 */
template <typename PadLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const PaddingList               &padding   = node.padding();
    const PixelValue                 pad_value = node.pad_value();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<PadLayerFunction>();
    func->configure(input, output, padding, pad_value);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend permute layer function
 *
 * @tparam PermuteLayerFunction Backend permute function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend permute layer function
 */
template <typename PermuteLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const PermutationVector         &perm   = node.permutation_vector();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<PermuteLayerFunction>();
    func->configure(input, output, perm);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Permutation vector: " << perm
                               << std::endl);

    return std::move(func);
}

/** Create a backend pooling layer function
 *
 * @tparam PoolingLayerFunction Backend pooling function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend pooling layer function
 */
template <typename PoolingLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const PoolingLayerInfo           pool_info = node.pooling_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<PoolingLayerFunction>();
    func->configure(input, output, pool_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Pooling info: " << pool_info.pool_type
                               << std::endl);

    return std::move(func);
}

/** Create a backend PRelu layer function
 *
 * @tparam PReluFunction Backend PRelu function
 * @tparam TargetInfo    Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend PRelu layer function
 */
template <typename PReluFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *alpha  = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<PReluFunction>();
    func->configure(input, alpha, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend print layer function
 *
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend print layer function
 */
template <typename TargetInfo>
std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_UNUSED(input);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << std::endl);

    return nullptr;
}
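
// Note that nullptr is a deliberate return value above, so callers cannot assume every node
// yields a function, e.g. (sketch, CLTargetInfo assumed):
//
//   auto func = detail::create_print_layer<CLTargetInfo>(*print_node);
//   if(func != nullptr)
//   {
//       // own and schedule the function as usual
//   }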

/** Create a backend priorbox layer function
 *
 * @tparam PriorBoxLayerFunction Backend priorbox function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend priorbox layer function
 */
template <typename PriorBoxLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1     = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const PriorBoxLayerInfo          prior_info = node.priorbox_info();
    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<PriorBoxLayerFunction>();
    func->configure(input0, input1, output, prior_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " PriorBoxLayer info: " << prior_info
                               << std::endl);

    return std::move(func);
}

/** Create a backend quantization layer function
 *
 * @tparam QuantizationLayerFunction Backend quantization function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend quantization layer function
 */
template <typename QuantizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<QuantizationLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend reduction operation layer function
 *
 * @tparam ReductionOperationFunction Backend reduction operation function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend reduction operation layer function
 */
template <typename ReductionOperationFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reduction_operation_layer(ReductionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    ReductionOperation               op        = node.op();
    int                              axis      = node.axis();
    bool                             keep_dims = node.keep_dims();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<ReductionOperationFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(input, output, axis, op, keep_dims);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Operation: " << op
                               << " Axis: " << axis
                               << " Keep dimensions: " << keep_dims
                               << std::endl);

    return std::move(func);
}
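
// Shape example for keep_dims: reducing a (W, H, C) input over axis 1 yields (W, 1, C) when
// keep_dims is true and (W, C) when it is false.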

/** Create a backend reorg layer function
 *
 * @tparam ReorgLayerFunction Backend reorg function
 * @tparam TargetInfo         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend reorg layer function
 */
template <typename ReorgLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<ReorgLayerFunction>();
    func->configure(input, output, node.stride());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend reshape layer function
 *
 * @tparam ReshapeLayerFunction Backend reshape function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend reshape layer function
 */
template <typename ReshapeLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<ReshapeLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend resize layer function
 *
 * @tparam ResizeLayerFunction Backend resize function
 * @tparam TargetInfo          Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend resize layer function
 */
template <typename ResizeLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);
    const InterpolationPolicy policy = node.policy();

    // Create and configure function
    auto func = std::make_unique<ResizeLayerFunction>();
    func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false });

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Interpolation: " << policy
                               << std::endl);

    return std::move(func);
}

/** Create a backend ROI align layer function
 *
 * @tparam ROIAlignLayerFunction    ROI Align function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return ROI Align layer function
 */
template <typename ROIAlignLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *rois   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);
    ARM_COMPUTE_ERROR_ON(rois == nullptr);

    const ROIPoolingLayerInfo pool_info = node.pooling_info();

    // Create and configure function
    auto func = std::make_unique<ROIAlignLayerFunction>();
    func->configure(input, rois, output, pool_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " ROIs shape: " << rois->info()->tensor_shape()
                               << " ROIPooling width: " << pool_info.pooled_width()
                               << " ROIPooling height: " << pool_info.pooled_height()
                               << std::endl);

    return std::move(func);
}

/** Create a backend slice layer function
 *
 * @tparam SliceLayerFunction Backend slice function
 * @tparam TargetInfo         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend slice layer function
 */
template <typename SliceLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<SliceLayerFunction>();
    func->configure(input, output, node.starts(), node.ends());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend softmax layer function
 *
 * @tparam SoftmaxLayerFunction Backend softmax function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend softmax layer function
 */
template <typename SoftmaxLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const float                      beta   = node.beta();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(input, output, beta);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
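
// For reference, beta scales the logits before normalization:
// out_i = exp(beta * x_i) / sum_j exp(beta * x_j); beta > 1 sharpens the resulting
// distribution while beta < 1 flattens it.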

/** Create a backend stack layer function
 *
 * @tparam StackLayerFunction Backend stack function
 * @tparam TargetInfo         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend stack layer function
 */
template <typename StackLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID: " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    std::vector<typename TargetInfo::TensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const int                        axis   = node.axis();

    // Create and configure function
    auto func = std::make_unique<StackLayerFunction>();
    func->configure(inputs, axis, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Inputs shape: " << inputs[0]->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << axis
                               << std::endl);

    return std::move(func);
}

/** Create a backend strided slice layer function
 *
 * @tparam StridedSliceLayerFunction Backend strided slice function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend strided slice layer function
 */
template <typename StridedSliceLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_strided_slice_layer(StridedSliceLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    Coordinates                      starts  = node.starts();
    Coordinates                      ends    = node.ends();
    BiStrides                        strides = node.strides();
    StridedSliceLayerInfo            info    = node.strided_slice_info();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<StridedSliceLayerFunction>();
    func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
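
// Mask semantics (following the TensorFlow-style convention): if bit i of begin_mask/end_mask
// is set, starts[i]/ends[i] is ignored and the slice runs from the start/to the end of
// dimension i; if bit i of shrink_axis_mask is set, dimension i is removed from the output.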
} // namespace detail
} // namespace backends
} // namespace graph
} // namespace arm_compute

#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */