//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>

#include "1.0/FullyConnected.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <armnnUtils/FloatingPointComparison.hpp>

#include <log/log.h>
#include <vector>

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunneeded-internal-declaration"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wunused-variable"
#endif
namespace armnn_driver
{

///
/// Helper classes
///

#ifdef ARMNN_ANDROID_R
using OperandType = android::nn::hal::OperandType;
#endif

#ifdef ARMNN_ANDROID_S
#include <nnapi/Types.h>
#endif


struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    , m_DynamicInputsEncountered(false)
    {}

    const std::vector<armnn::BackendId>       m_Backends;
    armnn::INetworkPtr                        m_Network;
    std::vector<armnn::IOutputSlot*>          m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    bool m_DynamicInputsEncountered;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

    void SanitizeQuantizationScale(LayerInputHandle& weight,
                                   LayerInputHandle& input);

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool                m_Valid;
    armnn::TensorInfo   m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    // the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
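
// Example (illustrative only, not part of the upstream header): a typical invocation of the
// macro above, assuming hypothetical TensorInfos 'inputInfo'/'outputInfo' and an
// armnn::ActivationDescriptor 'desc' are in scope:
//
//     bool isSupported = false;
//     armnn::BackendId setBackend;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsActivationSupported,
//                                data.m_Backends,
//                                isSupported,
//                                setBackend,
//                                inputInfo,
//                                outputInfo,
//                                desc);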

template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32      ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL                           ||
           type == V1_2::OperandType::TENSOR_BOOL8                   ||
           type == V1_2::OperandType::TENSOR_FLOAT16                 ||
           type == V1_2::OperandType::TENSOR_FLOAT32                 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM            ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM             ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM            ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL                           ||
           type == V1_3::OperandType::TENSOR_BOOL8                   ||
           type == V1_3::OperandType::TENSOR_FLOAT16                 ||
           type == V1_3::OperandType::TENSOR_FLOAT32                 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM            ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED     ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM             ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM            ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 or later Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    if (!reshapeLayer)
    {
        throw armnn::RuntimeException("ReshapeLayer is null");
    }

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    if (!startLayer)
    {
        throw armnn::RuntimeException("StartLayer is null");
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // The numbers of dimensions do not match, so we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored by the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    if (!data.m_Network)
    {
        throw armnn::RuntimeException("Network is null");
    }

    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
    reshapeLayer.SetBackendId(setBackend);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
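
// Example (illustrative): broadcasting an input0 of shape [4] against an input1 of shape
// [2, 3, 4] reshapes input0 to [1, 1, 4] before both are connected to startLayer; the
// inputs keep their original order on the layer's input slots.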

void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}
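
// Worked example (illustrative): for input=10, kernel=3, stride=2 with SAME padding, the
// NNAPI helper computes an output size of ceil(10/2) = 5, so the total padding needed is
// (5-1)*2 + 3 - 10 = 1, split as outPadHead=0 and outPadTail=1. VALID padding always yields 0/0.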

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif

Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is
// also what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match,
// so we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies, as it is up to
// the user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // bias is expected to be a 1d tensor, set qdim=0
        biasInfo.SetQuantizationDim(0);

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
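
// Worked example (illustrative): with an input scale of 0.5f and a weight scale of 0.25f, the
// expected bias scale is 0.5f * 0.25f = 0.125f. A bias scale of 0.1251f falls within the 1%
// tolerance and is snapped to 0.125f, while a bias scale of, say, 0.25f is left unchanged for
// ArmNN itself to reject.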

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector SwapDim2And3({ 0U, 1U, 3U, 2U });

// 3D Permutation Vectors
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });

template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
    if (!layer)
    {
        throw armnn::RuntimeException("TransposeLayer is null");
    }
    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
                               const armnn::TensorShape & outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                        "%s: Invalid output shape for dimension %d (%d != %d)",
                        __func__,
                        i,
                        outputShape[i],
                        outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}
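
// Example (illustrative): concatenating shapes [2, 3] and [2, 5] along concatDim=1 is only
// valid for an output shape of [2, 8]; any other output shape makes this function fail.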

bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping,
                   std::vector<armnn::BackendId>& setBackends)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            swizzleLayer.SetBackendId(setBackends[i]);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool TransposeInputTensors(ConversionData& data,
                          std::vector<LayerInputHandle>& inputs,
                          std::vector<armnn::TensorShape>& inputShapes,
                          const armnn::PermutationVector& mapping)
{
    // If we have an IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        std::vector<armnn::BackendId> setBackendsVec;
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackend,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            setBackendsVec.push_back(setBackend);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping, setBackendsVec);
    }
    return true;
}


bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;

    if (numberOfDimensions < 3)
    {
        return Fail("%s: Invalid numberOfDimensions: %i < 3", __func__, numberOfDimensions);
    }

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 3;
        permutationPair = std::make_pair(SwapDim2And3, SwapDim2And3);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}
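
// Example (illustrative): for a 4-D concatenation along dimension 2 this returns true,
// rewrites concatDimension to 3 and yields the (SwapDim2And3, SwapDim2And3) pair, i.e. the
// caller permutes the inputs before the concat and permutes the output back afterwards.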

} // anonymous namespace

namespace armnn_driver
{

//// Creates an ArmNN activation layer and connects it to the given layer, if the
//// passed-in AndroidNN activation function requires it.
//// @return The end layer of the sequence of layers built for the given AndroidNN
//// activation function, or nullptr if an error occurred (e.g. unsupported activation).
//// Note that the end layer matches the input layer if no activation is required
//// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
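
//// Example (illustrative, assuming hypothetical locals 'outInfo', 'activation' and
//// 'startLayer'): converters typically finish with something like
////     armnn::IConnectableLayer* endLayer = ProcessActivation(outInfo, activation, startLayer, data);
////     if (!endLayer) { return Fail("%s: ProcessActivation failed", __func__); }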

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: Invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    if (operation.inputs[inputIndex] >= getMainModel(model).operands.size())
    {
        Fail("%s: invalid model index: %i >= %i", __func__, inputIndex, getMainModel(model).operands.size());
        return nullptr;
    }

    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    if (operation.outputs[outputIndex] >= getMainModel(model).operands.size())
    {
        Fail("%s: invalid model index: %i >= %i", __func__, outputIndex, getMainModel(model).operands.size());
        return nullptr;
    }
    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    HalOperandLifeTime lifetime = operand.lifetime;

    return lifetime == HalOperandLifeTime::CONSTANT_COPY ||
           lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == HalOperandLifeTime::NO_VALUE;
}

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalModel     = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);

    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}

template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data,
                    bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!optional && !operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (!optional)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}
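
// Example (illustrative): reading a strictly-typed scalar input, e.g. an INT32 'axis'
// assumed to live at operand index 1 of a hypothetical operation:
//
//     int32_t axis = 0;
//     if (!GetInputScalar<HalPolicy>(operation, 1, HalPolicy::OperandType::INT32, axis, model, data))
//     {
//         return Fail("%s: failed to read axis input", __func__);
//     }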

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel       = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(HalOperandType::INT32).c_str(),
                    toString(HalOperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}


template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalPolicy,
         typename ConvolutionDescriptor,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationX,
                                             model,
                                             data);
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex + 1,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationY,
                                             model,
                                             data);
    }

    return success;
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool GetOptionalBool(const HalOperation& operation,
                     uint32_t inputIndex,
                     const HalModel& model,
                     const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return false;
    }

    if (!IsBool(*operand))
    {
        return false;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return false;
    }
    return *(static_cast<const bool*>(valueAddress));
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
bool GetTensorInt32Values(const HalOperand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__);
1168     }
1169 
1170     // Check number of bytes is sensible
1171     const uint32_t numBytes = operand.location.length;
1172     if (numBytes % sizeof(int32_t) != 0)
1173     {
1174         return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1175                     __func__, numBytes, sizeof(int32_t));
1176     }
1177 
1178     outValues.resize(numBytes / sizeof(int32_t));
1179     memcpy(outValues.data(), startAddress, numBytes);
1180     return true;
1181 }
1182 
1183 template<typename HalPolicy,
1184          typename HalOperation = typename HalPolicy::Operation,
1185          typename HalModel     = typename HalPolicy::Model>
GetInputPaddingScheme(const HalOperation & operation,uint32_t inputIndex,PaddingScheme & outPaddingScheme,const HalModel & model,const ConversionData & data)1186 bool GetInputPaddingScheme(const HalOperation& operation,
1187                            uint32_t inputIndex,
1188                            PaddingScheme& outPaddingScheme,
1189                            const HalModel& model,
1190                            const ConversionData& data)
1191 {
1192     int32_t paddingSchemeAsInt;
1193     if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
1194     {
1195         return Fail("%s: failed to get padding scheme input value", __func__);
1196     }
1197 
1198     outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1199     return true;
1200 }
1201 
1202 template<typename HalPolicy,
1203          typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data,
                                           const armnn::PermutationVector& dimensionMappings = g_DontPermute)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           armnn::BackendId(),
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin =
                                    ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);

                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    armnn::BackendId setBackend;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               setBackend,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                                    data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    constantLayer->SetBackendId(setBackend);
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                    outputSlot.SetTensorInfo(constantTensorInfo);

                    return LayerInputHandle(true, &outputSlot, constantTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
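
// Usage sketch (illustrative only): a typical conversion routine fetches each
// tensor input through ConvertToLayerInputHandle and bails out when the
// returned handle is invalid, e.g.
//
//     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
//     if (!input.IsValid())
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }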


#ifdef ARMNN_ANDROID_NN_V1_3
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const ::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data,
                                           const armnn::PermutationVector& dimensionMappings = g_DontPermute)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        if (IsDynamicTensor(operandTensorInfo))
        {
            data.m_DynamicInputsEncountered = true;

            const uint32_t operandIndex = operation.inputs[inputIndex];

            // Check whether the dynamic input tensor's shape has already been inferred by one
            // of the previously converted layers; if not, we cannot support it
            if (data.m_OutputSlotForOperand.size() > operandIndex && data.m_OutputSlotForOperand[operandIndex])
            {
                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
            }
            else
            {
                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
                return LayerInputHandle();
            }
        }
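
        // A "type 1" dynamic tensor has a shape that can be inferred during conversion from
        // the outputs of previously converted layers; a "type 2" dynamic tensor has a shape
        // that remains unknown until execution time, which is why it is rejected above.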

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           armnn::BackendId(),
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin =
                                    ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);

                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    armnn::BackendId setBackend;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               setBackend,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    constantLayer->SetBackendId(setBackend);
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                    outputSlot.SetTensorInfo(constantTensorInfo);

                    return LayerInputHandle(true, &outputSlot, constantTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
                                  bool inferOutputShapes = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (layerOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    bool isSupported = false;
    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
    {
        // Type one dynamic tensors require the previous layer's output shape for inference
        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
        {
            if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
            {
                return false;
            }
        }
        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if (!isSupported)
        {
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }
            return false;
        }
    }

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];

    if (activationFunction != ActivationFn::kActivationNone)
    {
        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
                                                                     &layer, data);

        if (!endLayer)
        {
            return Fail("%s: ProcessActivation failed", __func__);
        }

        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
    }
    else
    {
        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
    }

    return true;
}
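
// Usage sketch (illustrative only): callers usually pair this helper with a
// validation lambda wrapping FORWARD_LAYER_SUPPORT_FUNC, so that a dynamic
// output shape can be validated once it has been inferred, e.g.
//
//     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
//     {
//         FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                    IsActivationSupported,
//                                    data.m_Backends,
//                                    isSupported,
//                                    setBackend,
//                                    input.GetTensorInfo(),
//                                    outputInfo,
//                                    activationDesc);
//     };
//     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);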

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
                                     uint32_t inputIndex,
                                     const HalModel& model,
                                     ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return armnn::DataLayout::NHWC;
    }

    if (!IsBool(*operand))
    {
        return armnn::DataLayout::NHWC;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return armnn::DataLayout::NHWC;
    }

    if (*(static_cast<const bool*>(valueAddress)))
    {
        return armnn::DataLayout::NCHW;
    }
    else
    {
        return armnn::DataLayout::NHWC;
    }
}
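
// Behaviour sketch: the optional layout operand is a BOOL scalar; true selects
// NCHW, while false, a missing operand or a non-bool operand all fall back to
// the NHWC default, e.g.
//
//     // A pooling operation whose 11th input (index 10) is true runs in NCHW:
//     desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);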

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone)
{
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                   outputIndex,
                                                   layer,
                                                   outputIndex,
                                                   model,
                                                   data,
                                                   overrideOutputInfo,
                                                   validateFunc,
                                                   activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   input.GetTensorInfo(),
                                   outInfo,
                                   activationDesc);
    };

    if (IsDynamicTensor(outInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the ActivationLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
    typename HalOperation = typename HalPolicy::Operation,
    typename HalModel     = typename HalPolicy::Model>
bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
    typename HalOperation = typename HalPolicy::Operation,
    typename HalModel     = typename HalPolicy::Model>
bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A        = 1.0f;
    desc.m_B        = -1.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
    typename HalOperation = typename HalPolicy::Operation,
    typename HalModel     = typename HalPolicy::Model>
bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A        = 6.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
    typename HalOperation = typename HalPolicy::Operation,
    typename HalModel     = typename HalPolicy::Model>
bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::TanH;
    desc.m_A = 1.0f; // Android NN does not support TanH parameters
    desc.m_B = 1.0f; // set to 1.0f for unity scaling

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}
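
// Semantics sketch (per armnn::ActivationFunction): ReLu computes max(0, x);
// BoundedReLu computes min(m_A, max(m_B, x)), so ConvertReLu1 clamps to
// [-1, 1] and ConvertReLu6 (m_B left at its default of 0) clamps to [0, 6];
// TanH computes m_A * tanh(m_B * x), which with both set to 1 is plain tanh(x).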

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool ConvertPaddings(const HalOperation& operation,
                     const HalModel& model,
                     ConversionData& data,
                     unsigned int rank,
                     armnn::PadDescriptor& padDescriptor)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
    }

    // Add padding for each dimension of the input tensor.
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput  = paddings[i + 1];

        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    return true;
}
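
// Layout sketch: the paddings operand is a [rank, 2] INT32 tensor read above as
// a flat row-major vector, so for a rank-2 input the values are interpreted as
//
//     paddings = { beforeDim0, afterDim0, beforeDim1, afterDim1 };
//
// e.g. { 0, 0, 1, 2 } pads nothing on dimension 0 and adds one element before
// and two elements after on dimension 1.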

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding left/right/top/bottom, stride x/y, pool width/height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding scheme, stride x/y, pool width/height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth  = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPooling2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }
    pooling2dLayer->SetBackendId(setBackend);

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
                                                   data, nullptr, validateFunc, activation);
}
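
// Padding sketch: with the implicit (6-parameter) form the pad values are
// derived by CalcPadding from the NNAPI padding scheme. For SAME padding with
// inputWidth = 224, poolWidth = 3 and strideX = 2, the output width is
// ceil(224 / 2) = 112, so the total horizontal padding is
// (112 - 1) * 2 + 3 - 224 = 1, split as m_PadLeft = 0 and m_PadRight = 1.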

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertArgMinMax(const HalOperation& operation,
                      const HalModel& model,
                      ConversionData& data,
                      armnn::ArgMinMaxFunction argMinMaxFunction)
{
    ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));

    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input0.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    int32_t axis;
    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
    {
        return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
    }

    const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
    int rank = static_cast<int>(inputInfo.GetNumDimensions());

    if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
    {
        // Square brackets denote inclusive bounds while parentheses denote exclusive bounds.
        // E.g. a rank 4 tensor can have an axis in the range [-4, 4), where
        // -1 == 3, -2 == 2, -3 == 1 and -4 == 0.
        return Fail("%s: Axis must be in range [-n, n)", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Function = argMinMaxFunction;
    descriptor.m_Axis     = axis;

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsArgMinMaxSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the ArgMinMaxLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input0.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
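
// Axis sketch: negative axes count from the back, so for a rank 4 input an axis
// of -1 addresses the same (innermost) dimension as an axis of 3; the value is
// stored in armnn::ArgMinMaxDescriptor::m_Axis without further normalisation here.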

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo  outputInfo      = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape     = outputInfo.GetShape();
    const bool         isDynamicTensor = IsDynamicTensor(outputInfo);
    //
    // Handle negative concat dims along the lines of TensorFlow, as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }
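
    // E.g. for a 4-D output, concatDim == -1 is normalised to 3 and concatDim == -4
    // to 0; after this, only values in [0, rank) are accepted below.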

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle>   inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool          inputsHaveBeenReshaped = false;
    unsigned int  tensorDimensionsAdded  = 0;
    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            bool isSupported = false;
            armnn::BackendId setBackendReshape;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackendReshape,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);

            if (!isSupported)
            {
                return false;
            }
            armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
            newReshape.SetBackendId(setBackendReshape);

            // Point to the reshape operation rather than the input operation
            operandShape       = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    if (inputShapes.size() != inputHandles.size())
    {
        return Fail("%s: invalid model: input shapes size %zu doesn't match input handles size %zu", __func__,
                    inputShapes.size(), inputHandles.size());
    }

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
            }
        }
        else if (tensorDimensionsAdded == 2)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, 1, outputShape[0]});
            }
        }
    }

    // Check if permutation is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
    bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
                                                         concatDim,
                                                         permutationPair);

    // Only relevant to static tensors; dynamic output tensors will be transposed as a result of inferring from input
    if (!isDynamicTensor)
    {
        if (needPermute)
        {
            outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
        }

        outputInfo.SetShape(outputShape);
    }
    // This is a no-op for identity swizzles; otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
                                                                   inputShapes.end(),
                                                                   concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!isDynamicTensor)
    {
        if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
        {
            return Fail("%s: Error validating the output shape for concat", __func__);
        }
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported  = false;
    armnn::BackendId setBackendConcat;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConcatSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackendConcat,
                                   inputTensorInfos,
                                   outputInfo,
                                   concatDescriptor);
    };

    if (!isDynamicTensor)
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the ConcatLayer", __func__);
    }
    layer->SetBackendId(setBackendConcat);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const unsigned int numInputSlots = layer->GetNumInputSlots();

    if (numInputSlots != inputHandles.size())
    {
        return Fail("%s: invalid model: input slots size %u doesn't match input handles size %zu", __func__,
                    numInputSlots, inputHandles.size());
    }
    for (unsigned int i = 0; i < numInputSlots; ++i)
    {
        // Connect the input directly to the merge (concat) layer
        inputHandles[i].Connect(layer->GetInputSlot(i));
    }

    // Transpose the output shape
    auto transposeOutputShape = [&]()
    {
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;
        armnn::TensorInfo inputTransposeInfo  = layer->GetOutputSlot(0).GetTensorInfo();
        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                 permutationPair.second);
        isSupported = false;
        armnn::BackendId setBackendTranspose;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackendTranspose,
                                   inputTransposeInfo,
                                   outputTransposeInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add the transpose layer and connect the output to it; the transpose becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        deswizzleLayer.SetBackendId(setBackendTranspose);
        layer = &deswizzleLayer;

        return true;
    };

    if (needPermute && !isDynamicTensor)
    {
        if (!transposeOutputShape())
        {
            return false;
        }
    }

    if (inputsHaveBeenReshaped)
    {
        if (isDynamicTensor)
        {
            // Infer the output shapes of concat if outputs are type 1 dynamic
            if (!layer->GetOutputSlot(0).IsTensorInfoSet())
            {
                return Fail("%s: TensorInfo is not set", __func__);
            }
            if (!ValidateConcatOutputShape(inputShapes,
                                           layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
                                           concatDim))
            {
                return Fail("%s: Error validating the output shape for concat", __func__);
            }
            if (!transposeOutputShape())
            {
                return false;
            }
        }

        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
        // Undo the reshape, knowing the number of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(
                armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
        armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        isSupported = false;
        armnn::BackendId setBackendReshape2;
        auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported)
        {
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackendReshape2,
                                       concatInfo,
                                       afterConcatInfo,
                                       reshapeDescriptor);
        };

        if (!IsDynamicTensor(afterConcatInfo))
        {
            validateReshapeFunc(afterConcatInfo, isSupported);
        }
        else
        {
            isSupported = AreDynamicTensorsSupported();
        }

        if (!isSupported)
        {
            return false;
        }
        layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
        layer->SetBackendId(setBackendReshape2);
        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                       0,
                                                       *layer,
                                                       model,
                                                       data,
                                                       nullptr,
                                                       validateReshapeFunc);
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
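
// Worked example of the reshape handling above (illustrative): concatenating two
// 1-D inputs of shapes [2] and [3] along axis 0 first expands them to [1, 1, 2]
// and [1, 1, 3] (tensorDimensionsAdded == 2), shifts concatDim from 0 to 2,
// concatenates into [1, 1, 5], and finally reshapes the result back down to [5].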

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    biasInput.SanitizeQuantizationScale(weightsInput, input);
    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weightsInfo.GetShape()[2];
        const uint32_t kernelY = weightsInfo.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(biasInfo);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConvolution2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weightsInfo,
                                   biases);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }
    startLayer->SetBackendId(setBackend);

    input.Connect(startLayer->GetInputSlot(0));

    // Connect weights and bias inputs
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}
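
// Input-signature sketch: NNAPI CONV_2D reaches this function either with
// explicit padding (10 inputs: data, weights, bias, pad left/right/top/bottom,
// stride x/y, activation) or with an implicit padding scheme (7 inputs: data,
// weights, bias, padding scheme, stride x/y, activation). In the implicit case
// the pads are computed by CalcPadding from the weights shape, whose
// dimensions 1 and 2 hold the kernel height and width respectively.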
2485 
2486 template<typename HalPolicy,
2487          typename HalOperation   = typename HalPolicy::Operation,
2488          typename HalModel       = typename HalPolicy::Model>
2489 bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2490 {
2491     using HalOperand     = typename HalPolicy::Operand;
2492     using HalOperandType = typename HalPolicy::OperandType;
2493 
2494     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2495     if (!input.IsValid())
2496     {
2497         return Fail("%s: Operation has invalid inputs", __func__);
2498     }
2499 
2500     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2501     unsigned int rank = inputInfo.GetNumDimensions();
2502     if (rank != 4)
2503     {
2504         return Fail("%s: Only inputs with rank 4 are supported", __func__);
2505     }
2506 
2507     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2508     if (!output)
2509     {
2510         return Fail("%s: Could not read output 0", __func__);
2511     }
2512 
2513     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2514 
2515     armnn::DepthToSpaceDescriptor descriptor;
2516 
2517     if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data))
2518     {
2519         return Fail("%s: Could not read input 1 (block size)", __func__);
2520     }
2521     if (descriptor.m_BlockSize <= 1)
2522     {
2523         return Fail("%s: Block size must be greater than 1", __func__);
2524     }
2522 
2523     descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2524     if (Is12OrLaterOperand(*output))
2525     {
2526         descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2527     }
2528 
2529     bool isSupported = false;
2530     armnn::BackendId setBackend;
2531     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2532     {
2533         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2534                                    IsDepthToSpaceSupported,
2535                                    data.m_Backends,
2536                                    isSupported,
2537                                    setBackend,
2538                                    inputInfo,
2539                                    outputInfo,
2540                                    descriptor);
2541     };
2542 
2543     if (!IsDynamicTensor(outputInfo))
2544     {
2545         validateFunc(outputInfo, isSupported);
2546     }
2547     else
2548     {
2549         isSupported = AreDynamicTensorsSupported();
2550     }
2551 
2552     if (!isSupported)
2553     {
2554         return false;
2555     }
2556 
2557     armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2558     if (!layer)
2559     {
2560         return Fail("%s: Could not add the DepthToSpaceLayer", __func__);
2561     }
2562     layer->SetBackendId(setBackend);
2563     input.Connect(layer->GetInputSlot(0));
2564 
2565     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
2566 }
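
// Illustrative sketch (editor's addition; hypothetical helper): the shape
// arithmetic DEPTH_TO_SPACE implies for the NHWC layout used above, assuming
// the channel count divides evenly by blockSize * blockSize.
inline armnn::TensorShape ExampleDepthToSpaceShape(const armnn::TensorShape& nhwc,
                                                   unsigned int blockSize)
{
    // [N, H, W, C] -> [N, H * b, W * b, C / (b * b)]
    return armnn::TensorShape({ nhwc[0],
                                nhwc[1] * blockSize,
                                nhwc[2] * blockSize,
                                nhwc[3] / (blockSize * blockSize) });
}
// E.g. [1, 2, 2, 8] with blockSize 2 becomes [1, 4, 4, 2].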
2567 
2568 template<typename HalPolicy,
2569          typename HalOperation   = typename HalPolicy::Operation,
2570          typename HalModel       = typename HalPolicy::Model>
2571 bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2572 {
2573     using HalOperand     = typename HalPolicy::Operand;
2574     using HalOperandType = typename HalPolicy::OperandType;
2575 
2576     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2577 
2578     if (!input.IsValid())
2579     {
2580         return Fail("%s: Operation has invalid inputs", __func__);
2581     }
2582 
2583     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2584 
2585     if (!output)
2586     {
2587         return Fail("%s: Could not read output 0", __func__);
2588     }
2589 
2590     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
2591     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2592 
2593     // The weights may be constant or a runtime input; either way we need their shape.
2594     // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
2595     const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2596     if (!weightsOperand)
2597     {
2598         return Fail("%s: Could not read weights", __func__);
2599     }
2600     // Basic sanity check on the weights shape.
2601     // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
2602     // [1, filter_height, filter_width, depth_out]
2603     if (weightsOperand->dimensions[0] != 1)
2604     {
2605         return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
2606     }
2607 
2608     armnn::DepthwiseConvolution2dDescriptor desc;
2609     desc.m_DataLayout = armnn::DataLayout::NHWC;
2610 
2611     LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2612     if (!weightsInput.IsValid())
2613     {
2614         return Fail("%s: Operation has invalid inputs", __func__);
2615     }
2616 
2617     const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
2618     if (!biasOperand)
2619     {
2620         return Fail("%s: Could not read bias", __func__);
2621     }
2622 
2623     LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
2624     if (!biasInput.IsValid())
2625     {
2626         return Fail("%s: Operation has invalid inputs", __func__);
2627     }
2628 
2629     biasInput.SanitizeQuantizationScale(weightsInput, input);
2630     armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
2631     armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
2632 
2633     ActivationFn activation;
2634 
2635     if (operation.inputs.size() == 11)
2636     {
2637         if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2638             !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2639             !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2640             !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2641             !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2642             !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
2643             !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data)) // input 9 is the depth multiplier, inferred from the shapes
2644         {
2645             return Fail("%s: Operation has invalid inputs", __func__);
2646         }
2647     }
2648     else if (operation.inputs.size() == 8)
2649     {
2650         android::nn::PaddingScheme paddingScheme;
2651         if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2652             !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2653             !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
2654             !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
2655         {
2656             return Fail("%s: Operation has invalid inputs", __func__);
2657         }
2658 
2659         const uint32_t kernelX = weightsInfo.GetShape()[2];
2660         const uint32_t kernelY = weightsInfo.GetShape()[1];
2661         const uint32_t inputX  = inputInfo.GetShape()[2];
2662         const uint32_t inputY  = inputInfo.GetShape()[1];
2663 
2664         CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2665         CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2666     }
2667     else
2668     {
2669         return Fail("%s: Unsupported number of operation inputs", __func__);
2670     }
2671 
2672     desc.m_BiasEnabled = true;
2673     armnn::Optional<armnn::TensorInfo> biases(biasInfo);
2674 
2675     bool isSupported = false;
2676     armnn::BackendId setBackend;
2677     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2678     {
2679         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2680                                    IsDepthwiseConvolutionSupported,
2681                                    data.m_Backends,
2682                                    isSupported,
2683                                    setBackend,
2684                                    inputInfo,
2685                                    outputInfo,
2686                                    desc,
2687                                    weightsInfo,
2688                                    biases);
2689     };
2690 
2691     if (!IsDynamicTensor(outputInfo))
2692     {
2693         validateFunc(outputInfo, isSupported);
2694     }
2695     else
2696     {
2697         isSupported = AreDynamicTensorsSupported();
2698     }
2699 
2700 
2701     if (!isSupported)
2702     {
2703         return false;
2704     }
2705 
2706     armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
2707     if (!startLayer)
2708     {
2709         return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
2710     }
2711     startLayer->SetBackendId(setBackend);
2712 
2713     input.Connect(startLayer->GetInputSlot(0));
2714 
2715     // Connect weights and bias inputs
2716     weightsInput.Connect(startLayer->GetInputSlot(1));
2717     biasInput.Connect(startLayer->GetInputSlot(2));
2718 
2719     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2720                                                    data, nullptr, validateFunc, activation);
2721 }
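
// Illustrative sketch (editor's addition; hypothetical helper): the relationship
// between the AndroidNN depthwise filter shape validated above and the depth
// multiplier M. Since the filter is [1, H, W, I * M], M is depth_out divided by
// the input channel count, which is why ArmNN can infer it from the shapes.
inline unsigned int ExampleDepthMultiplier(unsigned int filterDepthOut, unsigned int inputChannels)
{
    // Assumes inputChannels is non-zero and divides filterDepthOut exactly,
    // as ANEURALNETWORKS_DEPTHWISE_CONV_2D requires
    return filterDepthOut / inputChannels;
}
// E.g. a [1, 3, 3, 64] filter over a 32-channel input implies M == 2.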
2722 
2723 template<typename HalPolicy,
2724          typename HalOperation = typename HalPolicy::Operation,
2725          typename HalModel     = typename HalPolicy::Model>
2726 bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
2727 {
2728     using HalOperand = typename HalPolicy::Operand;
2729 
2730     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2731     if (!input.IsValid())
2732     {
2733         return Fail("%s: Operation has invalid input", __func__);
2734     }
2735 
2736     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
2737     const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2738     if (quantizationDim.has_value() && quantizationDim.value() != 0)
2739     {
2740         return Fail("%s: Operation has quantization dimension different than 0", __func__);
2741     }
2742 
2743     const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2744     if (!outputOperand)
2745     {
2746         return Fail("%s: Operation has invalid outputs", __func__);
2747     }
2748 
2749     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2750 
2751     bool isSupported = false;
2752     armnn::BackendId setBackend;
2753     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2754     {
2755         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2756                                    IsDequantizeSupported,
2757                                    data.m_Backends,
2758                                    isSupported,
2759                                    setBackend,
2760                                    inputInfo,
2761                                    outputInfo);
2762     };
2763 
2764     if (IsDynamicTensor(outputInfo))
2765     {
2766         isSupported = AreDynamicTensorsSupported();
2767     }
2768     else
2769     {
2770         validateFunc(outputInfo, isSupported);
2771     }
2772 
2773     if (!isSupported)
2774     {
2775         return false;
2776     }
2777 
2778     armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2779     if (!layer)
2780     {
2781         return Fail("%s: Could not add the DequantizeLayer", __func__);
2782     }
2783     layer->SetBackendId(setBackend);
2784     input.Connect(layer->GetInputSlot(0));
2785 
2786     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
2787 }
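
// Illustrative sketch (editor's addition; hypothetical helper): the per-tensor
// affine scheme that a DEQUANTIZE of an ASYMM input undoes. The quantization
// dimension check above exists because this path only handles per-axis scales
// along dimension 0.
inline float ExampleDequantizeAffine(uint8_t quantized, float scale, int32_t zeroPoint)
{
    return (static_cast<int32_t>(quantized) - zeroPoint) * scale;
}
// E.g. q = 132 with scale 0.5 and zeroPoint 128 dequantizes to 2.0f.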
2788 
2789 template<typename HalPolicy,
2790          typename HalOperation = typename HalPolicy::Operation,
2791          typename HalModel     = typename HalPolicy::Model>
2792 bool ConvertElementwiseBinary(const HalOperation& operation,
2793                               const HalModel& model,
2794                               ConversionData& data,
2795                               armnn::BinaryOperation binaryOperation)
2796 {
2797     using HalOperand = typename HalPolicy::Operand;
2798 
2799     ALOGV("HalPolicy::ConvertElementwiseBinary()");
2800     ALOGV("binaryOperation = %s", GetBinaryOperationAsCString(binaryOperation));
2801 
2802     LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2803     LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2804 
2805     if (!input0.IsValid() || !input1.IsValid())
2806     {
2807         return Fail("%s: Operation has invalid inputs", __func__);
2808     }
2809 
2810     // The FuseActivation parameter is always the input index 2, and it should be optional
2811     ActivationFn activationFunction;
2812     if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2813     {
2814         return Fail("%s: Operation has invalid optional input: activation function", __func__);
2815     }
2816 
2817     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2818     if (!output)
2819     {
2820         return Fail("%s: Could not read output", __func__);
2821     }
2822 
2823     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2824 
2825     armnn::ElementwiseBinaryDescriptor descriptor(binaryOperation);
2826 
2827     bool isSupported = false;
2828     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2829     {
2830         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2831                                    IsElementwiseBinarySupported,
2832                                    data.m_Backends,
2833                                    isSupported,
2834                                    armnn::BackendId(),
2835                                    input0.GetTensorInfo(),
2836                                    input1.GetTensorInfo(),
2837                                    outputInfo,
2838                                    binaryOperation);
2839     };
2840 
2841     if (!IsDynamicTensor(outputInfo))
2842     {
2843         validateFunc(outputInfo, isSupported);
2844     }
2845     else
2846     {
2847         isSupported = AreDynamicTensorsSupported();
2848     }
2849 
2850     if (!isSupported)
2851     {
2852         return false;
2853     }
2854 
2855     armnn::IConnectableLayer* layer = data.m_Network->AddElementwiseBinaryLayer(descriptor);
2856     if (!layer)
2857     {
2858         return Fail("%s: Could not add the ElementwiseBinaryLayer", __func__);
2859     }
2860     bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
2861     if (!isReshapeSupported)
2862     {
2863         return false;
2864     }
2865 
2866     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc,
2867                                                    activationFunction);
2868 }
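
// Illustrative sketch (editor's addition; hypothetical helper): the numpy-style
// broadcast rule that BroadcastTensor above is expected to satisfy by reshaping
// the lower-rank input. Dimensions are compared from the trailing end, and a
// dimension of 1 stretches to match its partner.
inline bool ExampleCanBroadcast(const std::vector<unsigned int>& a,
                                const std::vector<unsigned int>& b)
{
    auto itA = a.rbegin();
    auto itB = b.rbegin();
    for (; itA != a.rend() && itB != b.rend(); ++itA, ++itB)
    {
        if (*itA != *itB && *itA != 1 && *itB != 1)
        {
            return false; // incompatible pair, e.g. 3 vs 2
        }
    }
    return true; // e.g. [4, 1, 3] and [2, 1] broadcast to [4, 2, 3]
}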
2869 
2870 
2871 template<typename HalPolicy,
2872          typename HalOperation = typename HalPolicy::Operation,
2873          typename HalModel     = typename HalPolicy::Model>
2874 bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
2875 {
2876     using HalOperand = typename HalPolicy::Operand;
2877 
2878     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2879     if (!input.IsValid())
2880     {
2881         return Fail("%s: Operation has invalid inputs", __func__);
2882     }
2883 
2884     const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2885     if (!outputOperand)
2886     {
2887         return Fail("%s: Operation has invalid outputs", __func__);
2888     }
2889 
2890     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2891 
2892     bool isSupported = false;
2893     armnn::BackendId setBackend;
2894     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2895     {
2896         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2897                                    IsFloorSupported,
2898                                    data.m_Backends,
2899                                    isSupported,
2900                                    setBackend,
2901                                    input.GetTensorInfo(),
2902                                    outputInfo);
2903     };
2904 
2905     if (!IsDynamicTensor(outputInfo))
2906     {
2907         validateFunc(outputInfo, isSupported);
2908     }
2909     else
2910     {
2911         isSupported = AreDynamicTensorsSupported();
2912     }
2913 
2914     if (!isSupported)
2915     {
2916         return false;
2917     }
2918 
2919     armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2920     if (!layer)
2921     {
2922         return Fail("%s: Could not add the FloorLayer", __func__);
2923     }
2924     layer->SetBackendId(setBackend);
2925     input.Connect(layer->GetInputSlot(0));
2926 
2927     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
2928 }
2929 
2930 inline bool IsQSymm8(const V1_0::Operand&)
2931 {
2932     return false;
2933 }
2934 
2935 #if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
2936 
2937 inline bool IsQSymm8(const V1_2::Operand& operand)
2938 {
2939     return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
2940 }
2941 
2942 #endif
2943 
2944 #ifdef ARMNN_ANDROID_NN_V1_3
2945 
2946 inline bool IsQSymm8(const V1_3::Operand& operand)
2947 {
2948     return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
2949 }
2950 
2951 #endif
2952 
2953 enum class DequantizeStatus
2954 {
2955     SUCCESS,
2956     NOT_REQUIRED,
2957     INVALID_OPERAND
2958 };
2959 
2960 using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2961 
2962 template<typename HalPolicy,
2963          typename HalOperation = typename HalPolicy::Operation,
2964          typename HalModel     = typename HalPolicy::Model>
2965 DequantizeResult DequantizeIfRequired(size_t operand_index,
2966                                       const HalOperation& operation,
2967                                       const HalModel& model,
2968                                       const ConversionData& data)
2969 {
2970     using HalOperand = typename HalPolicy::Operand;
2971 
2972     const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
2973     if (!weightsOperand)
2974     {
2975         return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
2976     }
2977 
2978     if (IsOperandConstant<HalPolicy>(*weightsOperand))
2979     {
2980         // Weights are already constant
2981         return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
2982     }
2983 
2984     const size_t weightsInputIndex = operation.inputs[operand_index];
2985 
2986     // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2987     // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
2988     for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
2989     {
2990         // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
2991         const auto& operationIt = getMainModel(model).operations[operationIdx];
2992         if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2993         {
2994             continue;
2995         }
2996 
2997         size_t outOpIndex = weightsInputIndex + 1;
2998         for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
2999         {
3000             outOpIndex = operationIt.outputs[i];
3001         }
3002 
3003         if (outOpIndex != weightsInputIndex)
3004         {
3005             continue;
3006         }
3007 
3008         const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
3009 
3010         if (!operand)
3011         {
3012             return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
3013         }
3014 
3015         if (!IsQSymm8(*operand))
3016         {
3017             // Only supporting dequantize from QSYMM8 to FLOAT
3018             break;
3019         }
3020 
3021         // Allocate a new buffer for the dequantized data and manually dequantize
3022         const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
3023         if (!startValue)
3024         {
3025             // Failed to get the operand address
3026             break;
3027         }
3028 
3029         // TENSOR_QUANT8_SYMM holds signed 8-bit values (its zero point is always 0)
3030         const int8_t* quantizedBuffer = reinterpret_cast<const int8_t*>(startValue);
3031         size_t dequantizedBufferLength = operand->location.length;
3032         const float quantizationScale  = operand->scale;
3033 
3034         auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
3035         float* dstPtr = dequantizedBuffer.get();
3036         if (!dstPtr)
3037         {
3038             return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
3039         }
3040         for (size_t i = 0; i < dequantizedBufferLength; ++i)
3041         {
3042             // Advance through the buffer; writing through a pointer that is reset
3043             // each iteration would only ever fill element 0
3044             dstPtr[i] = quantizedBuffer[i] * quantizationScale;
3045         }
3044 
3045         // Construct tensor info for dequantized ConstTensor
3046         armnn::TensorInfo tensorInfo(operand->dimensions.size(),
3047                                      operand->dimensions.data(),
3048                                      armnn::DataType::Float32);
3049 
3050         return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
3051                  std::move(tensorInfo),
3052                  DequantizeStatus::SUCCESS };
3053     }
3054 
3055     return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
3056 }
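
// Illustrative sketch (editor's addition; hypothetical helpers): the symmetric
// 8-bit scheme the manual loop above undoes. QSYMM8 has no zero point, so both
// directions are a plain scale; clamping to [-128, 127] is omitted for brevity.
inline float ExampleDequantizeQSymm8(int8_t quantized, float scale)
{
    return quantized * scale; // real = q * scale
}
inline int8_t ExampleQuantizeQSymm8(float real, float scale)
{
    const float scaled = real / scale; // assumes scale > 0
    return static_cast<int8_t>(scaled >= 0.0f ? scaled + 0.5f : scaled - 0.5f);
}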
3057 
3058 template<typename HalPolicy,
3059          typename HalOperation = typename HalPolicy::Operation,
3060          typename HalModel     = typename HalPolicy::Model>
3061 ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
3062                                                const HalModel& model,
3063                                                const ConversionData& data,
3064                                                size_t operandIndex,
3065                                                bool optional = false)
3066 {
3067     DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex, operation, model, data);
3068 
3069     DequantizeStatus status = std::get<3>(dequantized);
3070     switch (status)
3071     {
3072         case DequantizeStatus::INVALID_OPERAND:
3073         {
3074             // return invalid const tensor pin
3075             return ConstTensorPin();
3076         }
3077         case DequantizeStatus::NOT_REQUIRED:
3078         {
3079             return ConvertOperationInputToConstTensorPin<HalPolicy>(
3080                 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
3081         }
3082         case DequantizeStatus::SUCCESS:
3083         default:
3084         {
3085             return ConstTensorPin(
3086                 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
3087         }
3088     }
3089 }
3090 
3091 
3092 template<typename HalPolicy,
3093          typename HalOperation = typename HalPolicy::Operation,
3094          typename HalModel     = typename HalPolicy::Model>
3095 bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
3096 {
3097     using HalOperand = typename HalPolicy::Operand;
3098 
3099     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3100     if (!input.IsValid())
3101     {
3102         return Fail("%s: Operation has invalid inputs", __func__);
3103     }
3104 
3105     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3106     if (!output)
3107     {
3108         return Fail("%s: Could not read output 0", __func__);
3109     }
3110 
3111     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
3112     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3113 
3114     LayerInputHandle weightsInput = LayerInputHandle();
3115     const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3116     if (!weightsOperand)
3117     {
3118         return Fail("%s: Could not read weights", __func__);
3119     }
3120 
3121     // If weights are constant a separate constant layer will be created to store data.
3122     // Otherwise handle non const weights as inputs.
3123     weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3124     if (!weightsInput.IsValid())
3125     {
3126         return Fail("%s: Operation has invalid inputs", __func__);
3127     }
3128 
3129     LayerInputHandle biasInput = LayerInputHandle();
3130     const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3131     if (!biasOperand)
3132     {
3133         return Fail("%s: Could not read bias", __func__);
3134     }
3135 
3136     // If bias are constant a separate constant layer will be created to store data.
3137     // Otherwise handle non const bias as inputs.
3138     biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
3139     if (!biasInput.IsValid())
3140     {
3141         return Fail("%s: Operation has invalid inputs", __func__);
3142     }
3143 
3144     armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
3145     armnn::TensorInfo reshapedInfo = inputInfo;
3146     try
3147     {
3148         reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
3149     }
3150     catch (const std::exception& e)
3151     {
3152         return Fail("%s: %s", __func__, e.what());
3153     }
3154 
3155     // Ensure the bias quantization scale matches input scale * weights scale (within 1%, to absorb small float differences)
3156     armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
3157     SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);
3158 
3159     ActivationFn activationFunction;
3160     if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
3161     {
3162         return Fail("%s: Operation has invalid inputs", __func__);
3163     }
3164 
3165     armnn::FullyConnectedDescriptor desc;
3166     desc.m_TransposeWeightMatrix = true;
3167     desc.m_BiasEnabled           = true;
3168     desc.m_ConstantWeights       = IsOperandConstant<HalPolicy>(*weightsOperand);
3169 
3170     bool isSupported = false;
3171     armnn::BackendId setBackend;
3172     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3173     {
3174         if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
3175                                         weightsInfo.GetShape(),
3176                                         outputInfo.GetShape(),
3177                                         desc.m_TransposeWeightMatrix))
3178         {
3179             isSupported = false;
3180             Fail("%s: Expected outputShape does not match actual outputShape", __func__);
3181             return;
3182         }
3183 
3184         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3185                                    IsFullyConnectedSupported,
3186                                    data.m_Backends,
3187                                    isSupported,
3188                                    setBackend,
3189                                    reshapedInfo,
3190                                    outputInfo,
3191                                    weightsInfo,
3192                                    biasInfo,
3193                                    desc);
3194     };
3195 
3196     if (!IsDynamicTensor(outputInfo))
3197     {
3198         validateFunc(outputInfo, isSupported);
3199     }
3200     else
3201     {
3202         isSupported = AreDynamicTensorsSupported();
3203     }
3204 
3205     if (!isSupported)
3206     {
3207         return false;
3208     }
3209 
3210     // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
3211     armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
3212     if (!startLayer)
3213     {
3214         return Fail("%s: AddFullyConnectedLayer failed", __func__);
3215     }
3216     startLayer->SetBackendId(setBackend);
3217 
3214     if (inputInfo.GetNumDimensions() > 2U)
3215     {
3216         armnn::ReshapeDescriptor reshapeDescriptor;
3217         reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
3218 
3219         armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3220         if (!reshapeLayer)
3221         {
3222             return Fail("%s:  could not add the reshapeLayer", __func__);
3223         }
3224         input.Connect(reshapeLayer->GetInputSlot(0));
3225         reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
3226         reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
3227     }
3228     else
3229     {
3230         input.Connect(startLayer->GetInputSlot(0));
3231     }
3232 
3233     // Connect weights and bias inputs
3234     weightsInput.Connect(startLayer->GetInputSlot(1));
3235     biasInput.Connect(startLayer->GetInputSlot(2));
3236 
3237     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3238                                                    data, nullptr, validateFunc, activationFunction);
3239 }
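
// Illustrative sketch (editor's addition; hypothetical helper): the flattening
// that FlattenFullyConnectedInput is assumed to perform above. The input
// collapses to [batchSize, inputSize], where inputSize is the weights' second
// dimension (weights are [numUnits, inputSize] when m_TransposeWeightMatrix is set).
inline armnn::TensorShape ExampleFlattenForFullyConnected(const armnn::TensorShape& input,
                                                          unsigned int inputSize)
{
    unsigned int totalElements = 1;
    for (unsigned int i = 0; i < input.GetNumDimensions(); ++i)
    {
        totalElements *= input[i];
    }
    // Assumes inputSize divides the element count exactly; the real helper
    // throws on a mismatch, which ConvertFullyConnected turns into a Fail
    return armnn::TensorShape({ totalElements / inputSize, inputSize });
}
// E.g. a [2, 4, 8] input against [16, 32] weights flattens to [2, 32].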
3240 
3241 template<typename HalPolicy,
3242          typename HalOperation = typename HalPolicy::Operation,
3243          typename HalModel     = typename HalPolicy::Model>
3244 bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
3245 {
3246     using HalOperand = typename HalPolicy::Operand;
3247 
3248     if (operation.inputs.size() != 1)
3249     {
3250         return Fail("%s: Optional inputs are not supported", __func__);
3251     }
3252 
3253     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3254     if (!input.IsValid())
3255     {
3256         return Fail("%s: Operation has invalid inputs", __func__);
3257     }
3258 
3259     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3260     if (!output)
3261     {
3262         return Fail("%s: Could not read output 0", __func__);
3263     }
3264 
3265     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
3266     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3267 
3268     if (outputInfo.GetNumDimensions() != 4u)
3269     {
3270         return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3271     }
3272 
3273     armnn::L2NormalizationDescriptor desc;
3274     desc.m_DataLayout = armnn::DataLayout::NHWC;
3275 
3276     bool isSupported = false;
3277     armnn::BackendId setBackend;
3278     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3279     {
3280         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3281                                    IsL2NormalizationSupported,
3282                                    data.m_Backends,
3283                                    isSupported,
3284                                    setBackend,
3285                                    inputInfo,
3286                                    outputInfo,
3287                                    desc);
3288     };
3289 
3290     if (!IsDynamicTensor(outputInfo))
3291     {
3292         validateFunc(outputInfo, isSupported);
3293     }
3294     else
3295     {
3296         isSupported = AreDynamicTensorsSupported();
3297     }
3298 
3299     if (!isSupported)
3300     {
3301         return false;
3302     }
3303 
3304     armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
3305     if (!layer)
3306     {
3307         return Fail("%s: Could not add the L2NormalizationLayer", __func__);
3308     }
3309     layer->SetBackendId(setBackend);
3310     input.Connect(layer->GetInputSlot(0));
3311 
3312     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3313 }
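
// Illustrative sketch (editor's addition; hypothetical helper, <cmath> assumed
// available): the per-position maths of L2 normalization along the channel axis
// for the NHWC layout fixed above: each channel vector x becomes x / sqrt(sum(x^2)).
inline void ExampleL2NormalizeChannels(float* channels, unsigned int numChannels)
{
    float sumSquares = 0.0f;
    for (unsigned int c = 0; c < numChannels; ++c)
    {
        sumSquares += channels[c] * channels[c];
    }
    // A production implementation guards against division by zero with an epsilon
    const float invNorm = 1.0f / std::sqrt(sumSquares);
    for (unsigned int c = 0; c < numChannels; ++c)
    {
        channels[c] *= invNorm;
    }
}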
3314 
3315 template<typename HalPolicy,
3316          typename HalOperation = typename HalPolicy::Operation,
3317          typename HalModel     = typename HalPolicy::Model>
3318 bool ConvertLocalResponseNormalization(const HalOperation& operation,
3319                                        const HalModel& model,
3320                                        ConversionData& data)
3321 {
3322     if (operation.inputs.size() != 5)
3323     {
3324         return Fail("%s: Optional inputs are not supported", __func__);
3325     }
3326 
3327     using HalOperand     = typename HalPolicy::Operand;
3328     using HalOperandType = typename HalPolicy::OperandType;
3329 
3330     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3331     if (!input.IsValid())
3332     {
3333         return Fail("%s: Operation has invalid inputs", __func__);
3334     }
3335 
3336     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3337     if (!output)
3338     {
3339         return Fail("%s: Could not read output 0", __func__);
3340     }
3341 
3342     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
3343     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3344 
3345     if (outputInfo.GetNumDimensions() != 4u)
3346     {
3347         return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3348     }
3349 
3350     armnn::NormalizationDescriptor descriptor;
3351     descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
3352     descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3353     descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3354 
3355     if (!input.IsValid() ||
3356         !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
3357         !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3358         !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3359         !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3360     {
3361         return Fail("%s: Operation has invalid inputs", __func__);
3362     }
3363 
3364     // ArmNN expects normSize to be the full size of the normalization
3365     // window rather than the radius as in AndroidNN.
3366     descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3367 
3368     bool isSupported = false;
3369     armnn::BackendId setBackend;
3370     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3371     {
3372         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3373                                    IsNormalizationSupported,
3374                                    data.m_Backends,
3375                                    isSupported,
3376                                    setBackend,
3377                                    inputInfo,
3378                                    outputInfo,
3379                                    descriptor);
3380     };
3381 
3382     if (!IsDynamicTensor(outputInfo))
3383     {
3384         validateFunc(outputInfo, isSupported);
3385     }
3386     else
3387     {
3388         isSupported = AreDynamicTensorsSupported();
3389     }
3390 
3391     if (!isSupported)
3392     {
3393         return false;
3394     }
3395 
3396     armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
3397     if (!layer)
3398     {
3399         return Fail("%s: Could not add the NormalizationLayer", __func__);
3400     }
3401     layer->SetBackendId(setBackend);
3402     input.Connect(layer->GetInputSlot(0));
3403 
3404     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3405 }
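
// Illustrative sketch (editor's addition; hypothetical helper): why m_NormSize
// is rewritten above. AndroidNN supplies the radius of the normalization window,
// while ArmNN expects the full window size across channels: the centre element
// plus the radius on each side.
inline uint32_t ExampleLrnWindowFromRadius(uint32_t radius)
{
    return 1 + 2 * radius; // radius 2 -> a window of 5 channels
}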
3406 
3407 template<typename HalPolicy,
3408          typename HalOperation = typename HalPolicy::Operation,
3409          typename HalModel     = typename HalPolicy::Model>
3410 bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
3411 {
3412     armnn::ActivationDescriptor desc;
3413     desc.m_Function = armnn::ActivationFunction::Sigmoid;
3414 
3415     return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3416 }
3417 
3418 template<typename HalPolicy,
3419          typename HalOperation = typename HalPolicy::Operation,
3420          typename HalModel     = typename HalPolicy::Model>
3421 bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
3422 {
3423     using HalOperand = typename HalPolicy::Operand;
3424 
3425     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3426     if (!input.IsValid())
3427     {
3428         return Fail("%s: Operation has invalid inputs", __func__);
3429     }
3430 
3431     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3432     if (!output)
3433     {
3434         return Fail("%s: Could not read output 0", __func__);
3435     }
3436 
3437     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3438 
3439     const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3440     if (!axisOperand)
3441     {
3442         return Fail("%s: Could not read input 1", __func__);
3443     }
3444 
3445     std::vector<int32_t> axis;
3446     if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3447     {
3448         return Fail("%s: Input 1 has invalid values", __func__);
3449     }
3450 
3451     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3452 
3453     // Convert the axis to unsigned int and remove duplicates.
3454     unsigned int rank = inputInfo.GetNumDimensions();
3455     std::set<unsigned int> uniqueAxis;
3456     std::transform(axis.begin(), axis.end(),
3457                    std::inserter(uniqueAxis, uniqueAxis.begin()),
3458                    [rank](int i) -> unsigned int { return (i + rank) % rank; });
3459 
3460     // Get the "keep dims" flag.
3461     int32_t keepDims = 0;
3462     if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3463     {
3464         return Fail("%s: Could not read input 2", __func__);
3465     }
3466 
3467     armnn::MeanDescriptor descriptor;
3468     descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3469     descriptor.m_KeepDims = keepDims > 0;
3470 
3471     bool isSupported = false;
3472     armnn::BackendId setBackend;
3473     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3474     {
3475         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3476                                    IsMeanSupported,
3477                                    data.m_Backends,
3478                                    isSupported,
3479                                    setBackend,
3480                                    inputInfo,
3481                                    outputInfo,
3482                                    descriptor);
3483     };
3484 
3485     if (!IsDynamicTensor(outputInfo))
3486     {
3487         validateFunc(outputInfo, isSupported);
3488     }
3489     else
3490     {
3491         isSupported = AreDynamicTensorsSupported();
3492     }
3493 
3494     if (!isSupported)
3495     {
3496         return false;
3497     }
3498 
3499     armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
3500     if (!layer)
3501     {
3502         return Fail("%s: Could not add the MeanLayer", __func__);
3503     }
3504     layer->SetBackendId(setBackend);
3505     input.Connect(layer->GetInputSlot(0));
3506 
3507     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3508 }
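
// Illustrative sketch (editor's addition; hypothetical helper): the axis
// normalization used above. Negative axes count from the back, so
// (i + rank) % rank maps -1 to rank - 1, and the std::set then removes
// duplicates such as {-1, 3} on a rank-4 tensor.
inline unsigned int ExampleWrapAxis(int axis, unsigned int rank)
{
    // Assumes axis is in [-rank, rank - 1], as the NNAPI requires
    const int signedRank = static_cast<int>(rank);
    return static_cast<unsigned int>((axis + signedRank) % signedRank);
}
// E.g. ExampleWrapAxis(-1, 4) == 3 and ExampleWrapAxis(3, 4) == 3.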
3509 
3510 template<typename HalPolicy,
3511          typename HalOperation = typename HalPolicy::Operation,
3512          typename HalModel     = typename HalPolicy::Model>
3513 bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
3514 {
3515     using HalOperand = typename HalPolicy::Operand;
3516 
3517     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3518     if (!input.IsValid())
3519     {
3520         return Fail("%s: Operation has invalid inputs", __func__);
3521     }
3522 
3523     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3524     unsigned int rank = inputInfo.GetNumDimensions();
3525 
3526     armnn::PadDescriptor descriptor;
3527     if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3528     {
3529         return Fail("%s: Could not convert paddings", __func__);
3530     }
3531 
3532     // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3533     // the scale and zeroPoint must be the same as input0
3534     // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3535     // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3536     // (QuantizationOffset - QuantizationOffset) * scale = 0.
3537     if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
3538     {
3539         descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3540     }
3541 
3542     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3543     if (!output)
3544     {
3545         return Fail("%s: Could not read output", __func__);
3546     }
3547 
3548     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3549 
3550     bool isSupported = false;
3551     armnn::BackendId setBackend;
3552     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3553     {
3554         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3555                                    IsPadSupported,
3556                                    data.m_Backends,
3557                                    isSupported,
3558                                    setBackend,
3559                                    inputInfo,
3560                                    outputInfo,
3561                                    descriptor);
3562     };
3563 
3564     if (!IsDynamicTensor(outputInfo))
3565     {
3566         validateFunc(outputInfo, isSupported);
3567     }
3568     else
3569     {
3570         isSupported = AreDynamicTensorsSupported();
3571     }
3572 
3573     if (!isSupported)
3574     {
3575         return false;
3576     }
3577 
3578     armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3579     if (!layer)
3580     {
3581         return Fail("%s: Could not add the PadLayer", __func__);
3582     }
3583     layer->SetBackendId(setBackend);
3584     input.Connect(layer->GetInputSlot(0));
3585 
3586     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3587 }
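
// Illustrative sketch (editor's addition; hypothetical helper): why the
// quantized pad value chosen above is the zero point. Dequantizing it yields
// exactly the "logical zero" the spec asks for:
//     real = (q - zeroPoint) * scale = (zeroPoint - zeroPoint) * scale = 0
inline float ExampleDequantizedPadValue(int32_t zeroPoint, float scale)
{
    const int32_t padValue = zeroPoint;    // what ConvertPad stores
    return (padValue - zeroPoint) * scale; // always 0.0f
}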
3588 
3589 template<typename HalPolicy,
3590          typename HalOperation = typename HalPolicy::Operation,
3591          typename HalModel     = typename HalPolicy::Model>
3592 bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
3593 {
3594     using HalOperand = typename HalPolicy::Operand;
3595 
3596     const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3597     const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3598     const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
3599 
3600     if (inputOperand == nullptr
3601         || requestedShapeOperand == nullptr
3602         || outputOperand == nullptr)
3603     {
3604         return Fail("%s: Operation has invalid inputs", __func__);
3605     }
3606 
3607     if (requestedShapeOperand->dimensions.size() != 1)
3608     {
3609         return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3610                     __func__, requestedShapeOperand->dimensions.size());
3611     }
3612 
3613     std::vector<int32_t> targetDimensions;
3614     if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3615     {
3616         return Fail("%s: Could not read values of input 1", __func__);
3617     }
3618 
3619     const Shape inputOperandShape = GetOperandShape(*inputOperand);
3620 
3621     Shape requestedShape;
3622     // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3623     // function that resolves these values into a fully specified tensor shape.
3624     if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3625     {
3626         return Fail("%s: Failed to resolve the requested shape", __func__);
3627     }
3628 
3629     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3630     if (!input.IsValid())
3631     {
3632         return Fail("%s: Could not read input 0", __func__);
3633     }
3634 
3635     armnn::ReshapeDescriptor reshapeDescriptor;
3636     reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3637                                                          requestedShape.dimensions.data());
3638 
3639     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3640 
3641     bool isSupported = false;
3642     armnn::BackendId setBackend;
3643     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3644     {
3645         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3646                                    IsReshapeSupported,
3647                                    data.m_Backends,
3648                                    isSupported,
3649                                    setBackend,
3650                                    input.GetTensorInfo(),
3651                                    outputInfo,
3652                                    reshapeDescriptor);
3653     };
3654 
3655     if (!IsDynamicTensor(outputInfo))
3656     {
3657         validateFunc(outputInfo, isSupported);
3658     }
3659     else
3660     {
3661         isSupported = AreDynamicTensorsSupported();
3662     }
3663 
3664     if (!isSupported)
3665     {
3666         return false;
3667     }
3668 
3669     armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3670     if (!layer)
3671     {
3672         return Fail("%s: Could not add the ReshapeLayer", __func__);
3673     }
3674     layer->SetBackendId(setBackend);
3675     input.Connect(layer->GetInputSlot(0));
3676 
3677     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3678 }
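
// Illustrative sketch (editor's addition; hypothetical helper): the special
// value resolution that reshapePrepare performs above. A single -1 entry is
// inferred from the total element count; everything else must multiply out to
// exactly that count.
inline bool ExampleResolveMinusOne(std::vector<int32_t>& dims, uint32_t totalElements)
{
    uint32_t knownProduct = 1;
    int unknownIndex = -1;
    for (size_t i = 0; i < dims.size(); ++i)
    {
        if (dims[i] == -1)
        {
            if (unknownIndex != -1) { return false; } // at most one -1 allowed
            unknownIndex = static_cast<int>(i);
        }
        else
        {
            knownProduct *= static_cast<uint32_t>(dims[i]);
        }
    }
    if (unknownIndex == -1)
    {
        return knownProduct == totalElements; // fully specified shape
    }
    if (knownProduct == 0 || totalElements % knownProduct != 0)
    {
        return false; // the -1 cannot be resolved to a whole dimension
    }
    dims[static_cast<size_t>(unknownIndex)] = static_cast<int32_t>(totalElements / knownProduct);
    return true;
}
// E.g. 24 elements with requested shape {2, -1, 3} resolves to {2, 4, 3}.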
3679 
3680 template<typename HalPolicy,
3681          typename HalOperation = typename HalPolicy::Operation,
3682          typename HalModel     = typename HalPolicy::Model>
3683 bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
3684 {
3685     using HalOperand = typename HalPolicy::Operand;
3686 
3687     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3688     if (!input.IsValid())
3689     {
3690         return Fail("%s: Operation has invalid inputs", __func__);
3691     }
3692 
3693     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
3694     unsigned int rank = inputInfo.GetNumDimensions();
3695     if (rank > 4)
3696     {
3697         Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3698     }
3699 
3700     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3701     if (!output)
3702     {
3703         return Fail("%s: Could not read output 0", __func__);
3704     }
3705 
3706     if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
3707     {
3708         return Fail("%s: Dynamic output tensors are not supported", __func__);
3709     }
3710 
3711     // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3712     // if the operand index is out of bounds.
3713     const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
3714 
3715     std::vector<int32_t> axis;
3716     if (!axisOperand)
3717     {
3718         for (unsigned int i = 0; i < rank; ++i)
3719         {
3720             axis.push_back(static_cast<int32_t>(i));
3721         }
3722     }
3723     else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3724     {
3725         return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
3726     }
3727 
3728     std::vector<uint32_t> outputDims;
3729     for (unsigned int i = 0; i < rank; i++)
3730     {
3731         bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3732         auto currentDimension = inputInfo.GetShape()[i];
3733         if (skipSqueeze || currentDimension != 1)
3734         {
3735             outputDims.push_back(currentDimension);
3736         }
3737     }
3738 
3739     armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3740 
3741     armnn::TensorInfo outputInfo = inputInfo;
3742     outputInfo.SetShape(outShape);
3743 
3744     armnn::ReshapeDescriptor reshapeDesc;
3745     reshapeDesc.m_TargetShape = outputInfo.GetShape();
3746 
3747     bool isSupported = false;
3748     armnn::BackendId setBackend;
3749     FORWARD_LAYER_SUPPORT_FUNC(__func__,
3750                                IsReshapeSupported,
3751                                data.m_Backends,
3752                                isSupported,
3753                                setBackend,
3754                                inputInfo,
3755                                outputInfo,
3756                                reshapeDesc);
3757 
3758     if (!isSupported)
3759     {
3760         return false;
3761     }
3762 
3763     armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3764     if (!layer)
3765     {
3766         return Fail("%s: Could not add the ReshapeLayer", __func__);
3767     }
3768     layer->SetBackendId(setBackend);
3769     input.Connect(layer->GetInputSlot(0));
3770 
3771     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3772 }
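
// Illustrative sketch (editor's addition; hypothetical helper): the
// output-dimension selection used above. A dimension survives if it is not
// listed in axis, or if it is listed but its extent is not 1, since SQUEEZE
// never drops non-unit dimensions.
inline std::vector<uint32_t> ExampleSqueezeDims(const std::vector<uint32_t>& shape,
                                                const std::vector<int32_t>& axis)
{
    std::vector<uint32_t> out;
    for (uint32_t i = 0; i < shape.size(); ++i)
    {
        const bool listed =
            std::find(axis.begin(), axis.end(), static_cast<int32_t>(i)) != axis.end();
        if (!listed || shape[i] != 1)
        {
            out.push_back(shape[i]);
        }
    }
    return out;
}
// E.g. shape {1, 3, 1, 2} with the default axis {0, 1, 2, 3} squeezes to {3, 2}.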
3773 
3774 template<typename HalPolicy,
3775          typename HalOperation = typename HalPolicy::Operation,
3776          typename HalModel     = typename HalPolicy::Model>
3777 bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
3778 {
3779     using HalOperand = typename HalPolicy::Operand;
3780 
3781     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3782     if (!input.IsValid())
3783     {
3784         return Fail("%s: Operation has invalid inputs", __func__);
3785     }
3786 
3787     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3788     unsigned int rank = inputInfo.GetNumDimensions();
3789     if (rank > 4)
3790     {
3791         Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3792     }
3793 
3794     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3795     if (!output)
3796     {
3797         return Fail("%s: Could not read output 0", __func__);
3798     }
3799 
3800     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3801 
3802     const HalOperand* beginOperand   = GetInputOperand<HalPolicy>(operation, 1, model);
3803     const HalOperand* endOperand     = GetInputOperand<HalPolicy>(operation, 2, model);
3804     const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
3805 
3806     std::vector<int32_t> beginValues;
3807     std::vector<int32_t> endValues;
3808     std::vector<int32_t> stridesValues;
3809 
3810     // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
3811     auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
3812     {
3813         if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3814         {
3815             return false;
3816         }
3817 
3818         if (operandValues.size() != rank)
3819         {
3820             return false;
3821         }
3822 
3823         return true;
3824     };
3825 
3826     if (!ValidateInputOperands(*beginOperand, beginValues)
3827         || !ValidateInputOperands(*endOperand, endValues)
3828         || !ValidateInputOperands(*stridesOperand, stridesValues))
3829     {
3830         return Fail("%s: Operation has invalid input operand", __func__);
3831     }
3832 
3833     // Stride cannot have value '0'
3834     if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3835     {
3836         return Fail("%s: Stride must be non-zero value.", __func__);
3837     }
3838 
3839     armnn::StridedSliceDescriptor descriptor;
3840     descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3841     descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3842     descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3843     descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3844 
    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
    if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsStridedSliceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

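    // For dynamic output tensors, validation is deferred: validateFunc is passed to
    // SetupAndTrackLayerOutputSlot below and runs once the output shape has been inferred.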
    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    // Check that the slice can fit in an inferred output
    armnn::TensorShape inputShape = inputInfo.GetShape();
    for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
    {
        int stride = descriptor.m_Stride[i];

        if (descriptor.m_ShrinkAxisMask & (1 << i))
        {
            // If the difference between the start point and the end point of the slice on an axis being shrunk
            // is greater than 1 then throw an error as the output will not be large enough to hold the slice
            if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
                || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
            {
                return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
            }

            if (stride < 0)
            {
                return Fail("%s: StridedSlice: Stride cannot be negative while ShrinkAxisMask is set.", __func__);
            }
        }
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the StridedSliceLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

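// Converts an NNAPI TRANSPOSE operation to an ArmNN Transpose layer.
// The permutation operand (input 1) is optional; when it is absent the
// dimension order is reversed, matching the NNAPI default.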
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: The permutation tensor is an optional input to TRANSPOSE, therefore we do not
    // want to generate a failure if the operand index is out of bounds.
    const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    std::vector<int32_t> perm(rank);
    if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
    {
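        // No permutation supplied: default to reversing the dimension order,
        // e.g. a rank-4 input uses the permutation [3, 2, 1, 0]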
        for (unsigned int i = rank; i > 0; i--)
        {
            perm[rank - i] = armnn::numeric_cast<int>(i - 1);
        }
    }
    else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
    }

    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);

    armnn::TransposeDescriptor transposeDesc;
    transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   transposeDesc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the TransposeLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

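// Converts an NNAPI BATCH_TO_SPACE_ND operation to an ArmNN BatchToSpaceNd layer.
// Input 1 holds the block sizes for the spatial dimensions; the NNAPI operation
// does not expose crops, so they are fixed to zero below.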
template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalOperand     = typename HalPolicy::Operand,
         typename HalModel       = typename HalPolicy::Model>
bool ConvertBatchToSpaceNd(const HalOperation& operation,
                           const HalModel& model,
                           ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

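    // From HAL 1.2 onwards an optional data layout operand (input 2) may override the NHWC default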
    if (Is12OrLaterOperand(*output))
    {
        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }

    // Setting crops to 0,0 0,0 as it is not supported in the Android NN API
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsBatchToSpaceNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   batchToSpaceNdDesc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the BatchToSpaceNdLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

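// Converts an NNAPI SPACE_TO_BATCH_ND operation to an ArmNN SpaceToBatchNd layer.
// Input 1 holds the block shape (one entry per spatial dimension) and input 2
// the paddings, given as a [before, after] pair for each spatial dimension.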
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    unsigned int spatialDim = rank - 2;

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* paddingsOperand   = GetInputOperand<HalPolicy>(operation, 2, model);
    if (!blockShapeOperand || !paddingsOperand)
    {
        return Fail("%s: Could not read inputs 1 and 2", __func__);
    }

    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
    {
        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
    }

    std::vector<int32_t> blockShape;
    if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
    }

    if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
    }

    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
    }

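    // Each spatial dimension contributes a [padding_before, padding_after] pair;
    // both values must be non-negative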
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput  = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        paddingList.emplace_back(static_cast<unsigned int>(paddingBeforeInput),
                                 static_cast<unsigned int>(paddingAfterInput));
    }

    armnn::SpaceToBatchNdDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());

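    // From HAL 1.2 onwards an optional data layout operand (input 3) may override the NHWC default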
    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSpaceToBatchNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the SpaceToBatchNdLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

} // namespace armnn_driver
#ifdef __clang__
#pragma clang diagnostic pop
#endif