//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>

#include "1.0/FullyConnected.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <armnnUtils/FloatingPointComparison.hpp>

#include <log/log.h>
#include <vector>

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunneeded-internal-declaration"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wunused-variable"
#endif
namespace armnn_driver
{

///
/// Helper classes
///

#ifdef ARMNN_ANDROID_R
using OperandType = android::nn::hal::OperandType;
#endif

#ifdef ARMNN_ANDROID_S
#include <nnapi/Types.h>
#endif


struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    , m_DynamicInputsEncountered(false)
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    bool m_DynamicInputsEncountered;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

    void SanitizeQuantizationScale(LayerInputHandle& weight,
                                   LayerInputHandle& input);

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, isSupported, setBackend, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}

template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32 ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_BOOL8 ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL ||
           type == V1_3::OperandType::TENSOR_BOOL8 ||
           type == V1_3::OperandType::TENSOR_FLOAT16 ||
           type == V1_3::OperandType::TENSOR_FLOAT32 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    if (!reshapeLayer)
    {
        throw armnn::RuntimeException("ReshapeLayer is null");
    }

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

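// Equalises the ranks of the two inputs before connecting them to startLayer: if the ranks differ, the
// lower-rank tensor is reshaped with leading 1-dimensions so that element-wise layers can broadcast.
// Returns false if the required Reshape is not supported by any of the requested backends.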
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    if (!startLayer)
    {
        throw armnn::RuntimeException("StartLayer is null");
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the numbers of dimensions do not match, we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    if (!data.m_Network)
    {
        throw armnn::RuntimeException("Network is null");
    }

    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
    reshapeLayer.SetBackendId(setBackend);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |      |
        //  Reshape   |
        //      \    /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |      |
        //     |   Reshape
        //      \    /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}

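// Converts an Android NN implicit padding scheme (SAME/VALID) into explicit head and tail padding values
// for a single spatial dimension, given the input size, kernel size and stride.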
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif

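// Builds an android::nn::Shape (operand type, dimensions and quantization parameters) from a HAL operand.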
Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so we
// accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // bias is expected to be a 1d tensor, set qdim=0
        biasInfo.SetQuantizationDim(0);

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector SwapDim2And3({ 0U, 1U, 3U, 2U });

// 3D Permutation Vectors
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });

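// Adds a Transpose (swizzle) layer that applies 'mappings' to the given input and returns the new layer;
// the layer's output TensorInfo is set to the permuted shape of the input.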
template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
    if (!layer)
    {
        throw armnn::RuntimeException("TransposeLayer is null");
    }
    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
                               const armnn::TensorShape & outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}

bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

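// Inserts a Transpose (swizzle) layer in front of each input handle and updates 'inputs' and 'inputShapes'
// in place to refer to the swizzled outputs. No-op when 'mapping' is the 4D identity permutation.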
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping,
                   std::vector<armnn::BackendId>& setBackends)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            swizzleLayer.SetBackendId(setBackends[i]);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

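// Checks that a Transpose with 'mapping' is supported for every input on at least one of the requested
// backends and, if so, swizzles the inputs via SwizzleInputs. Returns false if any transpose is unsupported.
// Identity permutations (3D or 4D) are left untouched.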
bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have an IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        std::vector<armnn::BackendId> setBackendsVec;
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackend,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            setBackendsVec.push_back(setBackend);
            if (!isSupported)
            {
                return false;
            }
        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping, setBackendsVec);
    }
    return true;
}

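// Decides whether the inputs of a concatenation need to be permuted so that ArmNN can implement the concat
// with Compute Library subtensors. On return, 'concatDimension' may be remapped and 'permutationPair' holds
// the permutation to apply and its inverse; the return value indicates whether a permute is actually needed.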
bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;

    if (numberOfDimensions < 3)
    {
        return Fail("%s: Invalid numberOfDimensions: %i < 3", __func__, numberOfDimensions);
    }

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 3;
        permutationPair = std::make_pair(SwapDim2And3, SwapDim2And3);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}

} // anonymous namespace

namespace armnn_driver
{

/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

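// Returns a pointer to the operand referenced by the operation's input at 'inputIndex', or nullptr if the
// index is out of range (optionally without logging a failure) or the operand index is invalid for the model.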
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: Invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    if (operation.inputs[inputIndex] >= getMainModel(model).operands.size())
    {
        Fail("%s: invalid model index: %i >= %i", __func__, inputIndex, getMainModel(model).operands.size());
        return nullptr;
    }

    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

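// Returns a pointer to the operand referenced by the operation's output at 'outputIndex', or nullptr if the
// index is out of range or the operand index is invalid for the model.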
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    if (operation.outputs[outputIndex] >= getMainModel(model).operands.size())
    {
        Fail("%s: invalid model index: %i >= %i", __func__, outputIndex, getMainModel(model).operands.size());
        return nullptr;
    }
    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}

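// Returns a read-only pointer to the start of a constant operand's data, either inside model.operandValues
// (CONSTANT_COPY) or inside one of the request memory pools (CONSTANT_REFERENCE). Returns nullptr (without
// logging) when an optional operand has NO_VALUE, and nullptr with a logged failure for any other lifetime.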
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

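// Looks up the type of the operation's input operand at 'inputIndex' and writes it to 'type'. Returns false
// (and logs) if the input index or operand is invalid.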
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    HalOperandLifeTime lifetime = operand.lifetime;

    return lifetime == HalOperandLifeTime::CONSTANT_COPY ||
           lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == HalOperandLifeTime::NO_VALUE;
}

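// Wraps a constant HAL operand in a ConstTensorPin, optionally permuting the data with 'dimensionMappings'
// and/or overriding the tensor shape. Returns an invalid pin on error, or an invalid-but-optional pin when
// an optional operand has no value.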
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);

    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

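// Convenience overload that resolves the operation's input at 'inputIndex' and forwards it to
// ConvertOperandToConstTensorPin. Returns an invalid pin if the input operand cannot be retrieved.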
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}

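// Reads a scalar input of the expected operand type into 'outValue'. When 'optional' is true, missing or
// mismatching inputs are tolerated and 'outValue' is left unchanged; otherwise a failure is logged and
// false is returned.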
template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data,
                    bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!optional && !operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (!optional)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(HalOperandType::INT32).c_str(),
                    toString(HalOperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}


template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

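// Reads the optional dilation X/Y scalars of a convolution operation (starting at 'dilationXIndex') into
// the descriptor, if they are present. Returns true when the inputs are absent or successfully read.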
1084*3e777be0SXin Li template<typename HalPolicy,
1085*3e777be0SXin Li typename ConvolutionDescriptor,
1086*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1087*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
GetOptionalConvolutionDilationParams(const HalOperation & operation,uint32_t dilationXIndex,ConvolutionDescriptor & descriptor,const HalModel & model,const ConversionData & data)1088*3e777be0SXin Li bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1089*3e777be0SXin Li uint32_t dilationXIndex,
1090*3e777be0SXin Li ConvolutionDescriptor& descriptor,
1091*3e777be0SXin Li const HalModel& model,
1092*3e777be0SXin Li const ConversionData& data)
1093*3e777be0SXin Li {
1094*3e777be0SXin Li bool success = true;
1095*3e777be0SXin Li if (operation.inputs.size() >= dilationXIndex + 2)
1096*3e777be0SXin Li {
1097*3e777be0SXin Li success &= GetInputScalar<HalPolicy>(operation,
1098*3e777be0SXin Li dilationXIndex,
1099*3e777be0SXin Li HalPolicy::OperandType::INT32,
1100*3e777be0SXin Li descriptor.m_DilationX,
1101*3e777be0SXin Li model,
1102*3e777be0SXin Li data);
1103*3e777be0SXin Li success &= GetInputScalar<HalPolicy>(operation,
1104*3e777be0SXin Li dilationXIndex + 1,
1105*3e777be0SXin Li HalPolicy::OperandType::INT32,
1106*3e777be0SXin Li descriptor.m_DilationY,
1107*3e777be0SXin Li model,
1108*3e777be0SXin Li data);
1109*3e777be0SXin Li }
1110*3e777be0SXin Li
1111*3e777be0SXin Li return success;
1112*3e777be0SXin Li }
1113*3e777be0SXin Li
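// Reads an optional BOOL input and returns its value. Note that a missing or unreadable operand
// and an explicit 'false' value are indistinguishable here: both return false.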
1114*3e777be0SXin Li template<typename HalPolicy,
1115*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1116*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1117*3e777be0SXin Li bool GetOptionalBool(const HalOperation& operation,
1118*3e777be0SXin Li uint32_t inputIndex,
1119*3e777be0SXin Li const HalModel& model,
1120*3e777be0SXin Li const ConversionData& data)
1121*3e777be0SXin Li {
1122*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
1123*3e777be0SXin Li
1124*3e777be0SXin Li const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1125*3e777be0SXin Li if (!operand)
1126*3e777be0SXin Li {
1127*3e777be0SXin Li return false;
1128*3e777be0SXin Li }
1129*3e777be0SXin Li
1130*3e777be0SXin Li if (!IsBool(*operand))
1131*3e777be0SXin Li {
1132*3e777be0SXin Li return false;
1133*3e777be0SXin Li }
1134*3e777be0SXin Li
1135*3e777be0SXin Li const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1136*3e777be0SXin Li if (!valueAddress)
1137*3e777be0SXin Li {
1138*3e777be0SXin Li return false;
1139*3e777be0SXin Li }
1140*3e777be0SXin Li
1141*3e777be0SXin Li return *(static_cast<const bool*>(valueAddress));
1149*3e777be0SXin Li }
1150*3e777be0SXin Li
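// Copies the payload of a TENSOR_INT32 operand into outValues. The operand's byte length must
// be an exact multiple of sizeof(int32_t).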
1151*3e777be0SXin Li template<typename HalPolicy,
1152*3e777be0SXin Li typename HalOperand = typename HalPolicy::Operand,
1153*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1154*3e777be0SXin Li bool GetTensorInt32Values(const HalOperand& operand,
1155*3e777be0SXin Li std::vector<int32_t>& outValues,
1156*3e777be0SXin Li const HalModel& model,
1157*3e777be0SXin Li const ConversionData& data)
1158*3e777be0SXin Li {
1159*3e777be0SXin Li if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
1160*3e777be0SXin Li {
1161*3e777be0SXin Li return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1162*3e777be0SXin Li }
1163*3e777be0SXin Li
1164*3e777be0SXin Li const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
1165*3e777be0SXin Li if (!startAddress)
1166*3e777be0SXin Li {
1167*3e777be0SXin Li return Fail("%s: failed to get operand address", __func__);
1168*3e777be0SXin Li }
1169*3e777be0SXin Li
1170*3e777be0SXin Li // Check number of bytes is sensible
1171*3e777be0SXin Li const uint32_t numBytes = operand.location.length;
1172*3e777be0SXin Li if (numBytes % sizeof(int32_t) != 0)
1173*3e777be0SXin Li {
1174*3e777be0SXin Li return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1175*3e777be0SXin Li __func__, numBytes, sizeof(int32_t));
1176*3e777be0SXin Li }
1177*3e777be0SXin Li
1178*3e777be0SXin Li outValues.resize(numBytes / sizeof(int32_t));
1179*3e777be0SXin Li memcpy(outValues.data(), startAddress, numBytes);
1180*3e777be0SXin Li return true;
1181*3e777be0SXin Li }
1182*3e777be0SXin Li
1183*3e777be0SXin Li template<typename HalPolicy,
1184*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1185*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1186*3e777be0SXin Li bool GetInputPaddingScheme(const HalOperation& operation,
1187*3e777be0SXin Li uint32_t inputIndex,
1188*3e777be0SXin Li PaddingScheme& outPaddingScheme,
1189*3e777be0SXin Li const HalModel& model,
1190*3e777be0SXin Li const ConversionData& data)
1191*3e777be0SXin Li {
1192*3e777be0SXin Li int32_t paddingSchemeAsInt;
1193*3e777be0SXin Li if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
1194*3e777be0SXin Li {
1195*3e777be0SXin Li return Fail("%s: failed to get padding scheme input value", __func__);
1196*3e777be0SXin Li }
1197*3e777be0SXin Li
1198*3e777be0SXin Li outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1199*3e777be0SXin Li return true;
1200*3e777be0SXin Li }
1201*3e777be0SXin Li
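// Resolves an operation input to a LayerInputHandle. Model inputs and temporaries are looked up
// in m_OutputSlotForOperand, while constant operands are materialised as ArmNN Constant layers.
// An invalid (default-constructed) handle is returned on any failure.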
1202*3e777be0SXin Li template<typename HalPolicy,
1203*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1204*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1205*3e777be0SXin Li LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
1206*3e777be0SXin Li uint32_t inputIndex,
1207*3e777be0SXin Li const HalModel& model,
1208*3e777be0SXin Li ConversionData& data,
1209*3e777be0SXin Li const armnn::PermutationVector& dimensionMappings = g_DontPermute)
1210*3e777be0SXin Li {
1211*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
1212*3e777be0SXin Li using HalOperandType = typename HalPolicy::OperandType;
1213*3e777be0SXin Li using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1214*3e777be0SXin Li
1215*3e777be0SXin Li const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1216*3e777be0SXin Li if (!operand)
1217*3e777be0SXin Li {
1218*3e777be0SXin Li Fail("%s: failed to get input operand %i", __func__, inputIndex);
1219*3e777be0SXin Li return LayerInputHandle();
1220*3e777be0SXin Li }
1221*3e777be0SXin Li
1222*3e777be0SXin Li if (!IsOperandTypeSupportedForTensors(operand->type))
1223*3e777be0SXin Li {
1224*3e777be0SXin Li Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
1225*3e777be0SXin Li return LayerInputHandle();
1226*3e777be0SXin Li }
1227*3e777be0SXin Li
1228*3e777be0SXin Li try
1229*3e777be0SXin Li {
1230*3e777be0SXin Li armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
1231*3e777be0SXin Li if (IsDynamicTensor(operandTensorInfo))
1232*3e777be0SXin Li {
1233*3e777be0SXin Li Fail("%s: dynamic input tensors are not supported", __func__);
1234*3e777be0SXin Li return LayerInputHandle();
1235*3e777be0SXin Li }
1236*3e777be0SXin Li
1237*3e777be0SXin Li switch (operand->lifetime)
1238*3e777be0SXin Li {
1239*3e777be0SXin Li case HalOperandLifeTime::MODEL_INPUT:
1240*3e777be0SXin Li {
1241*3e777be0SXin Li // NOTE: We must check whether we can support the input tensor on at least one
1242*3e777be0SXin Li // of the provided backends; otherwise we cannot convert the operation
1243*3e777be0SXin Li bool isInputSupported = false;
1244*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
1245*3e777be0SXin Li IsInputSupported,
1246*3e777be0SXin Li data.m_Backends,
1247*3e777be0SXin Li isInputSupported,
1248*3e777be0SXin Li armnn::BackendId(),
1249*3e777be0SXin Li operandTensorInfo);
1250*3e777be0SXin Li
1251*3e777be0SXin Li if (!isInputSupported)
1252*3e777be0SXin Li {
1253*3e777be0SXin Li Fail("%s: unsupported input tensor", __func__);
1254*3e777be0SXin Li return LayerInputHandle();
1255*3e777be0SXin Li }
1256*3e777be0SXin Li
1257*3e777be0SXin Li [[clang::fallthrough]]; // intentional fallthrough
1258*3e777be0SXin Li }
1259*3e777be0SXin Li case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
1260*3e777be0SXin Li case HalOperandLifeTime::MODEL_OUTPUT:
1261*3e777be0SXin Li {
1262*3e777be0SXin Li // The tensor is either an operand internal to the model, or a model input.
1263*3e777be0SXin Li // It can be associated with an ArmNN output slot for an existing layer.
1264*3e777be0SXin Li
1265*3e777be0SXin Li // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1266*3e777be0SXin Li const uint32_t operandIndex = operation.inputs[inputIndex];
1267*3e777be0SXin Li return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
1268*3e777be0SXin Li }
1269*3e777be0SXin Li case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
1270*3e777be0SXin Li case HalOperandLifeTime::CONSTANT_REFERENCE:
1271*3e777be0SXin Li {
1272*3e777be0SXin Li // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
1273*3e777be0SXin Li ConstTensorPin tensorPin =
1274*3e777be0SXin Li ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);
1275*3e777be0SXin Li
1276*3e777be0SXin Li if (tensorPin.IsValid())
1277*3e777be0SXin Li {
1278*3e777be0SXin Li bool isSupported = false;
1279*3e777be0SXin Li armnn::BackendId setBackend;
1280*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
1281*3e777be0SXin Li IsConstantSupported,
1282*3e777be0SXin Li data.m_Backends,
1283*3e777be0SXin Li isSupported,
1284*3e777be0SXin Li setBackend,
1285*3e777be0SXin Li tensorPin.GetConstTensor().GetInfo());
1286*3e777be0SXin Li if (!isSupported)
1287*3e777be0SXin Li {
1288*3e777be0SXin Li return LayerInputHandle();
1289*3e777be0SXin Li }
1290*3e777be0SXin Li
1291*3e777be0SXin Li armnn::IConnectableLayer* constantLayer =
1292*3e777be0SXin Li data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
1293*3e777be0SXin Li constantLayer->SetBackendId(setBackend);
1294*3e777be0SXin Li armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
1295*3e777be0SXin Li armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
1296*3e777be0SXin Li outputSlot.SetTensorInfo(constantTensorInfo);
1297*3e777be0SXin Li
1298*3e777be0SXin Li return LayerInputHandle(true, &outputSlot, constantTensorInfo);
1299*3e777be0SXin Li }
1300*3e777be0SXin Li else
1301*3e777be0SXin Li {
1302*3e777be0SXin Li Fail("%s: invalid operand tensor", __func__);
1303*3e777be0SXin Li return LayerInputHandle();
1304*3e777be0SXin Li }
1305*3e777be0SXin Li }
1306*3e777be0SXin Li default:
1307*3e777be0SXin Li {
1308*3e777be0SXin Li // Unsupported lifetime for an input tensor
1309*3e777be0SXin Li Fail("%s: unsupported lifetime for input tensor: %s",
1310*3e777be0SXin Li __func__, toString(operand->lifetime).c_str());
1311*3e777be0SXin Li return LayerInputHandle();
1312*3e777be0SXin Li }
1313*3e777be0SXin Li }
1314*3e777be0SXin Li }
1315*3e777be0SXin Li catch (UnsupportedOperand<HalOperandType>& e)
1316*3e777be0SXin Li {
1317*3e777be0SXin Li Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1318*3e777be0SXin Li return LayerInputHandle();
1319*3e777be0SXin Li }
1320*3e777be0SXin Li }
1321*3e777be0SXin Li
1322*3e777be0SXin Li
1323*3e777be0SXin Li #ifdef ARMNN_ANDROID_NN_V1_3
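// HAL 1.3 overload of ConvertToLayerInputHandle. It follows the generic version above, but
// additionally accepts dynamic (type 1) input tensors whose shapes have already been inferred
// by a previously converted layer.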
1324*3e777be0SXin Li template<typename HalPolicy>
1325*3e777be0SXin Li LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
1326*3e777be0SXin Li uint32_t inputIndex,
1327*3e777be0SXin Li const ::android::hardware::neuralnetworks::V1_3::Model& model,
1328*3e777be0SXin Li ConversionData& data,
1329*3e777be0SXin Li const armnn::PermutationVector& dimensionMappings = g_DontPermute)
1330*3e777be0SXin Li {
1331*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
1332*3e777be0SXin Li using HalOperandType = typename HalPolicy::OperandType;
1333*3e777be0SXin Li using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1334*3e777be0SXin Li
1335*3e777be0SXin Li const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1336*3e777be0SXin Li if (!operand)
1337*3e777be0SXin Li {
1338*3e777be0SXin Li Fail("%s: failed to get input operand %i", __func__, inputIndex);
1339*3e777be0SXin Li return LayerInputHandle();
1340*3e777be0SXin Li }
1341*3e777be0SXin Li
1342*3e777be0SXin Li if (!IsOperandTypeSupportedForTensors(operand->type))
1343*3e777be0SXin Li {
1344*3e777be0SXin Li Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
1345*3e777be0SXin Li return LayerInputHandle();
1346*3e777be0SXin Li }
1347*3e777be0SXin Li
1348*3e777be0SXin Li try
1349*3e777be0SXin Li {
1350*3e777be0SXin Li armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
1351*3e777be0SXin Li
1352*3e777be0SXin Li if (IsDynamicTensor(operandTensorInfo))
1353*3e777be0SXin Li {
1354*3e777be0SXin Li data.m_DynamicInputsEncountered = true;
1355*3e777be0SXin Li
1356*3e777be0SXin Li const uint32_t operandIndex = operation.inputs[inputIndex];
1357*3e777be0SXin Li
1358*3e777be0SXin Li // Check if the dynamic input tensors have been inferred by one of the previous layers
1359*3e777be0SXin Li // If not we can't support them
1360*3e777be0SXin Li if (data.m_OutputSlotForOperand.size() > operandIndex && data.m_OutputSlotForOperand[operandIndex])
1361*3e777be0SXin Li {
1362*3e777be0SXin Li operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
1363*3e777be0SXin Li }
1364*3e777be0SXin Li else
1365*3e777be0SXin Li {
1366*3e777be0SXin Li Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
1367*3e777be0SXin Li return LayerInputHandle();
1368*3e777be0SXin Li }
1369*3e777be0SXin Li }
1370*3e777be0SXin Li
1371*3e777be0SXin Li switch (operand->lifetime)
1372*3e777be0SXin Li {
1373*3e777be0SXin Li case HalOperandLifeTime::SUBGRAPH_INPUT:
1374*3e777be0SXin Li {
1375*3e777be0SXin Li // NOTE: We must check whether we can support the input tensor on at least one
1376*3e777be0SXin Li // of the provided backends; otherwise we cannot convert the operation
1377*3e777be0SXin Li bool isInputSupported = false;
1378*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
1379*3e777be0SXin Li IsInputSupported,
1380*3e777be0SXin Li data.m_Backends,
1381*3e777be0SXin Li isInputSupported,
1382*3e777be0SXin Li armnn::BackendId(),
1383*3e777be0SXin Li operandTensorInfo);
1384*3e777be0SXin Li
1385*3e777be0SXin Li if (!isInputSupported)
1386*3e777be0SXin Li {
1387*3e777be0SXin Li Fail("%s: unsupported input tensor", __func__);
1388*3e777be0SXin Li return LayerInputHandle();
1389*3e777be0SXin Li }
1390*3e777be0SXin Li
1391*3e777be0SXin Li [[clang::fallthrough]]; // intentional fallthrough
1392*3e777be0SXin Li }
1393*3e777be0SXin Li case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
1394*3e777be0SXin Li case HalOperandLifeTime::SUBGRAPH_OUTPUT:
1395*3e777be0SXin Li {
1396*3e777be0SXin Li // The tensor is either an operand internal to the model, or a model input.
1397*3e777be0SXin Li // It can be associated with an ArmNN output slot for an existing layer.
1398*3e777be0SXin Li
1399*3e777be0SXin Li // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1400*3e777be0SXin Li const uint32_t operandIndex = operation.inputs[inputIndex];
1401*3e777be0SXin Li return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
1402*3e777be0SXin Li }
1403*3e777be0SXin Li case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
1404*3e777be0SXin Li case HalOperandLifeTime::CONSTANT_REFERENCE:
1405*3e777be0SXin Li {
1406*3e777be0SXin Li // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
1407*3e777be0SXin Li ConstTensorPin tensorPin =
1408*3e777be0SXin Li ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);
1409*3e777be0SXin Li
1410*3e777be0SXin Li if (tensorPin.IsValid())
1411*3e777be0SXin Li {
1412*3e777be0SXin Li bool isSupported = false;
1413*3e777be0SXin Li armnn::BackendId setBackend;
1414*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
1415*3e777be0SXin Li IsConstantSupported,
1416*3e777be0SXin Li data.m_Backends,
1417*3e777be0SXin Li isSupported,
1418*3e777be0SXin Li setBackend,
1419*3e777be0SXin Li tensorPin.GetConstTensor().GetInfo());
1420*3e777be0SXin Li if (!isSupported)
1421*3e777be0SXin Li {
1422*3e777be0SXin Li return LayerInputHandle();
1423*3e777be0SXin Li }
1424*3e777be0SXin Li
1425*3e777be0SXin Li armnn::IConnectableLayer* constantLayer =
1426*3e777be0SXin Li data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
1427*3e777be0SXin Li constantLayer->SetBackendId(setBackend);
1428*3e777be0SXin Li armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
1429*3e777be0SXin Li armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
1430*3e777be0SXin Li outputSlot.SetTensorInfo(constantTensorInfo);
1431*3e777be0SXin Li
1432*3e777be0SXin Li return LayerInputHandle(true, &outputSlot, constantTensorInfo);
1433*3e777be0SXin Li }
1434*3e777be0SXin Li else
1435*3e777be0SXin Li {
1436*3e777be0SXin Li Fail("%s: invalid operand tensor", __func__);
1437*3e777be0SXin Li return LayerInputHandle();
1438*3e777be0SXin Li }
1439*3e777be0SXin Li break;
1440*3e777be0SXin Li }
1441*3e777be0SXin Li default:
1442*3e777be0SXin Li {
1443*3e777be0SXin Li // Unsupported lifetime for an input tensor
1444*3e777be0SXin Li Fail("%s: unsupported lifetime for input tensor: %s",
1445*3e777be0SXin Li __func__, toString(operand->lifetime).c_str());
1446*3e777be0SXin Li return LayerInputHandle();
1447*3e777be0SXin Li }
1448*3e777be0SXin Li }
1449*3e777be0SXin Li }
1450*3e777be0SXin Li catch (UnsupportedOperand<HalOperandType>& e)
1451*3e777be0SXin Li {
1452*3e777be0SXin Li Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1453*3e777be0SXin Li return LayerInputHandle();
1454*3e777be0SXin Li }
1455*3e777be0SXin Li }
1456*3e777be0SXin Li #endif
1457*3e777be0SXin Li
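// Associates the given output slot of 'layer' with the operation's output operand in
// m_OutputSlotForOperand, validating (and, for dynamic tensors, inferring) the output shape first.
// When an activation function is requested, the layer produced by ProcessActivation is tracked
// instead of the original slot.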
1458*3e777be0SXin Li template<typename HalPolicy,
1459*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1460*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1461*3e777be0SXin Li bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1462*3e777be0SXin Li uint32_t operationOutputIndex,
1463*3e777be0SXin Li armnn::IConnectableLayer& layer,
1464*3e777be0SXin Li uint32_t layerOutputIndex,
1465*3e777be0SXin Li const HalModel& model,
1466*3e777be0SXin Li ConversionData& data,
1467*3e777be0SXin Li const armnn::TensorInfo* overrideOutputInfo = nullptr,
1468*3e777be0SXin Li const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
1469*3e777be0SXin Li const ActivationFn& activationFunction = ActivationFn::kActivationNone,
1470*3e777be0SXin Li bool inferOutputShapes = false)
1471*3e777be0SXin Li {
1472*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
1473*3e777be0SXin Li
1474*3e777be0SXin Li const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
1475*3e777be0SXin Li if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1476*3e777be0SXin Li {
1477*3e777be0SXin Li return false;
1478*3e777be0SXin Li }
1479*3e777be0SXin Li
1480*3e777be0SXin Li armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1481*3e777be0SXin Li if (overrideOutputInfo == nullptr)
1482*3e777be0SXin Li {
1483*3e777be0SXin Li outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
1484*3e777be0SXin Li }
1485*3e777be0SXin Li else
1486*3e777be0SXin Li {
1487*3e777be0SXin Li outputSlot.SetTensorInfo(*overrideOutputInfo);
1488*3e777be0SXin Li }
1489*3e777be0SXin Li
1490*3e777be0SXin Li bool isSupported = false;
1491*3e777be0SXin Li if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
1492*3e777be0SXin Li {
1493*3e777be0SXin Li // Type one dynamic tensors require the previous layer's output shape for inference
1494*3e777be0SXin Li for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1495*3e777be0SXin Li {
1496*3e777be0SXin Li if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
1497*3e777be0SXin Li {
1498*3e777be0SXin Li return false;
1499*3e777be0SXin Li }
1500*3e777be0SXin Li }
1501*3e777be0SXin Li // IsTensorInfoSet will infer the dynamic output shape
1502*3e777be0SXin Li outputSlot.IsTensorInfoSet();
1503*3e777be0SXin Li // Once the shape is inferred we can validate it
1504*3e777be0SXin Li validateFunc(outputSlot.GetTensorInfo(), isSupported);
1505*3e777be0SXin Li
1506*3e777be0SXin Li if(!isSupported)
1507*3e777be0SXin Li {
1508*3e777be0SXin Li for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1509*3e777be0SXin Li {
1510*3e777be0SXin Li layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
1511*3e777be0SXin Li }
1512*3e777be0SXin Li return false;
1513*3e777be0SXin Li }
1514*3e777be0SXin Li }
1515*3e777be0SXin Li
1516*3e777be0SXin Li const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1517*3e777be0SXin Li
1518*3e777be0SXin Li if (activationFunction != ActivationFn::kActivationNone)
1519*3e777be0SXin Li {
1520*3e777be0SXin Li const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
1521*3e777be0SXin Li armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
1522*3e777be0SXin Li &layer, data);
1523*3e777be0SXin Li
1524*3e777be0SXin Li if (!endLayer)
1525*3e777be0SXin Li {
1526*3e777be0SXin Li return Fail("%s: ProcessActivation failed", __func__);
1527*3e777be0SXin Li }
1528*3e777be0SXin Li
1529*3e777be0SXin Li armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
1530*3e777be0SXin Li data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
1531*3e777be0SXin Li }
1532*3e777be0SXin Li else
1533*3e777be0SXin Li {
1534*3e777be0SXin Li data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1535*3e777be0SXin Li }
1536*3e777be0SXin Li
1537*3e777be0SXin Li return true;
1538*3e777be0SXin Li }
1539*3e777be0SXin Li
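// Reads an optional data-layout flag: a BOOL operand where 'true' selects NCHW. A missing,
// non-bool or unreadable operand falls back to the default NHWC layout.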
1540*3e777be0SXin Li template<typename HalPolicy,
1541*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1542*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1543*3e777be0SXin Li armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1544*3e777be0SXin Li uint32_t inputIndex,
1545*3e777be0SXin Li const HalModel& model,
1546*3e777be0SXin Li ConversionData& data)
1547*3e777be0SXin Li {
1548*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
1549*3e777be0SXin Li
1550*3e777be0SXin Li const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1551*3e777be0SXin Li if (!operand)
1552*3e777be0SXin Li {
1553*3e777be0SXin Li return armnn::DataLayout::NHWC;
1554*3e777be0SXin Li }
1555*3e777be0SXin Li
1556*3e777be0SXin Li if (!IsBool(*operand))
1557*3e777be0SXin Li {
1558*3e777be0SXin Li return armnn::DataLayout::NHWC;
1559*3e777be0SXin Li }
1560*3e777be0SXin Li
1561*3e777be0SXin Li const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1562*3e777be0SXin Li if (!valueAddress)
1563*3e777be0SXin Li {
1564*3e777be0SXin Li return armnn::DataLayout::NHWC;
1565*3e777be0SXin Li }
1566*3e777be0SXin Li
1567*3e777be0SXin Li if (*(static_cast<const bool*>(valueAddress)))
1568*3e777be0SXin Li {
1569*3e777be0SXin Li return armnn::DataLayout::NCHW;
1570*3e777be0SXin Li }
1571*3e777be0SXin Li else
1572*3e777be0SXin Li {
1573*3e777be0SXin Li return armnn::DataLayout::NHWC;
1574*3e777be0SXin Li }
1575*3e777be0SXin Li }
1576*3e777be0SXin Li
1577*3e777be0SXin Li template<typename HalPolicy,
1578*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1579*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1580*3e777be0SXin Li bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1581*3e777be0SXin Li uint32_t outputIndex,
1582*3e777be0SXin Li armnn::IConnectableLayer& layer,
1583*3e777be0SXin Li const HalModel& model,
1584*3e777be0SXin Li ConversionData& data,
1585*3e777be0SXin Li const armnn::TensorInfo* overrideOutputInfo = nullptr,
1586*3e777be0SXin Li const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
1587*3e777be0SXin Li const ActivationFn& activationFunction = ActivationFn::kActivationNone)
1588*3e777be0SXin Li {
1589*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1590*3e777be0SXin Li outputIndex,
1591*3e777be0SXin Li layer,
1592*3e777be0SXin Li outputIndex,
1593*3e777be0SXin Li model,
1594*3e777be0SXin Li data,
1595*3e777be0SXin Li overrideOutputInfo,
1596*3e777be0SXin Li validateFunc,
1597*3e777be0SXin Li activationFunction);
1598*3e777be0SXin Li }
1599*3e777be0SXin Li
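// Shared conversion path for the unary activation operations below (RELU, RELU1, RELU6, TANH):
// checks backend support for the supplied descriptor, adds an ActivationLayer and registers its
// output slot.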
1600*3e777be0SXin Li template<typename HalPolicy,
1601*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1602*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1603*3e777be0SXin Li bool ConvertToActivation(const HalOperation& operation,
1604*3e777be0SXin Li const char* operationName,
1605*3e777be0SXin Li const armnn::ActivationDescriptor& activationDesc,
1606*3e777be0SXin Li const HalModel& model,
1607*3e777be0SXin Li ConversionData& data)
1608*3e777be0SXin Li {
1609*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
1610*3e777be0SXin Li
1611*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1612*3e777be0SXin Li if (!input.IsValid())
1613*3e777be0SXin Li {
1614*3e777be0SXin Li return Fail("%s: Input 0 is invalid", operationName);
1615*3e777be0SXin Li }
1616*3e777be0SXin Li
1617*3e777be0SXin Li const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1618*3e777be0SXin Li if (!outputOperand)
1619*3e777be0SXin Li {
1620*3e777be0SXin Li return false;
1621*3e777be0SXin Li }
1622*3e777be0SXin Li
1623*3e777be0SXin Li const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1624*3e777be0SXin Li
1625*3e777be0SXin Li bool isSupported = false;
1626*3e777be0SXin Li armnn::BackendId setBackend;
1627*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
1628*3e777be0SXin Li {
1629*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
1630*3e777be0SXin Li IsActivationSupported,
1631*3e777be0SXin Li data.m_Backends,
1632*3e777be0SXin Li isSupported,
1633*3e777be0SXin Li setBackend,
1634*3e777be0SXin Li input.GetTensorInfo(),
1635*3e777be0SXin Li outInfo,
1636*3e777be0SXin Li activationDesc);
1637*3e777be0SXin Li };
1638*3e777be0SXin Li
1639*3e777be0SXin Li if(IsDynamicTensor(outInfo))
1640*3e777be0SXin Li {
1641*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
1642*3e777be0SXin Li }
1643*3e777be0SXin Li else
1644*3e777be0SXin Li {
1645*3e777be0SXin Li validateFunc(outInfo, isSupported);
1646*3e777be0SXin Li }
1647*3e777be0SXin Li
1648*3e777be0SXin Li if (!isSupported)
1649*3e777be0SXin Li {
1650*3e777be0SXin Li return false;
1651*3e777be0SXin Li }
1652*3e777be0SXin Li
1653*3e777be0SXin Li armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1654*3e777be0SXin Li if (!layer)
1655*3e777be0SXin Li {
1656*3e777be0SXin Li return Fail("%s: Could not add the ActivationLayer", __func__);
1657*3e777be0SXin Li }
1658*3e777be0SXin Li layer->SetBackendId(setBackend);
1659*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
1660*3e777be0SXin Li
1661*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
1662*3e777be0SXin Li }
1663*3e777be0SXin Li
1664*3e777be0SXin Li template<typename HalPolicy,
1665*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1666*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1667*3e777be0SXin Li bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1668*3e777be0SXin Li {
1669*3e777be0SXin Li armnn::ActivationDescriptor desc;
1670*3e777be0SXin Li desc.m_Function = armnn::ActivationFunction::ReLu;
1671*3e777be0SXin Li
1672*3e777be0SXin Li return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1673*3e777be0SXin Li }
1674*3e777be0SXin Li
1675*3e777be0SXin Li template<typename HalPolicy,
1676*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1677*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1678*3e777be0SXin Li bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1679*3e777be0SXin Li {
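// BoundedReLu clamps the input to the range [m_B, m_A]; RELU1 therefore maps to [-1, 1],
// while RELU6 below relies on the default lower bound of 0.0f to give [0, 6].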
1680*3e777be0SXin Li armnn::ActivationDescriptor desc;
1681*3e777be0SXin Li desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1682*3e777be0SXin Li desc.m_A = 1.0f;
1683*3e777be0SXin Li desc.m_B = -1.0f;
1684*3e777be0SXin Li
1685*3e777be0SXin Li return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1686*3e777be0SXin Li }
1687*3e777be0SXin Li
1688*3e777be0SXin Li template<typename HalPolicy,
1689*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1690*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1691*3e777be0SXin Li bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1692*3e777be0SXin Li {
1693*3e777be0SXin Li armnn::ActivationDescriptor desc;
1694*3e777be0SXin Li desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1695*3e777be0SXin Li desc.m_A = 6.0f;
1696*3e777be0SXin Li
1697*3e777be0SXin Li return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1698*3e777be0SXin Li }
1699*3e777be0SXin Li
1700*3e777be0SXin Li template<typename HalPolicy,
1701*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1702*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1703*3e777be0SXin Li bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1704*3e777be0SXin Li {
1705*3e777be0SXin Li armnn::ActivationDescriptor desc;
1706*3e777be0SXin Li desc.m_Function = armnn::ActivationFunction::TanH;
1707*3e777be0SXin Li desc.m_A = 1.0f; // android nn does not support tanH parameters
1708*3e777be0SXin Li desc.m_B = 1.0f; // set to 1.0f for unity scaling
1709*3e777be0SXin Li
1710*3e777be0SXin Li return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1711*3e777be0SXin Li }
1712*3e777be0SXin Li
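// Reads the paddings operand (input 1, expected shape [rank, 2]) and appends one
// (before, after) pair per input dimension to padDescriptor.m_PadList.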
1713*3e777be0SXin Li template<typename HalPolicy,
1714*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1715*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1716*3e777be0SXin Li bool ConvertPaddings(const HalOperation& operation,
1717*3e777be0SXin Li const HalModel& model,
1718*3e777be0SXin Li ConversionData& data,
1719*3e777be0SXin Li unsigned int rank,
1720*3e777be0SXin Li armnn::PadDescriptor& padDescriptor)
1721*3e777be0SXin Li {
1722*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
1723*3e777be0SXin Li
1724*3e777be0SXin Li const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1725*3e777be0SXin Li if (!paddingsOperand)
1726*3e777be0SXin Li {
1727*3e777be0SXin Li return Fail("%s: Could not read paddings operand", __func__);
1728*3e777be0SXin Li }
1729*3e777be0SXin Li
1730*3e777be0SXin Li armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1731*3e777be0SXin Li if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1732*3e777be0SXin Li {
1733*3e777be0SXin Li return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1734*3e777be0SXin Li }
1735*3e777be0SXin Li
1736*3e777be0SXin Li std::vector<int32_t> paddings;
1737*3e777be0SXin Li if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1738*3e777be0SXin Li {
1739*3e777be0SXin Li return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1740*3e777be0SXin Li }
1741*3e777be0SXin Li
1742*3e777be0SXin Li // add padding for each dimension of input tensor.
1743*3e777be0SXin Li for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1744*3e777be0SXin Li {
1745*3e777be0SXin Li int paddingBeforeInput = paddings[i];
1746*3e777be0SXin Li int paddingAfterInput = paddings[i + 1];
1747*3e777be0SXin Li
1748*3e777be0SXin Li if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1749*3e777be0SXin Li {
1750*3e777be0SXin Li return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1751*3e777be0SXin Li }
1752*3e777be0SXin Li
1753*3e777be0SXin Li padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1754*3e777be0SXin Li }
1755*3e777be0SXin Li
1756*3e777be0SXin Li return true;
1757*3e777be0SXin Li }
1758*3e777be0SXin Li
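// Converts a 2D pooling operation. Two NNAPI signatures are handled: the explicit form with
// individual left/right/top/bottom paddings (10 or more inputs) and the implicit form with a
// padding scheme, for which CalcPadding derives the actual padding values.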
1759*3e777be0SXin Li template<typename HalPolicy,
1760*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1761*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1762*3e777be0SXin Li bool ConvertPooling2d(const HalOperation& operation,
1763*3e777be0SXin Li const char* operationName,
1764*3e777be0SXin Li armnn::PoolingAlgorithm poolType,
1765*3e777be0SXin Li const HalModel& model,
1766*3e777be0SXin Li ConversionData& data)
1767*3e777be0SXin Li {
1768*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
1769*3e777be0SXin Li using HalOperandType = typename HalPolicy::OperandType;
1770*3e777be0SXin Li
1771*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1772*3e777be0SXin Li if (!input.IsValid())
1773*3e777be0SXin Li {
1774*3e777be0SXin Li return Fail("%s: Operation could not read input 0", operationName);
1775*3e777be0SXin Li }
1776*3e777be0SXin Li
1777*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1778*3e777be0SXin Li if (!output)
1779*3e777be0SXin Li {
1780*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
1781*3e777be0SXin Li }
1782*3e777be0SXin Li
1783*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1784*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1785*3e777be0SXin Li
1786*3e777be0SXin Li armnn::Pooling2dDescriptor desc;
1787*3e777be0SXin Li desc.m_PoolType = poolType;
1788*3e777be0SXin Li desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
1789*3e777be0SXin Li desc.m_DataLayout = armnn::DataLayout::NHWC;
1790*3e777be0SXin Li
1791*3e777be0SXin Li ActivationFn activation;
1792*3e777be0SXin Li
1793*3e777be0SXin Li auto inputSize = operation.inputs.size();
1794*3e777be0SXin Li
1795*3e777be0SXin Li if (inputSize >= 10)
1796*3e777be0SXin Li {
1797*3e777be0SXin Li // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
1798*3e777be0SXin Li if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
1799*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
1800*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
1801*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
1802*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1803*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1804*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1805*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1806*3e777be0SXin Li !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
1807*3e777be0SXin Li {
1808*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", operationName);
1809*3e777be0SXin Li }
1810*3e777be0SXin Li
1811*3e777be0SXin Li if (Is12OrLaterOperand(*output))
1812*3e777be0SXin Li {
1813*3e777be0SXin Li desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
1814*3e777be0SXin Li }
1815*3e777be0SXin Li }
1816*3e777be0SXin Li else
1817*3e777be0SXin Li {
1818*3e777be0SXin Li // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
1819*3e777be0SXin Li android::nn::PaddingScheme scheme;
1820*3e777be0SXin Li if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
1821*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1822*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1823*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1824*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1825*3e777be0SXin Li !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
1826*3e777be0SXin Li {
1827*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", operationName);
1828*3e777be0SXin Li }
1829*3e777be0SXin Li
1830*3e777be0SXin Li if (Is12OrLaterOperand(*output))
1831*3e777be0SXin Li {
1832*3e777be0SXin Li desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
1833*3e777be0SXin Li }
1834*3e777be0SXin Li
1835*3e777be0SXin Li const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
1836*3e777be0SXin Li const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
1837*3e777be0SXin Li const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
1838*3e777be0SXin Li
1839*3e777be0SXin Li CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
1840*3e777be0SXin Li CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
1841*3e777be0SXin Li }
1842*3e777be0SXin Li
1843*3e777be0SXin Li bool isSupported = false;
1844*3e777be0SXin Li armnn::BackendId setBackend;
1845*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1846*3e777be0SXin Li {
1847*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
1848*3e777be0SXin Li IsPooling2dSupported,
1849*3e777be0SXin Li data.m_Backends,
1850*3e777be0SXin Li isSupported,
1851*3e777be0SXin Li setBackend,
1852*3e777be0SXin Li inputInfo,
1853*3e777be0SXin Li outputInfo,
1854*3e777be0SXin Li desc);
1855*3e777be0SXin Li
1856*3e777be0SXin Li };
1857*3e777be0SXin Li
1858*3e777be0SXin Li if(IsDynamicTensor(outputInfo))
1859*3e777be0SXin Li {
1860*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
1861*3e777be0SXin Li }
1862*3e777be0SXin Li else
1863*3e777be0SXin Li {
1864*3e777be0SXin Li validateFunc(outputInfo, isSupported);
1865*3e777be0SXin Li }
1866*3e777be0SXin Li
1867*3e777be0SXin Li if (!isSupported)
1868*3e777be0SXin Li {
1869*3e777be0SXin Li return false;
1870*3e777be0SXin Li }
1871*3e777be0SXin Li
1872*3e777be0SXin Li armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
1873*3e777be0SXin Li if (!pooling2dLayer)
1874*3e777be0SXin Li {
1875*3e777be0SXin Li return Fail("%s: AddPooling2dLayer failed", __func__);
1876*3e777be0SXin Li }
1877*3e777be0SXin Li pooling2dLayer->SetBackendId(setBackend);
1878*3e777be0SXin Li
1879*3e777be0SXin Li input.Connect(pooling2dLayer->GetInputSlot(0));
1880*3e777be0SXin Li
1886*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
1887*3e777be0SXin Li data, nullptr, validateFunc, activation);
1888*3e777be0SXin Li }
1889*3e777be0SXin Li
1890*3e777be0SXin Li template<typename HalPolicy,
1891*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1892*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1893*3e777be0SXin Li bool ConvertArgMinMax(const HalOperation& operation,
1894*3e777be0SXin Li const HalModel& model,
1895*3e777be0SXin Li ConversionData& data,
1896*3e777be0SXin Li armnn::ArgMinMaxFunction argMinMaxFunction)
1897*3e777be0SXin Li {
1898*3e777be0SXin Li ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1899*3e777be0SXin Li
1900*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
1901*3e777be0SXin Li using HalOperandType = typename HalPolicy::OperandType;
1902*3e777be0SXin Li
1903*3e777be0SXin Li LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1904*3e777be0SXin Li
1905*3e777be0SXin Li if (!input0.IsValid())
1906*3e777be0SXin Li {
1907*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
1908*3e777be0SXin Li }
1909*3e777be0SXin Li
1910*3e777be0SXin Li int32_t axis;
1911*3e777be0SXin Li if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1912*3e777be0SXin Li {
1913*3e777be0SXin Li return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1914*3e777be0SXin Li }
1915*3e777be0SXin Li
1916*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1917*3e777be0SXin Li int rank = static_cast<int>(inputInfo.GetNumDimensions());
1918*3e777be0SXin Li
1919*3e777be0SXin Li if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1920*3e777be0SXin Li {
1921*3e777be0SXin Li // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1922*3e777be0SXin Li // E.g. a rank 4 tensor can have axis in the range [-4, 4), i.e. [-4, 3]
1923*3e777be0SXin Li // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1924*3e777be0SXin Li return Fail("%s: Axis must be in range [-n, n)", __func__);
1925*3e777be0SXin Li }
1926*3e777be0SXin Li
1927*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1928*3e777be0SXin Li if (!output)
1929*3e777be0SXin Li {
1930*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
1931*3e777be0SXin Li }
1932*3e777be0SXin Li
1933*3e777be0SXin Li const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1934*3e777be0SXin Li
1935*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1936*3e777be0SXin Li
1937*3e777be0SXin Li armnn::ArgMinMaxDescriptor descriptor;
1938*3e777be0SXin Li descriptor.m_Function = argMinMaxFunction;
1939*3e777be0SXin Li descriptor.m_Axis = axis;
1940*3e777be0SXin Li
1941*3e777be0SXin Li bool isSupported = false;
1942*3e777be0SXin Li armnn::BackendId setBackend;
1943*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1944*3e777be0SXin Li {
1945*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
1946*3e777be0SXin Li IsArgMinMaxSupported,
1947*3e777be0SXin Li data.m_Backends,
1948*3e777be0SXin Li isSupported,
1949*3e777be0SXin Li setBackend,
1950*3e777be0SXin Li inputInfo0,
1951*3e777be0SXin Li outputInfo,
1952*3e777be0SXin Li descriptor);
1953*3e777be0SXin Li };
1954*3e777be0SXin Li
1955*3e777be0SXin Li if(IsDynamicTensor(outputInfo))
1956*3e777be0SXin Li {
1957*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
1958*3e777be0SXin Li }
1959*3e777be0SXin Li else
1960*3e777be0SXin Li {
1961*3e777be0SXin Li validateFunc(outputInfo, isSupported);
1962*3e777be0SXin Li }
1963*3e777be0SXin Li
1964*3e777be0SXin Li if (!isSupported)
1965*3e777be0SXin Li {
1966*3e777be0SXin Li return false;
1967*3e777be0SXin Li }
1968*3e777be0SXin Li
1969*3e777be0SXin Li armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1970*3e777be0SXin Li if (!layer)
1971*3e777be0SXin Li {
1972*3e777be0SXin Li return Fail("%s: Could not add the ArgMinMaxLayer", __func__);
1973*3e777be0SXin Li }
1974*3e777be0SXin Li layer->SetBackendId(setBackend);
1975*3e777be0SXin Li input0.Connect(layer->GetInputSlot(0));
1976*3e777be0SXin Li
1977*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
1978*3e777be0SXin Li }
1979*3e777be0SXin Li
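// Converts CONCATENATION: the first N-1 inputs are the tensors to join and the last input is
// the concatenation axis. Rank-1 and rank-2 inputs are first reshaped up to rank 3, and the
// inputs are transposed when the chosen axis requires it.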
1980*3e777be0SXin Li template<typename HalPolicy,
1981*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
1982*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
1983*3e777be0SXin Li bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
1984*3e777be0SXin Li {
1985*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
1986*3e777be0SXin Li using HalOperandType = typename HalPolicy::OperandType;
1987*3e777be0SXin Li
1988*3e777be0SXin Li // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
1989*3e777be0SXin Li if (operation.inputs.size() <= 1)
1990*3e777be0SXin Li {
1991*3e777be0SXin Li return Fail("%s: Operation has insufficient arguments", __func__);
1992*3e777be0SXin Li }
1993*3e777be0SXin Li
1994*3e777be0SXin Li // Get inputs and outputs
1995*3e777be0SXin Li const std::size_t numInputTensors = operation.inputs.size() - 1;
1996*3e777be0SXin Li
1997*3e777be0SXin Li int32_t concatDim;
1998*3e777be0SXin Li if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
1999*3e777be0SXin Li {
2000*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2001*3e777be0SXin Li }
2002*3e777be0SXin Li
2003*3e777be0SXin Li const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2004*3e777be0SXin Li if (!outputOperand)
2005*3e777be0SXin Li {
2006*3e777be0SXin Li return Fail("%s: Operation has no outputs", __func__);
2007*3e777be0SXin Li }
2008*3e777be0SXin Li
2009*3e777be0SXin Li armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
2010*3e777be0SXin Li armnn::TensorShape outputShape = outputInfo.GetShape();
2011*3e777be0SXin Li const bool isDynamicTensor = IsDynamicTensor(outputInfo);
2012*3e777be0SXin Li //
2013*3e777be0SXin Li // handle negative concat dims along the lines of tensorflow as described here:
2014*3e777be0SXin Li // https://www.tensorflow.org/api_docs/python/tf/concat
2015*3e777be0SXin Li // "negative axis refers to axis + rank(values)-th dimension"
2016*3e777be0SXin Li //
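// e.g. concatDim == -1 on a rank-4 output resolves to axis 3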
2017*3e777be0SXin Li if (concatDim < 0)
2018*3e777be0SXin Li {
2019*3e777be0SXin Li concatDim += outputShape.GetNumDimensions();
2020*3e777be0SXin Li }
2021*3e777be0SXin Li
2022*3e777be0SXin Li if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
2023*3e777be0SXin Li {
2024*3e777be0SXin Li return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
2025*3e777be0SXin Li }
2026*3e777be0SXin Li
2027*3e777be0SXin Li std::vector<LayerInputHandle> inputHandles;
2028*3e777be0SXin Li std::vector<armnn::TensorShape> inputShapes;
2029*3e777be0SXin Li
2030*3e777be0SXin Li inputHandles.reserve(numInputTensors);
2031*3e777be0SXin Li inputShapes.reserve(numInputTensors);
2032*3e777be0SXin Li
2033*3e777be0SXin Li bool inputsHaveBeenReshaped = false;
2034*3e777be0SXin Li unsigned int tensorDimensionsAdded = 0;
2035*3e777be0SXin Li for (uint32_t i = 0; i < numInputTensors; ++i)
2036*3e777be0SXin Li {
2037*3e777be0SXin Li const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
2038*3e777be0SXin Li if (!operand)
2039*3e777be0SXin Li {
2040*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2041*3e777be0SXin Li }
2042*3e777be0SXin Li
2043*3e777be0SXin Li LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
2044*3e777be0SXin Li if (!operandInputHandle.IsValid())
2045*3e777be0SXin Li {
2046*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2047*3e777be0SXin Li }
2048*3e777be0SXin Li
2049*3e777be0SXin Li armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
2050*3e777be0SXin Li if (operandShape.GetNumDimensions() == 0)
2051*3e777be0SXin Li {
2052*3e777be0SXin Li return Fail("%s: Operands with rank 0 are not supported", __func__);
2053*3e777be0SXin Li }
2054*3e777be0SXin Li
2055*3e777be0SXin Li if (RequiresReshape(operandShape))
2056*3e777be0SXin Li {
2057*3e777be0SXin Li inputsHaveBeenReshaped = true;
2058*3e777be0SXin Li
2059*3e777be0SXin Li armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
2060*3e777be0SXin Li
2061*3e777be0SXin Li // Expand the tensor to three dimensions
2062*3e777be0SXin Li if (operandShape.GetNumDimensions() == 2)
2063*3e777be0SXin Li {
2064*3e777be0SXin Li reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
2065*3e777be0SXin Li tensorDimensionsAdded = 1;
2066*3e777be0SXin Li }
2067*3e777be0SXin Li else
2068*3e777be0SXin Li {
2069*3e777be0SXin Li reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
2070*3e777be0SXin Li tensorDimensionsAdded = 2;
2071*3e777be0SXin Li }
2072*3e777be0SXin Li
2073*3e777be0SXin Li armnn::ReshapeDescriptor reshapeDescriptor;
2074*3e777be0SXin Li reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
2075*3e777be0SXin Li
2076*3e777be0SXin Li bool isSupported = false;
2077*3e777be0SXin Li armnn::BackendId setBackendReshape;
2078*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
2079*3e777be0SXin Li IsReshapeSupported,
2080*3e777be0SXin Li data.m_Backends,
2081*3e777be0SXin Li isSupported,
2082*3e777be0SXin Li setBackendReshape,
2083*3e777be0SXin Li operandInputHandle.GetTensorInfo(),
2084*3e777be0SXin Li reshapeInfo,
2085*3e777be0SXin Li reshapeDescriptor);
2086*3e777be0SXin Li
2087*3e777be0SXin Li if (!isSupported)
2088*3e777be0SXin Li {
2089*3e777be0SXin Li return false;
2090*3e777be0SXin Li }
2091*3e777be0SXin Li armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
2092*3e777be0SXin Li newReshape.SetBackendId(setBackendReshape);
2093*3e777be0SXin Li
2094*3e777be0SXin Li // Point to the reshape operation rather than the input operation
2095*3e777be0SXin Li operandShape = reshapeInfo.GetShape();
2096*3e777be0SXin Li operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
2097*3e777be0SXin Li }
2098*3e777be0SXin Li
2099*3e777be0SXin Li inputShapes.emplace_back(operandShape);
2100*3e777be0SXin Li inputHandles.emplace_back(operandInputHandle);
2101*3e777be0SXin Li
2102*3e777be0SXin Li if (!inputHandles.back().IsValid())
2103*3e777be0SXin Li {
2104*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2105*3e777be0SXin Li }
2106*3e777be0SXin Li }
2107*3e777be0SXin Li
2108*3e777be0SXin Li if (inputShapes.size() != inputHandles.size())
2109*3e777be0SXin Li {
2110*3e777be0SXin Li return Fail("%s: invalid model input shapes size doesn't match input handles size: %i != %i", __func__,
2111*3e777be0SXin Li inputShapes.size(), inputHandles.size());
2112*3e777be0SXin Li }
2113*3e777be0SXin Li
2114*3e777be0SXin Li if (inputsHaveBeenReshaped)
2115*3e777be0SXin Li {
2116*3e777be0SXin Li // Adjust the concatenation dimension by the amount of dimensions added (if any)
2117*3e777be0SXin Li concatDim += tensorDimensionsAdded;
2118*3e777be0SXin Li
2119*3e777be0SXin Li // Add extra dimensions to the output shape to reflect the addition of the reshape layers
2120*3e777be0SXin Li if (tensorDimensionsAdded == 1)
2121*3e777be0SXin Li {
2122*3e777be0SXin Li if (IsDynamicTensor(outputInfo))
2123*3e777be0SXin Li {
2124*3e777be0SXin Li outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
2125*3e777be0SXin Li }
2126*3e777be0SXin Li else
2127*3e777be0SXin Li {
2128*3e777be0SXin Li outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
2129*3e777be0SXin Li }
2130*3e777be0SXin Li }
2131*3e777be0SXin Li else if (tensorDimensionsAdded == 2)
2132*3e777be0SXin Li {
2133*3e777be0SXin Li if (IsDynamicTensor(outputInfo))
2134*3e777be0SXin Li {
2135*3e777be0SXin Li outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
2136*3e777be0SXin Li }
2137*3e777be0SXin Li else
2138*3e777be0SXin Li {
2139*3e777be0SXin Li outputShape = armnn::TensorShape({1, 1, outputShape[0]});
2140*3e777be0SXin Li }
2141*3e777be0SXin Li }
2142*3e777be0SXin Li }
2143*3e777be0SXin Li
2144*3e777be0SXin Li // Check if permutations is required and get the pair of permutations required for the concatenation.
2145*3e777be0SXin Li // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
2146*3e777be0SXin Li std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
2147*3e777be0SXin Li std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
2148*3e777be0SXin Li bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
2149*3e777be0SXin Li concatDim,
2150*3e777be0SXin Li permutationPair);
2151*3e777be0SXin Li
2152*3e777be0SXin Li // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
2153*3e777be0SXin Li if (!isDynamicTensor)
2154*3e777be0SXin Li {
2155*3e777be0SXin Li if (needPermute)
2156*3e777be0SXin Li {
2157*3e777be0SXin Li outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
2158*3e777be0SXin Li }
2159*3e777be0SXin Li
2160*3e777be0SXin Li outputInfo.SetShape(outputShape);
2161*3e777be0SXin Li }
2162*3e777be0SXin Li // this is no-op for identity swizzles, otherwise it replaces both
2163*3e777be0SXin Li // the handles and shapes with the swizzled layer output handles and shapes
2164*3e777be0SXin Li if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
2165*3e777be0SXin Li {
2166*3e777be0SXin Li return false;
2167*3e777be0SXin Li }
2168*3e777be0SXin Li
2169*3e777be0SXin Li // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
2170*3e777be0SXin Li armnn::OriginsDescriptor concatDescriptor;
2171*3e777be0SXin Li
2172*3e777be0SXin Li try
2173*3e777be0SXin Li {
2174*3e777be0SXin Li // The concat descriptor is always created across the only supported concat dimension
2175*3e777be0SXin Li // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
2176*3e777be0SXin Li concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
2177*3e777be0SXin Li inputShapes.end(),
2178*3e777be0SXin Li concatDim);
    }
    catch (const std::exception& error)
2180*3e777be0SXin Li {
2181*3e777be0SXin Li return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
2182*3e777be0SXin Li }
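    // The resulting OriginsDescriptor records, for each input view, its origin (offset) within the
    // concatenated output, computed from the input shapes and the concatenation axis.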
2183*3e777be0SXin Li
2184*3e777be0SXin Li // Validate the output shape is correct given the input shapes based on the
2185*3e777be0SXin Li // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
2186*3e777be0SXin Li if (!isDynamicTensor)
2187*3e777be0SXin Li {
2188*3e777be0SXin Li if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
2189*3e777be0SXin Li {
2190*3e777be0SXin Li return Fail("%s: Error validating the output shape for concat", __func__);
2191*3e777be0SXin Li }
2192*3e777be0SXin Li }
2193*3e777be0SXin Li
2194*3e777be0SXin Li std::vector<const armnn::TensorInfo*> inputTensorInfos;
2195*3e777be0SXin Li std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
2196*3e777be0SXin Li [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
2197*3e777be0SXin Li
2198*3e777be0SXin Li bool isSupported = false;
2199*3e777be0SXin Li armnn::BackendId setBackendConcat;
2200*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
2201*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
2202*3e777be0SXin Li IsConcatSupported,
2203*3e777be0SXin Li data.m_Backends,
2204*3e777be0SXin Li isSupported,
2205*3e777be0SXin Li setBackendConcat,
2206*3e777be0SXin Li inputTensorInfos,
2207*3e777be0SXin Li outputInfo,
2208*3e777be0SXin Li concatDescriptor);
2209*3e777be0SXin Li };
2210*3e777be0SXin Li
2211*3e777be0SXin Li if (!isDynamicTensor)
2212*3e777be0SXin Li {
2213*3e777be0SXin Li validateFunc(outputInfo, isSupported);
2214*3e777be0SXin Li }
2215*3e777be0SXin Li else
2216*3e777be0SXin Li {
2217*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
2218*3e777be0SXin Li }
2219*3e777be0SXin Li
2220*3e777be0SXin Li if (!isSupported)
2221*3e777be0SXin Li {
2222*3e777be0SXin Li return false;
2223*3e777be0SXin Li }
2224*3e777be0SXin Li
    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the ConcatLayer", __func__);
    }
    layer->SetBackendId(setBackendConcat);
2231*3e777be0SXin Li layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2232*3e777be0SXin Li // Connect inputs to the layer
2233*3e777be0SXin Li const int numInputSlots = layer->GetNumInputSlots();
2234*3e777be0SXin Li
2235*3e777be0SXin Li if (static_cast<std::size_t>(numInputSlots) != inputHandles.size())
2236*3e777be0SXin Li {
        return Fail("%s: invalid model: input slots count (%zu) does not match input handles count (%zu)", __func__,
                    static_cast<std::size_t>(numInputSlots), inputHandles.size());
2239*3e777be0SXin Li }
2240*3e777be0SXin Li for (int i = 0; i < numInputSlots; ++i)
2241*3e777be0SXin Li {
2242*3e777be0SXin Li // connect the input directly to the merge (concat) layer
2243*3e777be0SXin Li inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(static_cast<unsigned int>(i)));
2244*3e777be0SXin Li }
2245*3e777be0SXin Li
2246*3e777be0SXin Li // Transpose the output shape
2247*3e777be0SXin Li auto transposeOutputShape = [&](){
2248*3e777be0SXin Li armnn::TransposeDescriptor transposeDesc;
2249*3e777be0SXin Li transposeDesc.m_DimMappings = permutationPair.second;
2250*3e777be0SXin Li armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
2251*3e777be0SXin Li armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
2252*3e777be0SXin Li permutationPair.second);
2253*3e777be0SXin Li isSupported = false;
2254*3e777be0SXin Li armnn::BackendId setBackendTranspose;
2255*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
2256*3e777be0SXin Li IsTransposeSupported,
2257*3e777be0SXin Li data.m_Backends,
2258*3e777be0SXin Li isSupported,
2259*3e777be0SXin Li setBackendTranspose,
2260*3e777be0SXin Li inputTransposeInfo,
2261*3e777be0SXin Li outputTransposeInfo,
2262*3e777be0SXin Li transposeDesc);
2263*3e777be0SXin Li if (!isSupported)
2264*3e777be0SXin Li {
2265*3e777be0SXin Li return false;
2266*3e777be0SXin Li }
2267*3e777be0SXin Li // Add permutation layer and connect the output to it, the permutation becomes the output layer
2268*3e777be0SXin Li armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
2269*3e777be0SXin Li permutationPair.second);
2270*3e777be0SXin Li deswizzleLayer.SetBackendId(setBackendTranspose);
2271*3e777be0SXin Li layer = &deswizzleLayer;
2272*3e777be0SXin Li
2273*3e777be0SXin Li return true;
2274*3e777be0SXin Li };
2275*3e777be0SXin Li
2276*3e777be0SXin Li if (needPermute && !isDynamicTensor)
2277*3e777be0SXin Li {
2278*3e777be0SXin Li transposeOutputShape();
2279*3e777be0SXin Li }
2280*3e777be0SXin Li
2281*3e777be0SXin Li if (inputsHaveBeenReshaped)
2282*3e777be0SXin Li {
2283*3e777be0SXin Li if (isDynamicTensor)
2284*3e777be0SXin Li {
2285*3e777be0SXin Li // Infer the output shapes of concat if outputs are type 1 dynamic
2286*3e777be0SXin Li if (!layer->GetOutputSlot(0).IsTensorInfoSet())
2287*3e777be0SXin Li {
2288*3e777be0SXin Li return Fail("%s: TensorInfo is not set", __func__);
2289*3e777be0SXin Li }
2290*3e777be0SXin Li if (!ValidateConcatOutputShape(inputShapes,
2291*3e777be0SXin Li layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
2292*3e777be0SXin Li concatDim))
2293*3e777be0SXin Li {
2294*3e777be0SXin Li return Fail("%s: Error validating the output shape for concat", __func__);
2295*3e777be0SXin Li }
2296*3e777be0SXin Li transposeOutputShape();
2297*3e777be0SXin Li }
2298*3e777be0SXin Li
2299*3e777be0SXin Li armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
2300*3e777be0SXin Li // Undo the reshape knowing the amount of dimensions added
2301*3e777be0SXin Li if (tensorDimensionsAdded == 1)
2302*3e777be0SXin Li {
2303*3e777be0SXin Li afterConcatInfo.SetShape(
2304*3e777be0SXin Li armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
2305*3e777be0SXin Li }
2306*3e777be0SXin Li else if (tensorDimensionsAdded == 2)
2307*3e777be0SXin Li {
2308*3e777be0SXin Li afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
2309*3e777be0SXin Li }
2310*3e777be0SXin Li
2311*3e777be0SXin Li armnn::ReshapeDescriptor reshapeDescriptor;
2312*3e777be0SXin Li reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
2313*3e777be0SXin Li armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
2314*3e777be0SXin Li
2315*3e777be0SXin Li isSupported = false;
2316*3e777be0SXin Li armnn::BackendId setBackendReshape2;
2317*3e777be0SXin Li auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
2318*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
2319*3e777be0SXin Li IsReshapeSupported,
2320*3e777be0SXin Li data.m_Backends,
2321*3e777be0SXin Li isSupported,
2322*3e777be0SXin Li setBackendReshape2,
2323*3e777be0SXin Li concatInfo,
2324*3e777be0SXin Li afterConcatInfo,
2325*3e777be0SXin Li reshapeDescriptor);
2326*3e777be0SXin Li };
2327*3e777be0SXin Li
2328*3e777be0SXin Li if (!IsDynamicTensor(afterConcatInfo))
2329*3e777be0SXin Li {
2330*3e777be0SXin Li validateReshapeFunc(afterConcatInfo, isSupported);
2331*3e777be0SXin Li }
2332*3e777be0SXin Li else
2333*3e777be0SXin Li {
2334*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
2335*3e777be0SXin Li }
2336*3e777be0SXin Li
2337*3e777be0SXin Li if (!isSupported)
2338*3e777be0SXin Li {
2339*3e777be0SXin Li return false;
2340*3e777be0SXin Li }
2341*3e777be0SXin Li layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
2342*3e777be0SXin Li layer->SetBackendId(setBackendReshape2);
2343*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
2344*3e777be0SXin Li 0,
2345*3e777be0SXin Li *layer,
2346*3e777be0SXin Li model,
2347*3e777be0SXin Li data,
2348*3e777be0SXin Li nullptr,
2349*3e777be0SXin Li validateReshapeFunc);
2350*3e777be0SXin Li }
2351*3e777be0SXin Li
2352*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
2353*3e777be0SXin Li }
2354*3e777be0SXin Li
2355*3e777be0SXin Li template<typename HalPolicy,
2356*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
2357*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2359*3e777be0SXin Li {
2360*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
2361*3e777be0SXin Li using HalOperandType = typename HalPolicy::OperandType;
2362*3e777be0SXin Li
2363*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2364*3e777be0SXin Li if (!input.IsValid())
2365*3e777be0SXin Li {
2366*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2367*3e777be0SXin Li }
2368*3e777be0SXin Li
2369*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2370*3e777be0SXin Li if (!output)
2371*3e777be0SXin Li {
2372*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
2373*3e777be0SXin Li }
2374*3e777be0SXin Li
2375*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2376*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2377*3e777be0SXin Li
2378*3e777be0SXin Li LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2379*3e777be0SXin Li if (!weightsInput.IsValid())
2380*3e777be0SXin Li {
2381*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2382*3e777be0SXin Li }
2383*3e777be0SXin Li
2384*3e777be0SXin Li LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
2385*3e777be0SXin Li if (!biasInput.IsValid())
2386*3e777be0SXin Li {
2387*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2388*3e777be0SXin Li }
2389*3e777be0SXin Li
2390*3e777be0SXin Li biasInput.SanitizeQuantizationScale(weightsInput, input);
2391*3e777be0SXin Li armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
2392*3e777be0SXin Li armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
2393*3e777be0SXin Li
2394*3e777be0SXin Li armnn::Convolution2dDescriptor desc;
2395*3e777be0SXin Li desc.m_DataLayout = armnn::DataLayout::NHWC;
2396*3e777be0SXin Li ActivationFn activation;
2397*3e777be0SXin Li
2398*3e777be0SXin Li if (operation.inputs.size() == 10)
2399*3e777be0SXin Li {
2400*3e777be0SXin Li if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2401*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2402*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2403*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2404*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2405*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
2406*3e777be0SXin Li !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
2407*3e777be0SXin Li {
2408*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2409*3e777be0SXin Li }
2410*3e777be0SXin Li }
2411*3e777be0SXin Li else if (operation.inputs.size() == 7)
2412*3e777be0SXin Li {
2413*3e777be0SXin Li android::nn::PaddingScheme paddingScheme;
2414*3e777be0SXin Li if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2415*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2416*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
2417*3e777be0SXin Li !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
2418*3e777be0SXin Li {
2419*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2420*3e777be0SXin Li }
2421*3e777be0SXin Li
2422*3e777be0SXin Li const uint32_t kernelX = weightsInfo.GetShape()[2];
2423*3e777be0SXin Li const uint32_t kernelY = weightsInfo.GetShape()[1];
2424*3e777be0SXin Li const uint32_t inputX = inputInfo.GetShape()[2];
2425*3e777be0SXin Li const uint32_t inputY = inputInfo.GetShape()[1];
2426*3e777be0SXin Li
2427*3e777be0SXin Li CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2428*3e777be0SXin Li CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
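        // Illustrative example (assuming Android NN "SAME" padding semantics): for inputX = 224,
        // kernelX = 3 and strideX = 2 the output width is ceil(224 / 2) = 112, so the total horizontal
        // padding is (112 - 1) * 2 + 3 - 224 = 1, split as m_PadLeft = 0 and m_PadRight = 1.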
2429*3e777be0SXin Li }
2430*3e777be0SXin Li else
2431*3e777be0SXin Li {
2432*3e777be0SXin Li return Fail("%s: Unsupported number of operation inputs", __func__);
2433*3e777be0SXin Li }
2434*3e777be0SXin Li
2435*3e777be0SXin Li desc.m_BiasEnabled = true;
2436*3e777be0SXin Li armnn::Optional<armnn::TensorInfo> biases(biasInfo);
2437*3e777be0SXin Li
2438*3e777be0SXin Li bool isSupported = false;
2439*3e777be0SXin Li armnn::BackendId setBackend;
2440*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2441*3e777be0SXin Li {
2442*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
2443*3e777be0SXin Li IsConvolution2dSupported,
2444*3e777be0SXin Li data.m_Backends,
2445*3e777be0SXin Li isSupported,
2446*3e777be0SXin Li setBackend,
2447*3e777be0SXin Li inputInfo,
2448*3e777be0SXin Li outputInfo,
2449*3e777be0SXin Li desc,
2450*3e777be0SXin Li weightsInfo,
2451*3e777be0SXin Li biases);
2452*3e777be0SXin Li };
2453*3e777be0SXin Li
2454*3e777be0SXin Li if(!IsDynamicTensor(outputInfo))
2455*3e777be0SXin Li {
2456*3e777be0SXin Li validateFunc(outputInfo, isSupported);
2457*3e777be0SXin Li }
2458*3e777be0SXin Li else
2459*3e777be0SXin Li {
2460*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
2461*3e777be0SXin Li }
2462*3e777be0SXin Li
2463*3e777be0SXin Li if (!isSupported)
2464*3e777be0SXin Li {
2465*3e777be0SXin Li return false;
2466*3e777be0SXin Li }
2467*3e777be0SXin Li
    armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }
    startLayer->SetBackendId(setBackend);
2475*3e777be0SXin Li
2476*3e777be0SXin Li input.Connect(startLayer->GetInputSlot(0));
2477*3e777be0SXin Li
2478*3e777be0SXin Li // Connect weights and bias inputs
2479*3e777be0SXin Li weightsInput.Connect(startLayer->GetInputSlot(1));
2480*3e777be0SXin Li biasInput.Connect(startLayer->GetInputSlot(2));
2481*3e777be0SXin Li
2482*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2483*3e777be0SXin Li data, nullptr, validateFunc, activation);
2484*3e777be0SXin Li }
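// A minimal usage sketch (illustrative only; the wrapper below is an assumption, not part of this file).
// HalPolicy implementations typically dispatch to the converters in this header from a switch over the
// operation type, along these lines:
//
//     template<typename HalPolicy>
//     bool ConvertOperation(const typename HalPolicy::Operation& operation,
//                           const typename HalPolicy::Model& model,
//                           ConversionData& data)
//     {
//         switch (operation.type)
//         {
//             case HalPolicy::OperationType::CONV_2D:
//                 return ConvertConv2d<HalPolicy>(operation, model, data);
//             // ... other operation types ...
//             default:
//                 return Fail("%s: Operation type not supported", __func__);
//         }
//     }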
2485*3e777be0SXin Li
2486*3e777be0SXin Li template<typename HalPolicy,
2487*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
2488*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2490*3e777be0SXin Li {
2491*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
2492*3e777be0SXin Li using HalOperandType = typename HalPolicy::OperandType;
2493*3e777be0SXin Li
2494*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2495*3e777be0SXin Li if (!input.IsValid() )
2496*3e777be0SXin Li {
2497*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2498*3e777be0SXin Li }
2499*3e777be0SXin Li
2500*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2501*3e777be0SXin Li unsigned int rank = inputInfo.GetNumDimensions();
2502*3e777be0SXin Li if (rank != 4)
2503*3e777be0SXin Li {
2504*3e777be0SXin Li return Fail("%s: Only inputs with rank 4 are supported", __func__);
2505*3e777be0SXin Li }
2506*3e777be0SXin Li
2507*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2508*3e777be0SXin Li if (!output)
2509*3e777be0SXin Li {
2510*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
2511*3e777be0SXin Li }
2512*3e777be0SXin Li
2513*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2514*3e777be0SXin Li
2515*3e777be0SXin Li armnn::DepthToSpaceDescriptor descriptor;
2516*3e777be0SXin Li
    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }
    if (descriptor.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1 in all dimensions", __func__);
    }
2522*3e777be0SXin Li
2523*3e777be0SXin Li descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2524*3e777be0SXin Li if (Is12OrLaterOperand(*output))
2525*3e777be0SXin Li {
2526*3e777be0SXin Li descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2527*3e777be0SXin Li }
2528*3e777be0SXin Li
2529*3e777be0SXin Li bool isSupported = false;
2530*3e777be0SXin Li armnn::BackendId setBackend;
2531*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2532*3e777be0SXin Li {
2533*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
2534*3e777be0SXin Li IsDepthToSpaceSupported,
2535*3e777be0SXin Li data.m_Backends,
2536*3e777be0SXin Li isSupported,
2537*3e777be0SXin Li setBackend,
2538*3e777be0SXin Li inputInfo,
2539*3e777be0SXin Li outputInfo,
2540*3e777be0SXin Li descriptor);
2541*3e777be0SXin Li };
2542*3e777be0SXin Li
2543*3e777be0SXin Li if(!IsDynamicTensor(outputInfo))
2544*3e777be0SXin Li {
2545*3e777be0SXin Li validateFunc(outputInfo, isSupported);
2546*3e777be0SXin Li }
2547*3e777be0SXin Li else
2548*3e777be0SXin Li {
2549*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
2550*3e777be0SXin Li }
2551*3e777be0SXin Li
2552*3e777be0SXin Li if (!isSupported)
2553*3e777be0SXin Li {
2554*3e777be0SXin Li return false;
2555*3e777be0SXin Li }
2556*3e777be0SXin Li
    armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the DepthToSpaceLayer", __func__);
    }
    layer->SetBackendId(setBackend);
2563*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
2564*3e777be0SXin Li
2565*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
2566*3e777be0SXin Li }
2567*3e777be0SXin Li
2568*3e777be0SXin Li template<typename HalPolicy,
2569*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
2570*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2572*3e777be0SXin Li {
2573*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
2574*3e777be0SXin Li using HalOperandType = typename HalPolicy::OperandType;
2575*3e777be0SXin Li
2576*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2577*3e777be0SXin Li
2578*3e777be0SXin Li if (!input.IsValid())
2579*3e777be0SXin Li {
2580*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2581*3e777be0SXin Li }
2582*3e777be0SXin Li
2583*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2584*3e777be0SXin Li
2585*3e777be0SXin Li if (!output)
2586*3e777be0SXin Li {
2587*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
2588*3e777be0SXin Li }
2589*3e777be0SXin Li
2590*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2591*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2592*3e777be0SXin Li
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ].
    // Weights and bias are connected as layer inputs further below, whether constant or not.
2595*3e777be0SXin Li const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2596*3e777be0SXin Li if (!weightsOperand)
2597*3e777be0SXin Li {
2598*3e777be0SXin Li return Fail("%s: Could not read weights", __func__);
2599*3e777be0SXin Li }
2600*3e777be0SXin Li // Basic sanity check on the weights shape.
2601*3e777be0SXin Li // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
2602*3e777be0SXin Li // [1, filter_height, filter_width, depth_out]
2603*3e777be0SXin Li if (weightsOperand->dimensions[0] != 1)
2604*3e777be0SXin Li {
2605*3e777be0SXin Li return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
2606*3e777be0SXin Li }
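    // Illustrative example (not from the original source): for an input depth I = 32 and a depth
    // multiplier M = 2, the weights operand is expected to have shape [1, filter_height, filter_width, 64].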
2607*3e777be0SXin Li
2608*3e777be0SXin Li armnn::DepthwiseConvolution2dDescriptor desc;
2609*3e777be0SXin Li desc.m_DataLayout = armnn::DataLayout::NHWC;
2610*3e777be0SXin Li
2611*3e777be0SXin Li LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2612*3e777be0SXin Li if (!weightsInput.IsValid())
2613*3e777be0SXin Li {
2614*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2615*3e777be0SXin Li }
2616*3e777be0SXin Li
2617*3e777be0SXin Li const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
2618*3e777be0SXin Li if (!biasOperand)
2619*3e777be0SXin Li {
2620*3e777be0SXin Li return Fail("%s: Could not read bias", __func__);
2621*3e777be0SXin Li }
2622*3e777be0SXin Li
2623*3e777be0SXin Li LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
2624*3e777be0SXin Li if (!biasInput.IsValid())
2625*3e777be0SXin Li {
2626*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2627*3e777be0SXin Li }
2628*3e777be0SXin Li
2629*3e777be0SXin Li biasInput.SanitizeQuantizationScale(weightsInput, input);
2630*3e777be0SXin Li armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
2631*3e777be0SXin Li armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
2632*3e777be0SXin Li
2633*3e777be0SXin Li ActivationFn activation;
2634*3e777be0SXin Li
2635*3e777be0SXin Li if (operation.inputs.size() == 11)
2636*3e777be0SXin Li {
2637*3e777be0SXin Li if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2638*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2639*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2640*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2641*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2642*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
2643*3e777be0SXin Li !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
2644*3e777be0SXin Li {
2645*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2646*3e777be0SXin Li }
2647*3e777be0SXin Li }
2648*3e777be0SXin Li else if (operation.inputs.size() == 8)
2649*3e777be0SXin Li {
2650*3e777be0SXin Li android::nn::PaddingScheme paddingScheme;
2651*3e777be0SXin Li if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2652*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2653*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
2654*3e777be0SXin Li !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
2655*3e777be0SXin Li {
2656*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2657*3e777be0SXin Li }
2658*3e777be0SXin Li
2659*3e777be0SXin Li const uint32_t kernelX = weightsInfo.GetShape()[2];
2660*3e777be0SXin Li const uint32_t kernelY = weightsInfo.GetShape()[1];
2661*3e777be0SXin Li const uint32_t inputX = inputInfo.GetShape()[2];
2662*3e777be0SXin Li const uint32_t inputY = inputInfo.GetShape()[1];
2663*3e777be0SXin Li
2664*3e777be0SXin Li CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2665*3e777be0SXin Li CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2666*3e777be0SXin Li }
2667*3e777be0SXin Li else
2668*3e777be0SXin Li {
2669*3e777be0SXin Li return Fail("%s: Unsupported number of operation inputs", __func__);
2670*3e777be0SXin Li }
2671*3e777be0SXin Li
2672*3e777be0SXin Li desc.m_BiasEnabled = true;
2673*3e777be0SXin Li armnn::Optional<armnn::TensorInfo> biases(biasInfo);
2674*3e777be0SXin Li
2675*3e777be0SXin Li bool isSupported = false;
2676*3e777be0SXin Li armnn::BackendId setBackend;
2677*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2678*3e777be0SXin Li {
2679*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
2680*3e777be0SXin Li IsDepthwiseConvolutionSupported,
2681*3e777be0SXin Li data.m_Backends,
2682*3e777be0SXin Li isSupported,
2683*3e777be0SXin Li setBackend,
2684*3e777be0SXin Li inputInfo,
2685*3e777be0SXin Li outputInfo,
2686*3e777be0SXin Li desc,
2687*3e777be0SXin Li weightsInfo,
2688*3e777be0SXin Li biases);
2689*3e777be0SXin Li };
2690*3e777be0SXin Li
2691*3e777be0SXin Li if(!IsDynamicTensor(outputInfo))
2692*3e777be0SXin Li {
2693*3e777be0SXin Li validateFunc(outputInfo, isSupported);
2694*3e777be0SXin Li }
2695*3e777be0SXin Li else
2696*3e777be0SXin Li {
2697*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
2698*3e777be0SXin Li }
2699*3e777be0SXin Li
2700*3e777be0SXin Li
2701*3e777be0SXin Li if (!isSupported)
2702*3e777be0SXin Li {
2703*3e777be0SXin Li return false;
2704*3e777be0SXin Li }
2705*3e777be0SXin Li
    armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }
    startLayer->SetBackendId(setBackend);
2712*3e777be0SXin Li
2713*3e777be0SXin Li input.Connect(startLayer->GetInputSlot(0));
2714*3e777be0SXin Li
2715*3e777be0SXin Li // Connect weights and bias inputs
2716*3e777be0SXin Li weightsInput.Connect(startLayer->GetInputSlot(1));
2717*3e777be0SXin Li biasInput.Connect(startLayer->GetInputSlot(2));
2718*3e777be0SXin Li
2719*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2720*3e777be0SXin Li data, nullptr, validateFunc, activation);
2721*3e777be0SXin Li }
2722*3e777be0SXin Li
2723*3e777be0SXin Li template<typename HalPolicy,
2724*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
2725*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
2727*3e777be0SXin Li {
2728*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
2729*3e777be0SXin Li
2730*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2731*3e777be0SXin Li if (!input.IsValid())
2732*3e777be0SXin Li {
2733*3e777be0SXin Li return Fail("%s: Operation has invalid input", __func__);
2734*3e777be0SXin Li }
2735*3e777be0SXin Li
2736*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2737*3e777be0SXin Li const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2738*3e777be0SXin Li if (quantizationDim.has_value() && quantizationDim.value() != 0)
2739*3e777be0SXin Li {
2740*3e777be0SXin Li return Fail("%s: Operation has quantization dimension different than 0", __func__);
2741*3e777be0SXin Li }
2742*3e777be0SXin Li
2743*3e777be0SXin Li const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2744*3e777be0SXin Li if (!outputOperand)
2745*3e777be0SXin Li {
2746*3e777be0SXin Li return Fail("%s: Operation has invalid outputs", __func__);
2747*3e777be0SXin Li }
2748*3e777be0SXin Li
2749*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2750*3e777be0SXin Li
2751*3e777be0SXin Li bool isSupported = false;
2752*3e777be0SXin Li armnn::BackendId setBackend;
2753*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2754*3e777be0SXin Li {
2755*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
2756*3e777be0SXin Li IsDequantizeSupported,
2757*3e777be0SXin Li data.m_Backends,
2758*3e777be0SXin Li isSupported,
2759*3e777be0SXin Li setBackend,
2760*3e777be0SXin Li inputInfo,
2761*3e777be0SXin Li outputInfo);
2762*3e777be0SXin Li };
2763*3e777be0SXin Li
2764*3e777be0SXin Li if(IsDynamicTensor(outputInfo))
2765*3e777be0SXin Li {
2766*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
2767*3e777be0SXin Li }
2768*3e777be0SXin Li else
2769*3e777be0SXin Li {
2770*3e777be0SXin Li validateFunc(outputInfo, isSupported);
2771*3e777be0SXin Li }
2772*3e777be0SXin Li
2773*3e777be0SXin Li if (!isSupported)
2774*3e777be0SXin Li {
2775*3e777be0SXin Li return false;
2776*3e777be0SXin Li }
2777*3e777be0SXin Li
    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    if (!layer)
    {
        return Fail("%s: Could not add the DequantizeLayer", __func__);
    }
    layer->SetBackendId(setBackend);
2784*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
2785*3e777be0SXin Li
2786*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
2787*3e777be0SXin Li }
2788*3e777be0SXin Li
2789*3e777be0SXin Li template<typename HalPolicy,
2790*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
2791*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertElementwiseBinary(const HalOperation& operation,
2793*3e777be0SXin Li const HalModel& model,
2794*3e777be0SXin Li ConversionData& data,
2795*3e777be0SXin Li armnn::BinaryOperation binaryOperation)
2796*3e777be0SXin Li {
2797*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
2798*3e777be0SXin Li
2799*3e777be0SXin Li ALOGV("HalPolicy::ConvertElementwiseBinary()");
2800*3e777be0SXin Li ALOGV("binaryOperation = %s", GetBinaryOperationAsCString(binaryOperation));
2801*3e777be0SXin Li
2802*3e777be0SXin Li LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2803*3e777be0SXin Li LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2804*3e777be0SXin Li
2805*3e777be0SXin Li if (!input0.IsValid() || !input1.IsValid())
2806*3e777be0SXin Li {
2807*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2808*3e777be0SXin Li }
2809*3e777be0SXin Li
2810*3e777be0SXin Li // The FuseActivation parameter is always the input index 2, and it should be optional
2811*3e777be0SXin Li ActivationFn activationFunction;
2812*3e777be0SXin Li if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2813*3e777be0SXin Li {
2814*3e777be0SXin Li return Fail("%s: Operation has invalid optional input: activation function", __func__);
2815*3e777be0SXin Li }
2816*3e777be0SXin Li
2817*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2818*3e777be0SXin Li if (!output)
2819*3e777be0SXin Li {
2820*3e777be0SXin Li return Fail("%s: Could not read output", __func__);
2821*3e777be0SXin Li }
2822*3e777be0SXin Li
2823*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2824*3e777be0SXin Li
2825*3e777be0SXin Li armnn::ElementwiseBinaryDescriptor descriptor(binaryOperation);
2826*3e777be0SXin Li
2827*3e777be0SXin Li bool isSupported = false;
2828*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2829*3e777be0SXin Li {
2830*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
2831*3e777be0SXin Li IsElementwiseBinarySupported,
2832*3e777be0SXin Li data.m_Backends,
2833*3e777be0SXin Li isSupported,
2834*3e777be0SXin Li armnn::BackendId(),
2835*3e777be0SXin Li input0.GetTensorInfo(),
2836*3e777be0SXin Li input1.GetTensorInfo(),
2837*3e777be0SXin Li outputInfo,
2838*3e777be0SXin Li binaryOperation);
2839*3e777be0SXin Li };
2840*3e777be0SXin Li
2841*3e777be0SXin Li if (!IsDynamicTensor(outputInfo))
2842*3e777be0SXin Li {
2843*3e777be0SXin Li validateFunc(outputInfo, isSupported);
2844*3e777be0SXin Li }
2845*3e777be0SXin Li else
2846*3e777be0SXin Li {
2847*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
2848*3e777be0SXin Li }
2849*3e777be0SXin Li
2850*3e777be0SXin Li if (!isSupported)
2851*3e777be0SXin Li {
2852*3e777be0SXin Li return false;
2853*3e777be0SXin Li }
2854*3e777be0SXin Li
2855*3e777be0SXin Li armnn::IConnectableLayer* layer = data.m_Network->AddElementwiseBinaryLayer(descriptor);
2856*3e777be0SXin Li if (!layer)
2857*3e777be0SXin Li {
2858*3e777be0SXin Li return Fail("%s: Could not add the ElementwiseBinaryLayer", __func__);
2859*3e777be0SXin Li }
2860*3e777be0SXin Li bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
2861*3e777be0SXin Li if (!isReshapeSupported)
2862*3e777be0SXin Li {
2863*3e777be0SXin Li return false;
2864*3e777be0SXin Li }
2865*3e777be0SXin Li
2866*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc,
2867*3e777be0SXin Li activationFunction);
2868*3e777be0SXin Li }
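// Illustrative call sites (assumed wrappers, shown for clarity only):
//
//     ConvertElementwiseBinary<HalPolicy>(operation, model, data, armnn::BinaryOperation::Add);  // ADD
//     ConvertElementwiseBinary<HalPolicy>(operation, model, data, armnn::BinaryOperation::Sub);  // SUB
//     ConvertElementwiseBinary<HalPolicy>(operation, model, data, armnn::BinaryOperation::Mul);  // MUL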
2869*3e777be0SXin Li
2870*3e777be0SXin Li
2871*3e777be0SXin Li template<typename HalPolicy,
2872*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
2873*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
2875*3e777be0SXin Li {
2876*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
2877*3e777be0SXin Li
2878*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2879*3e777be0SXin Li if (!input.IsValid())
2880*3e777be0SXin Li {
2881*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
2882*3e777be0SXin Li }
2883*3e777be0SXin Li
2884*3e777be0SXin Li const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2885*3e777be0SXin Li if (!outputOperand)
2886*3e777be0SXin Li {
2887*3e777be0SXin Li return Fail("%s: Operation has invalid outputs", __func__);
2888*3e777be0SXin Li }
2889*3e777be0SXin Li
2890*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2891*3e777be0SXin Li
2892*3e777be0SXin Li bool isSupported = false;
2893*3e777be0SXin Li armnn::BackendId setBackend;
2894*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2895*3e777be0SXin Li {
2896*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
2897*3e777be0SXin Li IsFloorSupported,
2898*3e777be0SXin Li data.m_Backends,
2899*3e777be0SXin Li isSupported,
2900*3e777be0SXin Li setBackend,
2901*3e777be0SXin Li input.GetTensorInfo(),
2902*3e777be0SXin Li outputInfo);
2903*3e777be0SXin Li };
2904*3e777be0SXin Li
2905*3e777be0SXin Li if(!IsDynamicTensor(outputInfo))
2906*3e777be0SXin Li {
2907*3e777be0SXin Li validateFunc(outputInfo, isSupported);
2908*3e777be0SXin Li }
2909*3e777be0SXin Li else
2910*3e777be0SXin Li {
2911*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
2912*3e777be0SXin Li }
2913*3e777be0SXin Li
2914*3e777be0SXin Li if (!isSupported)
2915*3e777be0SXin Li {
2916*3e777be0SXin Li return false;
2917*3e777be0SXin Li }
2918*3e777be0SXin Li
    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
    if (!layer)
    {
        return Fail("%s: Could not add the FloorLayer", __func__);
    }
    layer->SetBackendId(setBackend);
2925*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
2926*3e777be0SXin Li
2927*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
2928*3e777be0SXin Li }
2929*3e777be0SXin Li
inline bool IsQSymm8(const V1_0::Operand&)
2931*3e777be0SXin Li {
2932*3e777be0SXin Li return false;
2933*3e777be0SXin Li }
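// Note (added for clarity): the 1.0 HAL has no TENSOR_QUANT8_SYMM operand type, so the QSYMM8
// dequantize-fusion path below can never trigger for V1_0 operands.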
2934*3e777be0SXin Li
2935*3e777be0SXin Li #if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
2936*3e777be0SXin Li
inline bool IsQSymm8(const V1_2::Operand& operand)
2938*3e777be0SXin Li {
2939*3e777be0SXin Li return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
2940*3e777be0SXin Li }
2941*3e777be0SXin Li
2942*3e777be0SXin Li #endif
2943*3e777be0SXin Li
2944*3e777be0SXin Li #ifdef ARMNN_ANDROID_NN_V1_3
2945*3e777be0SXin Li
inline bool IsQSymm8(const V1_3::Operand& operand)
2947*3e777be0SXin Li {
2948*3e777be0SXin Li return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
2949*3e777be0SXin Li }
2950*3e777be0SXin Li
2951*3e777be0SXin Li #endif
2952*3e777be0SXin Li
2953*3e777be0SXin Li enum class DequantizeStatus
2954*3e777be0SXin Li {
2955*3e777be0SXin Li SUCCESS,
2956*3e777be0SXin Li NOT_REQUIRED,
2957*3e777be0SXin Li INVALID_OPERAND
2958*3e777be0SXin Li };
2959*3e777be0SXin Li
2960*3e777be0SXin Li using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
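// DequantizeResult fields, in order: the dequantized float buffer, its size in bytes, the TensorInfo
// describing the dequantized constant, and the status of the dequantization attempt.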
2961*3e777be0SXin Li
2962*3e777be0SXin Li template<typename HalPolicy,
2963*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
2964*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
DequantizeResult DequantizeIfRequired(size_t operand_index,
2966*3e777be0SXin Li const HalOperation& operation,
2967*3e777be0SXin Li const HalModel& model,
2968*3e777be0SXin Li const ConversionData& data)
2969*3e777be0SXin Li {
2970*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
2971*3e777be0SXin Li
2972*3e777be0SXin Li const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
2973*3e777be0SXin Li if (!weightsOperand)
2974*3e777be0SXin Li {
2975*3e777be0SXin Li return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
2976*3e777be0SXin Li }
2977*3e777be0SXin Li
2978*3e777be0SXin Li if (IsOperandConstant<HalPolicy>(*weightsOperand))
2979*3e777be0SXin Li {
2980*3e777be0SXin Li // Weights are already constant
2981*3e777be0SXin Li return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
2982*3e777be0SXin Li }
2983*3e777be0SXin Li
2984*3e777be0SXin Li const size_t weightsInputIndex = operation.inputs[operand_index];
2985*3e777be0SXin Li
    // The weights are a non-const tensor; this indicates they might be the output of a DEQUANTIZE op.
    // Iterate over the operations and find the preceding one, which should be DEQUANTIZE.
2988*3e777be0SXin Li for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
2989*3e777be0SXin Li {
2990*3e777be0SXin Li // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
2991*3e777be0SXin Li const auto& operationIt = getMainModel(model).operations[operationIdx];
2992*3e777be0SXin Li if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2993*3e777be0SXin Li {
2994*3e777be0SXin Li continue;
2995*3e777be0SXin Li }
2996*3e777be0SXin Li
2997*3e777be0SXin Li size_t outOpIndex = weightsInputIndex + 1;
2998*3e777be0SXin Li for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
2999*3e777be0SXin Li {
3000*3e777be0SXin Li outOpIndex = operationIt.outputs[i];
3001*3e777be0SXin Li }
3002*3e777be0SXin Li
3003*3e777be0SXin Li if (outOpIndex != weightsInputIndex)
3004*3e777be0SXin Li {
3005*3e777be0SXin Li continue;
3006*3e777be0SXin Li }
3007*3e777be0SXin Li
3008*3e777be0SXin Li const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
3009*3e777be0SXin Li
3010*3e777be0SXin Li if (!operand)
3011*3e777be0SXin Li {
3012*3e777be0SXin Li return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
3013*3e777be0SXin Li }
3014*3e777be0SXin Li
3015*3e777be0SXin Li if (!IsQSymm8(*operand))
3016*3e777be0SXin Li {
3017*3e777be0SXin Li // Only supporting dequantize from QSYMM8 to FLOAT
3018*3e777be0SXin Li break;
3019*3e777be0SXin Li }
3020*3e777be0SXin Li
3021*3e777be0SXin Li // Allocate a new buffer for the dequantized data and manually dequantize
3022*3e777be0SXin Li const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
3023*3e777be0SXin Li if (!startValue)
3024*3e777be0SXin Li {
3025*3e777be0SXin Li // Failed to get the operand address
3026*3e777be0SXin Li break;
3027*3e777be0SXin Li }
3028*3e777be0SXin Li
3029*3e777be0SXin Li const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
3030*3e777be0SXin Li size_t dequantizedBufferLength = operand->location.length;
3031*3e777be0SXin Li const float quantizationScale = operand->scale;
3032*3e777be0SXin Li
        auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
        float* dstPtr = dequantizedBuffer.get();
        if (!dstPtr)
        {
            return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
        }
        // Dequantize element by element into the destination buffer
        for (size_t i = 0; i < dequantizedBufferLength; ++i)
        {
            dstPtr[i] = quantizedBuffer[i] * quantizationScale;
        }
3044*3e777be0SXin Li
3045*3e777be0SXin Li // Construct tensor info for dequantized ConstTensor
3046*3e777be0SXin Li armnn::TensorInfo tensorInfo(operand->dimensions.size(),
3047*3e777be0SXin Li operand->dimensions.data(),
3048*3e777be0SXin Li armnn::DataType::Float32);
3049*3e777be0SXin Li
3050*3e777be0SXin Li return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
3051*3e777be0SXin Li std::move(tensorInfo),
3052*3e777be0SXin Li DequantizeStatus::SUCCESS };
3053*3e777be0SXin Li }
3054*3e777be0SXin Li
3055*3e777be0SXin Li return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
3056*3e777be0SXin Li }
3057*3e777be0SXin Li
3058*3e777be0SXin Li template<typename HalPolicy,
3059*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
3060*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
3062*3e777be0SXin Li const HalModel& model,
3063*3e777be0SXin Li const ConversionData& data,
3064*3e777be0SXin Li size_t operandIndex,
3065*3e777be0SXin Li bool optional = false)
3066*3e777be0SXin Li {
3067*3e777be0SXin Li DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
3068*3e777be0SXin Li
3069*3e777be0SXin Li DequantizeStatus status = std::get<3>(dequantized);
3070*3e777be0SXin Li switch (status)
3071*3e777be0SXin Li {
3072*3e777be0SXin Li case DequantizeStatus::INVALID_OPERAND:
3073*3e777be0SXin Li {
3074*3e777be0SXin Li // return invalid const tensor pin
3075*3e777be0SXin Li return ConstTensorPin();
3076*3e777be0SXin Li }
3077*3e777be0SXin Li case DequantizeStatus::NOT_REQUIRED:
3078*3e777be0SXin Li {
3079*3e777be0SXin Li return ConvertOperationInputToConstTensorPin<HalPolicy>(
3080*3e777be0SXin Li operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
3081*3e777be0SXin Li }
3082*3e777be0SXin Li case DequantizeStatus::SUCCESS:
3083*3e777be0SXin Li default:
3084*3e777be0SXin Li {
3085*3e777be0SXin Li return ConstTensorPin(
3086*3e777be0SXin Li std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
3087*3e777be0SXin Li }
3088*3e777be0SXin Li }
3089*3e777be0SXin Li }
3090*3e777be0SXin Li
3091*3e777be0SXin Li
3092*3e777be0SXin Li template<typename HalPolicy,
3093*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
3094*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
3096*3e777be0SXin Li {
3097*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
3098*3e777be0SXin Li
3099*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3100*3e777be0SXin Li if (!input.IsValid())
3101*3e777be0SXin Li {
3102*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3103*3e777be0SXin Li }
3104*3e777be0SXin Li
3105*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3106*3e777be0SXin Li if (!output)
3107*3e777be0SXin Li {
3108*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
3109*3e777be0SXin Li }
3110*3e777be0SXin Li
3111*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3112*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3113*3e777be0SXin Li
3114*3e777be0SXin Li LayerInputHandle weightsInput = LayerInputHandle();
3115*3e777be0SXin Li const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3116*3e777be0SXin Li if (!weightsOperand)
3117*3e777be0SXin Li {
3118*3e777be0SXin Li return Fail("%s: Could not read weights", __func__);
3119*3e777be0SXin Li }
3120*3e777be0SXin Li
    // If the weights are constant, a separate constant layer will be created to store the data.
    // Otherwise the non-const weights are handled as a layer input.
3123*3e777be0SXin Li weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3124*3e777be0SXin Li if (!weightsInput.IsValid())
3125*3e777be0SXin Li {
3126*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3127*3e777be0SXin Li }
3128*3e777be0SXin Li
3129*3e777be0SXin Li LayerInputHandle biasInput = LayerInputHandle();
3130*3e777be0SXin Li const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3131*3e777be0SXin Li if (!biasOperand)
3132*3e777be0SXin Li {
3133*3e777be0SXin Li return Fail("%s: Could not read bias", __func__);
3134*3e777be0SXin Li }
3135*3e777be0SXin Li
    // If the bias is constant, a separate constant layer will be created to store the data.
    // Otherwise the non-const bias is handled as a layer input.
3138*3e777be0SXin Li biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
3139*3e777be0SXin Li if (!biasInput.IsValid())
3140*3e777be0SXin Li {
3141*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3142*3e777be0SXin Li }
3143*3e777be0SXin Li
3144*3e777be0SXin Li armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
3145*3e777be0SXin Li armnn::TensorInfo reshapedInfo = inputInfo;
3146*3e777be0SXin Li try
3147*3e777be0SXin Li {
3148*3e777be0SXin Li reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
3149*3e777be0SXin Li }
3150*3e777be0SXin Li catch (const std::exception& e)
3151*3e777be0SXin Li {
3152*3e777be0SXin Li return Fail("%s: %s", __func__, e.what());
3153*3e777be0SXin Li }
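    // Illustrative example (assuming FlattenFullyConnectedInput collapses the input to
    // [ totalElements / weightsShape[1], weightsShape[1] ]): an input of shape [1, 4, 5] with weights of
    // shape [8, 20] is reshaped to [1, 20] before being fed to the FullyConnected layer.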
3154*3e777be0SXin Li
    // Ensure the bias quantization scale is within 1% of the product of the input and weights quantization
    // scales (small floating point differences can exist).
3156*3e777be0SXin Li armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
3157*3e777be0SXin Li SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);
3158*3e777be0SXin Li
3159*3e777be0SXin Li ActivationFn activationFunction;
3160*3e777be0SXin Li if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
3161*3e777be0SXin Li {
3162*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3163*3e777be0SXin Li }
3164*3e777be0SXin Li
3165*3e777be0SXin Li armnn::FullyConnectedDescriptor desc;
3166*3e777be0SXin Li desc.m_TransposeWeightMatrix = true;
3167*3e777be0SXin Li desc.m_BiasEnabled = true;
3168*3e777be0SXin Li desc.m_ConstantWeights = IsOperandConstant<HalPolicy>(*weightsOperand);
3169*3e777be0SXin Li
3170*3e777be0SXin Li bool isSupported = false;
3171*3e777be0SXin Li armnn::BackendId setBackend;
3172*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3173*3e777be0SXin Li {
3174*3e777be0SXin Li if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
3175*3e777be0SXin Li weightsInfo.GetShape(),
3176*3e777be0SXin Li outputInfo.GetShape(),
3177*3e777be0SXin Li desc.m_TransposeWeightMatrix))
3178*3e777be0SXin Li {
3179*3e777be0SXin Li isSupported = false;
3180*3e777be0SXin Li Fail("%s: Expected outputShape does not match actual outputShape", __func__);
3181*3e777be0SXin Li return;
3182*3e777be0SXin Li }
3183*3e777be0SXin Li
3184*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
3185*3e777be0SXin Li IsFullyConnectedSupported,
3186*3e777be0SXin Li data.m_Backends,
3187*3e777be0SXin Li isSupported,
3188*3e777be0SXin Li setBackend,
3189*3e777be0SXin Li reshapedInfo,
3190*3e777be0SXin Li outputInfo,
3191*3e777be0SXin Li weightsInfo,
3192*3e777be0SXin Li biasInfo,
3193*3e777be0SXin Li desc);
3194*3e777be0SXin Li };
3195*3e777be0SXin Li
3196*3e777be0SXin Li if(!IsDynamicTensor(outputInfo))
3197*3e777be0SXin Li {
3198*3e777be0SXin Li validateFunc(outputInfo, isSupported);
3199*3e777be0SXin Li }
3200*3e777be0SXin Li else
3201*3e777be0SXin Li {
3202*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
3203*3e777be0SXin Li }
3204*3e777be0SXin Li
3205*3e777be0SXin Li if (!isSupported)
3206*3e777be0SXin Li {
3207*3e777be0SXin Li return false;
3208*3e777be0SXin Li }
3209*3e777be0SXin Li
3210*3e777be0SXin Li // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
    armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
    if (!startLayer)
    {
        return Fail("%s: Could not add the FullyConnectedLayer", __func__);
    }
    startLayer->SetBackendId(setBackend);
3213*3e777be0SXin Li
3214*3e777be0SXin Li if (inputInfo.GetNumDimensions() > 2U)
3215*3e777be0SXin Li {
3216*3e777be0SXin Li armnn::ReshapeDescriptor reshapeDescriptor;
3217*3e777be0SXin Li reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
3218*3e777be0SXin Li
3219*3e777be0SXin Li armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3220*3e777be0SXin Li if (!reshapeLayer)
3221*3e777be0SXin Li {
3222*3e777be0SXin Li return Fail("%s: could not add the reshapeLayer", __func__);
3223*3e777be0SXin Li }
3224*3e777be0SXin Li input.Connect(reshapeLayer->GetInputSlot(0));
3225*3e777be0SXin Li reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
3226*3e777be0SXin Li reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
3227*3e777be0SXin Li }
3228*3e777be0SXin Li else
3229*3e777be0SXin Li {
3230*3e777be0SXin Li input.Connect(startLayer->GetInputSlot(0));
3231*3e777be0SXin Li }
3232*3e777be0SXin Li
3233*3e777be0SXin Li // Connect weights and bias inputs
3234*3e777be0SXin Li weightsInput.Connect(startLayer->GetInputSlot(1));
3235*3e777be0SXin Li biasInput.Connect(startLayer->GetInputSlot(2));
3236*3e777be0SXin Li
3237*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3238*3e777be0SXin Li data, nullptr, validateFunc, activationFunction);
3239*3e777be0SXin Li }
3240*3e777be0SXin Li
3241*3e777be0SXin Li template<typename HalPolicy,
3242*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
3243*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
3245*3e777be0SXin Li {
3246*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
3247*3e777be0SXin Li
3248*3e777be0SXin Li if (operation.inputs.size() != 1)
3249*3e777be0SXin Li {
3250*3e777be0SXin Li return Fail("%s: Optional inputs are not supported", __func__);
3251*3e777be0SXin Li }
3252*3e777be0SXin Li
3253*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3254*3e777be0SXin Li if (!input.IsValid())
3255*3e777be0SXin Li {
3256*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3257*3e777be0SXin Li }
3258*3e777be0SXin Li
3259*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3260*3e777be0SXin Li if (!output)
3261*3e777be0SXin Li {
3262*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
3263*3e777be0SXin Li }
3264*3e777be0SXin Li
3265*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3266*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3267*3e777be0SXin Li
3268*3e777be0SXin Li if (outputInfo.GetNumDimensions() != 4u)
3269*3e777be0SXin Li {
3270*3e777be0SXin Li return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3271*3e777be0SXin Li }
3272*3e777be0SXin Li
3273*3e777be0SXin Li armnn::L2NormalizationDescriptor desc;
3274*3e777be0SXin Li desc.m_DataLayout = armnn::DataLayout::NHWC;
3275*3e777be0SXin Li
3276*3e777be0SXin Li bool isSupported = false;
3277*3e777be0SXin Li armnn::BackendId setBackend;
3278*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3279*3e777be0SXin Li {
3280*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
3281*3e777be0SXin Li IsL2NormalizationSupported,
3282*3e777be0SXin Li data.m_Backends,
3283*3e777be0SXin Li isSupported,
3284*3e777be0SXin Li setBackend,
3285*3e777be0SXin Li inputInfo,
3286*3e777be0SXin Li outputInfo,
3287*3e777be0SXin Li desc);
3288*3e777be0SXin Li };
3289*3e777be0SXin Li
3290*3e777be0SXin Li if(!IsDynamicTensor(outputInfo))
3291*3e777be0SXin Li {
3292*3e777be0SXin Li validateFunc(outputInfo, isSupported);
3293*3e777be0SXin Li }
3294*3e777be0SXin Li else
3295*3e777be0SXin Li {
3296*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
3297*3e777be0SXin Li }
3298*3e777be0SXin Li
3299*3e777be0SXin Li if (!isSupported)
3300*3e777be0SXin Li {
3301*3e777be0SXin Li return false;
3302*3e777be0SXin Li }
3303*3e777be0SXin Li
    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    if (!layer)
    {
        return Fail("%s: Could not add the L2NormalizationLayer", __func__);
    }
    layer->SetBackendId(setBackend);
3310*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
3311*3e777be0SXin Li
3312*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3313*3e777be0SXin Li }
3314*3e777be0SXin Li
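// Converts an NNAPI LOCAL_RESPONSE_NORMALIZATION operation to an ArmNN Normalization layer
// (across-channel, LocalBrightness). Inputs 1-4 supply the radius, bias (k), alpha and beta,
// and the output must be rank 4.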
3315*3e777be0SXin Li template<typename HalPolicy,
3316*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
3317*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
3321*3e777be0SXin Li {
3322*3e777be0SXin Li if (operation.inputs.size() != 5)
3323*3e777be0SXin Li {
3324*3e777be0SXin Li return Fail("%s: Optional inputs are not supported", __func__);
3325*3e777be0SXin Li }
3326*3e777be0SXin Li
3327*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
3328*3e777be0SXin Li using HalOperandType = typename HalPolicy::OperandType;
3329*3e777be0SXin Li
3330*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3331*3e777be0SXin Li if (!input.IsValid())
3332*3e777be0SXin Li {
3333*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3334*3e777be0SXin Li }
3335*3e777be0SXin Li
3336*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3337*3e777be0SXin Li if (!output)
3338*3e777be0SXin Li {
3339*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
3340*3e777be0SXin Li }
3341*3e777be0SXin Li
3342*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3343*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3344*3e777be0SXin Li
3345*3e777be0SXin Li if (outputInfo.GetNumDimensions() != 4u)
3346*3e777be0SXin Li {
3347*3e777be0SXin Li return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3348*3e777be0SXin Li }
3349*3e777be0SXin Li
3350*3e777be0SXin Li armnn::NormalizationDescriptor descriptor;
3351*3e777be0SXin Li descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3352*3e777be0SXin Li descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3353*3e777be0SXin Li descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3354*3e777be0SXin Li
3355*3e777be0SXin Li if (!input.IsValid() ||
3356*3e777be0SXin Li !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
3357*3e777be0SXin Li !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3358*3e777be0SXin Li !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3359*3e777be0SXin Li !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3360*3e777be0SXin Li {
3361*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3362*3e777be0SXin Li }
3363*3e777be0SXin Li
3364*3e777be0SXin Li // ArmNN expects normSize to be the full size of the normalization
3365*3e777be0SXin Li // window rather than the radius as in AndroidNN.
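    // For example, an AndroidNN radius of 2 becomes an ArmNN window size of 5.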
3366*3e777be0SXin Li descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3367*3e777be0SXin Li
3368*3e777be0SXin Li bool isSupported = false;
3369*3e777be0SXin Li armnn::BackendId setBackend;
3370*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3371*3e777be0SXin Li {
3372*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
3373*3e777be0SXin Li IsNormalizationSupported,
3374*3e777be0SXin Li data.m_Backends,
3375*3e777be0SXin Li isSupported,
3376*3e777be0SXin Li setBackend,
3377*3e777be0SXin Li inputInfo,
3378*3e777be0SXin Li outputInfo,
3379*3e777be0SXin Li descriptor);
3380*3e777be0SXin Li };
3381*3e777be0SXin Li
3382*3e777be0SXin Li if(!IsDynamicTensor(outputInfo))
3383*3e777be0SXin Li {
3384*3e777be0SXin Li validateFunc(outputInfo, isSupported);
3385*3e777be0SXin Li }
3386*3e777be0SXin Li else
3387*3e777be0SXin Li {
3388*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
3389*3e777be0SXin Li }
3390*3e777be0SXin Li
3391*3e777be0SXin Li if (!isSupported)
3392*3e777be0SXin Li {
3393*3e777be0SXin Li return false;
3394*3e777be0SXin Li }
3395*3e777be0SXin Li
    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the NormalizationLayer", __func__);
    }
    layer->SetBackendId(setBackend);
3402*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
3403*3e777be0SXin Li
3404*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3405*3e777be0SXin Li }
3406*3e777be0SXin Li
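// Converts an NNAPI LOGISTIC operation by mapping it onto an ArmNN Sigmoid activation.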
3407*3e777be0SXin Li template<typename HalPolicy,
3408*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
3409*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
3411*3e777be0SXin Li {
3412*3e777be0SXin Li armnn::ActivationDescriptor desc;
3413*3e777be0SXin Li desc.m_Function = armnn::ActivationFunction::Sigmoid;
3414*3e777be0SXin Li
3415*3e777be0SXin Li return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3416*3e777be0SXin Li }
3417*3e777be0SXin Li
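// Converts an NNAPI MEAN operation to an ArmNN Mean layer. Input 1 lists the reduction axes
// (negative values wrap modulo the input rank and duplicates are removed); input 2 is the
// "keep dims" flag.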
3418*3e777be0SXin Li template<typename HalPolicy,
3419*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
3420*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
3422*3e777be0SXin Li {
3423*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
3424*3e777be0SXin Li
3425*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3426*3e777be0SXin Li if (!input.IsValid())
3427*3e777be0SXin Li {
3428*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3429*3e777be0SXin Li }
3430*3e777be0SXin Li
3431*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3432*3e777be0SXin Li if (!output)
3433*3e777be0SXin Li {
3434*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
3435*3e777be0SXin Li }
3436*3e777be0SXin Li
3437*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3438*3e777be0SXin Li
3439*3e777be0SXin Li const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3440*3e777be0SXin Li if (!axisOperand)
3441*3e777be0SXin Li {
3442*3e777be0SXin Li return Fail("%s: Could not read input 1", __func__);
3443*3e777be0SXin Li }
3444*3e777be0SXin Li
3445*3e777be0SXin Li std::vector<int32_t> axis;
3446*3e777be0SXin Li if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3447*3e777be0SXin Li {
3448*3e777be0SXin Li return Fail("%s: Input 1 has invalid values", __func__);
3449*3e777be0SXin Li }
3450*3e777be0SXin Li
3451*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3452*3e777be0SXin Li
3453*3e777be0SXin Li // Convert the axis to unsigned int and remove duplicates.
3454*3e777be0SXin Li unsigned int rank = inputInfo.GetNumDimensions();
3455*3e777be0SXin Li std::set<unsigned int> uniqueAxis;
3456*3e777be0SXin Li std::transform(axis.begin(), axis.end(),
3457*3e777be0SXin Li std::inserter(uniqueAxis, uniqueAxis.begin()),
3458*3e777be0SXin Li [rank](int i) -> unsigned int { return (i + rank) % rank; });
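    // e.g. with a rank of 4, an axis of -1 wraps to 3.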
3459*3e777be0SXin Li
3460*3e777be0SXin Li // Get the "keep dims" flag.
3461*3e777be0SXin Li int32_t keepDims = 0;
3462*3e777be0SXin Li if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3463*3e777be0SXin Li {
3464*3e777be0SXin Li return Fail("%s: Could not read input 2", __func__);
3465*3e777be0SXin Li }
3466*3e777be0SXin Li
3467*3e777be0SXin Li armnn::MeanDescriptor descriptor;
3468*3e777be0SXin Li descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3469*3e777be0SXin Li descriptor.m_KeepDims = keepDims > 0;
3470*3e777be0SXin Li
3471*3e777be0SXin Li bool isSupported = false;
3472*3e777be0SXin Li armnn::BackendId setBackend;
3473*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3474*3e777be0SXin Li {
3475*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
3476*3e777be0SXin Li IsMeanSupported,
3477*3e777be0SXin Li data.m_Backends,
3478*3e777be0SXin Li isSupported,
3479*3e777be0SXin Li setBackend,
3480*3e777be0SXin Li inputInfo,
3481*3e777be0SXin Li outputInfo,
3482*3e777be0SXin Li descriptor);
3483*3e777be0SXin Li };
3484*3e777be0SXin Li
3485*3e777be0SXin Li if(!IsDynamicTensor(outputInfo))
3486*3e777be0SXin Li {
3487*3e777be0SXin Li validateFunc(outputInfo, isSupported);
3488*3e777be0SXin Li }
3489*3e777be0SXin Li else
3490*3e777be0SXin Li {
3491*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
3492*3e777be0SXin Li }
3493*3e777be0SXin Li
3494*3e777be0SXin Li if (!isSupported)
3495*3e777be0SXin Li {
3496*3e777be0SXin Li return false;
3497*3e777be0SXin Li }
3498*3e777be0SXin Li
    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the MeanLayer", __func__);
    }
    layer->SetBackendId(setBackend);
3505*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
3506*3e777be0SXin Li
3507*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3508*3e777be0SXin Li }
3509*3e777be0SXin Li
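// Converts an NNAPI PAD operation to an ArmNN Pad layer. The paddings operand is read via
// ConvertPaddings; for asymmetrically quantized inputs the pad value is set to the zero point
// so that it dequantizes to zero.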
3510*3e777be0SXin Li template<typename HalPolicy,
3511*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
3512*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
3514*3e777be0SXin Li {
3515*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
3516*3e777be0SXin Li
3517*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3518*3e777be0SXin Li if (!input.IsValid())
3519*3e777be0SXin Li {
3520*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3521*3e777be0SXin Li }
3522*3e777be0SXin Li
3523*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3524*3e777be0SXin Li unsigned int rank = inputInfo.GetNumDimensions();
3525*3e777be0SXin Li
3526*3e777be0SXin Li armnn::PadDescriptor descriptor;
3527*3e777be0SXin Li if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3528*3e777be0SXin Li {
3529*3e777be0SXin Li return Fail("%s: Could not convert paddings", __func__);
3530*3e777be0SXin Li }
3531*3e777be0SXin Li
    // For an ANEURALNETWORKS_TENSOR_QUANT8_ASYMM or ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
    // the scale and zeroPoint must be the same as input0.
    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the
    // pad value must be "logical zero", so we set it equal to the QuantizationOffset; it then dequantizes to
    // (QuantizationOffset - QuantizationOffset) * scale = 0.
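    // For example, with a scale of 0.5f and a zeroPoint of 128, padding with 128 dequantizes to
    // (128 - 128) * 0.5f = 0.0f.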
3537*3e777be0SXin Li if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
3538*3e777be0SXin Li {
3539*3e777be0SXin Li descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3540*3e777be0SXin Li }
3541*3e777be0SXin Li
3542*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3543*3e777be0SXin Li if (!output)
3544*3e777be0SXin Li {
3545*3e777be0SXin Li return Fail("%s: Could not read output", __func__);
3546*3e777be0SXin Li }
3547*3e777be0SXin Li
3548*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3549*3e777be0SXin Li
3550*3e777be0SXin Li bool isSupported = false;
3551*3e777be0SXin Li armnn::BackendId setBackend;
3552*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3553*3e777be0SXin Li {
3554*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
3555*3e777be0SXin Li IsPadSupported,
3556*3e777be0SXin Li data.m_Backends,
3557*3e777be0SXin Li isSupported,
3558*3e777be0SXin Li setBackend,
3559*3e777be0SXin Li inputInfo,
3560*3e777be0SXin Li outputInfo,
3561*3e777be0SXin Li descriptor);
3562*3e777be0SXin Li };
3563*3e777be0SXin Li
3564*3e777be0SXin Li if(!IsDynamicTensor(outputInfo))
3565*3e777be0SXin Li {
3566*3e777be0SXin Li validateFunc(outputInfo, isSupported);
3567*3e777be0SXin Li }
3568*3e777be0SXin Li else
3569*3e777be0SXin Li {
3570*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
3571*3e777be0SXin Li }
3572*3e777be0SXin Li
3573*3e777be0SXin Li if (!isSupported)
3574*3e777be0SXin Li {
3575*3e777be0SXin Li return false;
3576*3e777be0SXin Li }
3577*3e777be0SXin Li
    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the PadLayer", __func__);
    }
    layer->SetBackendId(setBackend);
3584*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
3585*3e777be0SXin Li
3586*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3587*3e777be0SXin Li }
3588*3e777be0SXin Li
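// Converts an NNAPI RESHAPE operation to an ArmNN Reshape layer. Input 1 is a one-dimensional
// tensor holding the requested shape; special values such as -1 are resolved with the AndroidNN
// reshapePrepare() helper before the target shape is set on the descriptor.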
3589*3e777be0SXin Li template<typename HalPolicy,
3590*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
3591*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
3593*3e777be0SXin Li {
3594*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
3595*3e777be0SXin Li
3596*3e777be0SXin Li const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3597*3e777be0SXin Li const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3598*3e777be0SXin Li const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
3599*3e777be0SXin Li
3600*3e777be0SXin Li if (inputOperand == nullptr
3601*3e777be0SXin Li || requestedShapeOperand == nullptr
3602*3e777be0SXin Li || outputOperand == nullptr)
3603*3e777be0SXin Li {
3604*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3605*3e777be0SXin Li }
3606*3e777be0SXin Li
    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, static_cast<int>(requestedShapeOperand->dimensions.size()));
    }
3612*3e777be0SXin Li
3613*3e777be0SXin Li std::vector<int32_t> targetDimensions;
3614*3e777be0SXin Li if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3615*3e777be0SXin Li {
3616*3e777be0SXin Li return Fail("%s: Could not read values of input 1", __func__);
3617*3e777be0SXin Li }
3618*3e777be0SXin Li
3619*3e777be0SXin Li const Shape inputOperandShape = GetOperandShape(*inputOperand);
3620*3e777be0SXin Li
3621*3e777be0SXin Li Shape requestedShape;
3622*3e777be0SXin Li // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3623*3e777be0SXin Li // function that resolves these values into a fully specified tensor shape.
3624*3e777be0SXin Li if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3625*3e777be0SXin Li {
3626*3e777be0SXin Li return Fail("%s: Failed to resolve the requested shape", __func__);
3627*3e777be0SXin Li }
3628*3e777be0SXin Li
3629*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3630*3e777be0SXin Li if (!input.IsValid())
3631*3e777be0SXin Li {
3632*3e777be0SXin Li return Fail("%s: Could not read input 0", __func__);
3633*3e777be0SXin Li }
3634*3e777be0SXin Li
3635*3e777be0SXin Li armnn::ReshapeDescriptor reshapeDescriptor;
3636*3e777be0SXin Li reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3637*3e777be0SXin Li requestedShape.dimensions.data());
3638*3e777be0SXin Li
3639*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3640*3e777be0SXin Li
3641*3e777be0SXin Li bool isSupported = false;
3642*3e777be0SXin Li armnn::BackendId setBackend;
3643*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3644*3e777be0SXin Li {
3645*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
3646*3e777be0SXin Li IsReshapeSupported,
3647*3e777be0SXin Li data.m_Backends,
3648*3e777be0SXin Li isSupported,
3649*3e777be0SXin Li setBackend,
3650*3e777be0SXin Li input.GetTensorInfo(),
3651*3e777be0SXin Li outputInfo,
3652*3e777be0SXin Li reshapeDescriptor);
3653*3e777be0SXin Li };
3654*3e777be0SXin Li
3655*3e777be0SXin Li if(!IsDynamicTensor(outputInfo))
3656*3e777be0SXin Li {
3657*3e777be0SXin Li validateFunc(outputInfo, isSupported);
3658*3e777be0SXin Li }
3659*3e777be0SXin Li else
3660*3e777be0SXin Li {
3661*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
3662*3e777be0SXin Li }
3663*3e777be0SXin Li
3664*3e777be0SXin Li if (!isSupported)
3665*3e777be0SXin Li {
3666*3e777be0SXin Li return false;
3667*3e777be0SXin Li }
3668*3e777be0SXin Li
    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the ReshapeLayer", __func__);
    }
    layer->SetBackendId(setBackend);
3675*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
3676*3e777be0SXin Li
3677*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3678*3e777be0SXin Li }
3679*3e777be0SXin Li
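// Converts an NNAPI SQUEEZE operation to an ArmNN Reshape layer. The optional axis operand
// (input 1) selects which dimensions of size 1 to remove; when it is absent, every dimension of
// size 1 is removed.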
3680*3e777be0SXin Li template<typename HalPolicy,
3681*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
3682*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
3684*3e777be0SXin Li {
3685*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
3686*3e777be0SXin Li
3687*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3688*3e777be0SXin Li if (!input.IsValid())
3689*3e777be0SXin Li {
3690*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3691*3e777be0SXin Li }
3692*3e777be0SXin Li
3693*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3694*3e777be0SXin Li unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }
3699*3e777be0SXin Li
3700*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3701*3e777be0SXin Li if (!output)
3702*3e777be0SXin Li {
3703*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
3704*3e777be0SXin Li }
3705*3e777be0SXin Li
3706*3e777be0SXin Li if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
3707*3e777be0SXin Li {
3708*3e777be0SXin Li return Fail("%s: Dynamic output tensors are not supported", __func__);
3709*3e777be0SXin Li }
3710*3e777be0SXin Li
3711*3e777be0SXin Li // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3712*3e777be0SXin Li // if the operand index is out of bounds.
3713*3e777be0SXin Li const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
3714*3e777be0SXin Li
3715*3e777be0SXin Li std::vector<int32_t> axis;
3716*3e777be0SXin Li if (!axisOperand)
3717*3e777be0SXin Li {
3718*3e777be0SXin Li for (unsigned int i = 0; i < rank; ++i)
3719*3e777be0SXin Li {
            axis.push_back(static_cast<int32_t>(i));
3721*3e777be0SXin Li }
3722*3e777be0SXin Li }
3723*3e777be0SXin Li else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3724*3e777be0SXin Li {
3725*3e777be0SXin Li return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
3726*3e777be0SXin Li }
3727*3e777be0SXin Li
3728*3e777be0SXin Li std::vector<uint32_t> outputDims;
3729*3e777be0SXin Li for (unsigned int i = 0; i < rank; i++)
3730*3e777be0SXin Li {
3731*3e777be0SXin Li bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3732*3e777be0SXin Li auto currentDimension = inputInfo.GetShape()[i];
3733*3e777be0SXin Li if (skipSqueeze || currentDimension != 1)
3734*3e777be0SXin Li {
3735*3e777be0SXin Li outputDims.push_back(currentDimension);
3736*3e777be0SXin Li }
3737*3e777be0SXin Li }
3738*3e777be0SXin Li
3739*3e777be0SXin Li armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3740*3e777be0SXin Li
3741*3e777be0SXin Li armnn::TensorInfo outputInfo = inputInfo;
3742*3e777be0SXin Li outputInfo.SetShape(outShape);
3743*3e777be0SXin Li
3744*3e777be0SXin Li armnn::ReshapeDescriptor reshapeDesc;
3745*3e777be0SXin Li reshapeDesc.m_TargetShape = outputInfo.GetShape();
3746*3e777be0SXin Li
3747*3e777be0SXin Li bool isSupported = false;
3748*3e777be0SXin Li armnn::BackendId setBackend;
3749*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
3750*3e777be0SXin Li IsReshapeSupported,
3751*3e777be0SXin Li data.m_Backends,
3752*3e777be0SXin Li isSupported,
3753*3e777be0SXin Li setBackend,
3754*3e777be0SXin Li inputInfo,
3755*3e777be0SXin Li outputInfo,
3756*3e777be0SXin Li reshapeDesc);
3757*3e777be0SXin Li
3758*3e777be0SXin Li if (!isSupported)
3759*3e777be0SXin Li {
3760*3e777be0SXin Li return false;
3761*3e777be0SXin Li }
3762*3e777be0SXin Li
    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the ReshapeLayer", __func__);
    }
    layer->SetBackendId(setBackend);
3769*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
3770*3e777be0SXin Li
3771*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3772*3e777be0SXin Li }
3773*3e777be0SXin Li
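// Converts an NNAPI STRIDED_SLICE operation to an ArmNN StridedSlice layer. Inputs 1-3 hold the
// begin, end and stride values (each of length equal to the input rank); inputs 4-6 hold the
// begin, end and shrink-axis masks. Strides must be non-zero.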
3774*3e777be0SXin Li template<typename HalPolicy,
3775*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
3776*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
3778*3e777be0SXin Li {
3779*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
3780*3e777be0SXin Li
3781*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3782*3e777be0SXin Li if (!input.IsValid())
3783*3e777be0SXin Li {
3784*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3785*3e777be0SXin Li }
3786*3e777be0SXin Li
3787*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3788*3e777be0SXin Li unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }
3793*3e777be0SXin Li
3794*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3795*3e777be0SXin Li if (!output)
3796*3e777be0SXin Li {
3797*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
3798*3e777be0SXin Li }
3799*3e777be0SXin Li
3800*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3801*3e777be0SXin Li
3802*3e777be0SXin Li const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3803*3e777be0SXin Li const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3804*3e777be0SXin Li const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
3805*3e777be0SXin Li
3806*3e777be0SXin Li std::vector<int32_t> beginValues;
3807*3e777be0SXin Li std::vector<int32_t> endValues;
3808*3e777be0SXin Li std::vector<int32_t> stridesValues;
3809*3e777be0SXin Li
    // The beginOperand, endOperand and stridesOperand must each have a length equal to the rank of the input
3811*3e777be0SXin Li auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
3812*3e777be0SXin Li {
3813*3e777be0SXin Li if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3814*3e777be0SXin Li {
3815*3e777be0SXin Li return false;
3816*3e777be0SXin Li }
3817*3e777be0SXin Li
3818*3e777be0SXin Li if (operandValues.size() != rank)
3819*3e777be0SXin Li {
3820*3e777be0SXin Li return false;
3821*3e777be0SXin Li }
3822*3e777be0SXin Li
3823*3e777be0SXin Li return true;
3824*3e777be0SXin Li };
3825*3e777be0SXin Li
3826*3e777be0SXin Li if (!ValidateInputOperands(*beginOperand, beginValues)
3827*3e777be0SXin Li || !ValidateInputOperands(*endOperand, endValues)
3828*3e777be0SXin Li || !ValidateInputOperands(*stridesOperand, stridesValues))
3829*3e777be0SXin Li {
3830*3e777be0SXin Li return Fail("%s: Operation has invalid input operand", __func__);
3831*3e777be0SXin Li }
3832*3e777be0SXin Li
3833*3e777be0SXin Li // Stride cannot have value '0'
3834*3e777be0SXin Li if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3835*3e777be0SXin Li {
3836*3e777be0SXin Li return Fail("%s: Stride must be non-zero value.", __func__);
3837*3e777be0SXin Li }
3838*3e777be0SXin Li
3839*3e777be0SXin Li armnn::StridedSliceDescriptor descriptor;
3840*3e777be0SXin Li descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3841*3e777be0SXin Li descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3842*3e777be0SXin Li descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3843*3e777be0SXin Li descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3844*3e777be0SXin Li
3845*3e777be0SXin Li // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3846*3e777be0SXin Li if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3847*3e777be0SXin Li !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3848*3e777be0SXin Li !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3849*3e777be0SXin Li {
3850*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3851*3e777be0SXin Li }
3852*3e777be0SXin Li
3853*3e777be0SXin Li bool isSupported = false;
3854*3e777be0SXin Li armnn::BackendId setBackend;
3855*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3856*3e777be0SXin Li {
3857*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
3858*3e777be0SXin Li IsStridedSliceSupported,
3859*3e777be0SXin Li data.m_Backends,
3860*3e777be0SXin Li isSupported,
3861*3e777be0SXin Li setBackend,
3862*3e777be0SXin Li inputInfo,
3863*3e777be0SXin Li outputInfo,
3864*3e777be0SXin Li descriptor);
3865*3e777be0SXin Li };
3866*3e777be0SXin Li
3867*3e777be0SXin Li if(IsDynamicTensor(outputInfo))
3868*3e777be0SXin Li {
3869*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
3870*3e777be0SXin Li }
3871*3e777be0SXin Li else
3872*3e777be0SXin Li {
3873*3e777be0SXin Li validateFunc(outputInfo, isSupported);
3874*3e777be0SXin Li }
3875*3e777be0SXin Li
3876*3e777be0SXin Li if (!isSupported)
3877*3e777be0SXin Li {
3878*3e777be0SXin Li return false;
3879*3e777be0SXin Li }
3880*3e777be0SXin Li
    // Check if the slice can fit in an inferred output
3882*3e777be0SXin Li armnn::TensorShape inputShape = inputInfo.GetShape();
3883*3e777be0SXin Li for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
3884*3e777be0SXin Li {
3885*3e777be0SXin Li int stride = descriptor.m_Stride[i];
3886*3e777be0SXin Li
3887*3e777be0SXin Li if (descriptor.m_ShrinkAxisMask & (1 << i))
3888*3e777be0SXin Li {
            // If the difference between the start point and the end point of the slice on an axis being shrunk
            // is greater than 1 then fail, as the output will not be large enough to hold the slice
3891*3e777be0SXin Li if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
3892*3e777be0SXin Li || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
3893*3e777be0SXin Li {
3894*3e777be0SXin Li return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
3895*3e777be0SXin Li }
3896*3e777be0SXin Li
3897*3e777be0SXin Li if(stride < 0)
3898*3e777be0SXin Li {
3899*3e777be0SXin Li return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
3900*3e777be0SXin Li }
3901*3e777be0SXin Li }
3902*3e777be0SXin Li }
3903*3e777be0SXin Li
    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the StridedSliceLayer", __func__);
    }
    layer->SetBackendId(setBackend);
3910*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
3911*3e777be0SXin Li
3912*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3913*3e777be0SXin Li }
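// Converts an NNAPI TRANSPOSE operation to an ArmNN Transpose layer. The permutation operand
// (input 1) is optional; when it is absent or has no value, the dimension order is reversed
// (e.g. {3, 2, 1, 0} for a rank-4 input).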
3914*3e777be0SXin Li
3915*3e777be0SXin Li template<typename HalPolicy,
3916*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
3917*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
3919*3e777be0SXin Li {
3920*3e777be0SXin Li using HalOperand = typename HalPolicy::Operand;
3921*3e777be0SXin Li using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
3922*3e777be0SXin Li
3923*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3924*3e777be0SXin Li if (!input.IsValid())
3925*3e777be0SXin Li {
3926*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
3927*3e777be0SXin Li }
3928*3e777be0SXin Li
3929*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3930*3e777be0SXin Li unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }
3935*3e777be0SXin Li
3936*3e777be0SXin Li // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3937*3e777be0SXin Li // if the operand index is out of bounds.
3938*3e777be0SXin Li const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
3939*3e777be0SXin Li
3940*3e777be0SXin Li std::vector<int32_t> perm(rank);
3941*3e777be0SXin Li if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
3942*3e777be0SXin Li {
3943*3e777be0SXin Li for (unsigned int i = rank; i > 0; i--)
3944*3e777be0SXin Li {
3945*3e777be0SXin Li perm[rank - i] = armnn::numeric_cast<int> (i - 1);
3946*3e777be0SXin Li }
3947*3e777be0SXin Li }
3948*3e777be0SXin Li else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
3949*3e777be0SXin Li {
3950*3e777be0SXin Li return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
3951*3e777be0SXin Li }
3952*3e777be0SXin Li
3953*3e777be0SXin Li std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3954*3e777be0SXin Li
3955*3e777be0SXin Li armnn::TransposeDescriptor transposeDesc;
3956*3e777be0SXin Li transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
3957*3e777be0SXin Li
3958*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3959*3e777be0SXin Li if (!output)
3960*3e777be0SXin Li {
3961*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
3962*3e777be0SXin Li }
3963*3e777be0SXin Li
3964*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3965*3e777be0SXin Li
3966*3e777be0SXin Li bool isSupported = false;
3967*3e777be0SXin Li armnn::BackendId setBackend;
3968*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3969*3e777be0SXin Li {
3970*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
3971*3e777be0SXin Li IsTransposeSupported,
3972*3e777be0SXin Li data.m_Backends,
3973*3e777be0SXin Li isSupported,
3974*3e777be0SXin Li setBackend,
3975*3e777be0SXin Li inputInfo,
3976*3e777be0SXin Li outputInfo,
3977*3e777be0SXin Li transposeDesc);
3978*3e777be0SXin Li };
3979*3e777be0SXin Li
3980*3e777be0SXin Li if(IsDynamicTensor(outputInfo))
3981*3e777be0SXin Li {
3982*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
3983*3e777be0SXin Li }
3984*3e777be0SXin Li else
3985*3e777be0SXin Li {
3986*3e777be0SXin Li validateFunc(outputInfo, isSupported);
3987*3e777be0SXin Li }
3988*3e777be0SXin Li
3989*3e777be0SXin Li if (!isSupported)
3990*3e777be0SXin Li {
3991*3e777be0SXin Li return false;
3992*3e777be0SXin Li }
3993*3e777be0SXin Li
    armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the TransposeLayer", __func__);
    }
    layer->SetBackendId(setBackend);
4000*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
4001*3e777be0SXin Li
4002*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
4003*3e777be0SXin Li }
4004*3e777be0SXin Li
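// Converts an NNAPI BATCH_TO_SPACE_ND operation to an ArmNN BatchToSpaceNd layer. The block sizes
// (input 1) must all be at least 1, and the crops are fixed to zero because the Android NN API
// does not expose them. For HAL 1.2 or later operands an optional data layout operand may follow
// at index 2.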
4005*3e777be0SXin Li template<typename HalPolicy,
4006*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
4007*3e777be0SXin Li typename HalOperand = typename HalPolicy::Operand,
4008*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertBatchToSpaceNd(const HalOperation& operation,
                           const HalModel& model,
                           ConversionData& data)
{
4013*3e777be0SXin Li
4014*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4015*3e777be0SXin Li if (!input.IsValid())
4016*3e777be0SXin Li {
4017*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
4018*3e777be0SXin Li }
4019*3e777be0SXin Li
4020*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4021*3e777be0SXin Li if (!output)
4022*3e777be0SXin Li {
4023*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
4024*3e777be0SXin Li }
4025*3e777be0SXin Li
4026*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4027*3e777be0SXin Li
4028*3e777be0SXin Li const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4029*3e777be0SXin Li if (!blockOperand)
4030*3e777be0SXin Li {
4031*3e777be0SXin Li return Fail("%s: Could not read input 1", __func__);
4032*3e777be0SXin Li }
4033*3e777be0SXin Li
4034*3e777be0SXin Li // Convert the block operand to int32
4035*3e777be0SXin Li std::vector<int32_t> block;
4036*3e777be0SXin Li if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
4037*3e777be0SXin Li {
4038*3e777be0SXin Li return Fail("%s: Input 1 has invalid values", __func__);
4039*3e777be0SXin Li }
4040*3e777be0SXin Li
4041*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4042*3e777be0SXin Li
4043*3e777be0SXin Li unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }
4048*3e777be0SXin Li
4049*3e777be0SXin Li if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
4050*3e777be0SXin Li {
4051*3e777be0SXin Li return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
4052*3e777be0SXin Li " greater than or equal to 1", __func__);
4053*3e777be0SXin Li }
4054*3e777be0SXin Li
4055*3e777be0SXin Li armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
4056*3e777be0SXin Li batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
4057*3e777be0SXin Li batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
4058*3e777be0SXin Li
4059*3e777be0SXin Li if (Is12OrLaterOperand(*output))
4060*3e777be0SXin Li {
4061*3e777be0SXin Li batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
4062*3e777be0SXin Li }
4063*3e777be0SXin Li // Setting crops to 0,0 0,0 as it is not supported in Android NN API
4064*3e777be0SXin Li batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
4065*3e777be0SXin Li
4066*3e777be0SXin Li bool isSupported = false;
4067*3e777be0SXin Li armnn::BackendId setBackend;
4068*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4069*3e777be0SXin Li {
4070*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
4071*3e777be0SXin Li IsBatchToSpaceNdSupported,
4072*3e777be0SXin Li data.m_Backends,
4073*3e777be0SXin Li isSupported,
4074*3e777be0SXin Li setBackend,
4075*3e777be0SXin Li inputInfo,
4076*3e777be0SXin Li outputInfo,
4077*3e777be0SXin Li batchToSpaceNdDesc);
4078*3e777be0SXin Li };
4079*3e777be0SXin Li
4080*3e777be0SXin Li if(!IsDynamicTensor(outputInfo))
4081*3e777be0SXin Li {
4082*3e777be0SXin Li validateFunc(outputInfo, isSupported);
4083*3e777be0SXin Li }
4084*3e777be0SXin Li else
4085*3e777be0SXin Li {
4086*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
4087*3e777be0SXin Li }
4088*3e777be0SXin Li
4089*3e777be0SXin Li
4090*3e777be0SXin Li if (!isSupported)
4091*3e777be0SXin Li {
4092*3e777be0SXin Li return false;
4093*3e777be0SXin Li }
4094*3e777be0SXin Li
    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the BatchToSpaceNdLayer", __func__);
    }
    layer->SetBackendId(setBackend);
4101*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
4102*3e777be0SXin Li
4103*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
4104*3e777be0SXin Li }
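// Converts an NNAPI SPACE_TO_BATCH_ND operation to an ArmNN SpaceToBatchNd layer. The block shape
// (input 1) must have one entry per spatial dimension, and the paddings (input 2) must be a
// [spatialDim, 2] tensor of non-negative values. For HAL 1.2 or later operands an optional data
// layout operand may follow at index 3.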
4105*3e777be0SXin Li
4106*3e777be0SXin Li template<typename HalPolicy,
4107*3e777be0SXin Li typename HalOperation = typename HalPolicy::Operation,
4108*3e777be0SXin Li typename HalOperand = typename HalPolicy::Operand,
4109*3e777be0SXin Li typename HalModel = typename HalPolicy::Model>
bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
4111*3e777be0SXin Li {
4112*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4113*3e777be0SXin Li if (!input.IsValid())
4114*3e777be0SXin Li {
4115*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
4116*3e777be0SXin Li }
4117*3e777be0SXin Li
4118*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4119*3e777be0SXin Li unsigned int rank = inputInfo.GetNumDimensions();
4120*3e777be0SXin Li unsigned int spatialDim = rank - 2;
4121*3e777be0SXin Li
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }
4126*3e777be0SXin Li
4127*3e777be0SXin Li const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4128*3e777be0SXin Li if (!output)
4129*3e777be0SXin Li {
4130*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
4131*3e777be0SXin Li }
4132*3e777be0SXin Li
4133*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4134*3e777be0SXin Li
4135*3e777be0SXin Li const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4136*3e777be0SXin Li const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
4137*3e777be0SXin Li
4138*3e777be0SXin Li armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
4139*3e777be0SXin Li if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
4140*3e777be0SXin Li {
4141*3e777be0SXin Li return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
4142*3e777be0SXin Li }
4143*3e777be0SXin Li
4144*3e777be0SXin Li std::vector<int32_t> blockShape;
4145*3e777be0SXin Li if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
4146*3e777be0SXin Li {
4147*3e777be0SXin Li return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
4148*3e777be0SXin Li }
4149*3e777be0SXin Li if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
4150*3e777be0SXin Li {
4151*3e777be0SXin Li return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
4152*3e777be0SXin Li }
4153*3e777be0SXin Li
4154*3e777be0SXin Li armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
4155*3e777be0SXin Li if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
4156*3e777be0SXin Li {
4157*3e777be0SXin Li return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
4158*3e777be0SXin Li }
4159*3e777be0SXin Li
4160*3e777be0SXin Li std::vector<std::pair<unsigned int, unsigned int>> paddingList;
4161*3e777be0SXin Li std::vector<int32_t> paddings;
4162*3e777be0SXin Li if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
4163*3e777be0SXin Li {
4164*3e777be0SXin Li return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
4165*3e777be0SXin Li }
4166*3e777be0SXin Li for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
4167*3e777be0SXin Li {
4168*3e777be0SXin Li int paddingBeforeInput = paddings[i];
4169*3e777be0SXin Li int paddingAfterInput = paddings[i + 1];
4170*3e777be0SXin Li if (paddingBeforeInput < 0 || paddingAfterInput < 0)
4171*3e777be0SXin Li {
4172*3e777be0SXin Li return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
4173*3e777be0SXin Li }
4174*3e777be0SXin Li
4175*3e777be0SXin Li paddingList.emplace_back(static_cast<unsigned int>(paddingBeforeInput),
4176*3e777be0SXin Li static_cast<unsigned int>(paddingAfterInput));
4177*3e777be0SXin Li }
4178*3e777be0SXin Li
4179*3e777be0SXin Li armnn::SpaceToBatchNdDescriptor descriptor;
4180*3e777be0SXin Li descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4181*3e777be0SXin Li descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
4182*3e777be0SXin Li descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
4183*3e777be0SXin Li
4184*3e777be0SXin Li if (Is12OrLaterOperand(*output))
4185*3e777be0SXin Li {
4186*3e777be0SXin Li descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
4187*3e777be0SXin Li }
4188*3e777be0SXin Li
4189*3e777be0SXin Li bool isSupported = false;
4190*3e777be0SXin Li armnn::BackendId setBackend;
4191*3e777be0SXin Li auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4192*3e777be0SXin Li {
4193*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
4194*3e777be0SXin Li IsSpaceToBatchNdSupported,
4195*3e777be0SXin Li data.m_Backends,
4196*3e777be0SXin Li isSupported,
4197*3e777be0SXin Li setBackend,
4198*3e777be0SXin Li inputInfo,
4199*3e777be0SXin Li outputInfo,
4200*3e777be0SXin Li descriptor);
4201*3e777be0SXin Li };
4202*3e777be0SXin Li
4203*3e777be0SXin Li if(IsDynamicTensor(outputInfo))
4204*3e777be0SXin Li {
4205*3e777be0SXin Li isSupported = AreDynamicTensorsSupported();
4206*3e777be0SXin Li }
4207*3e777be0SXin Li else
4208*3e777be0SXin Li {
4209*3e777be0SXin Li validateFunc(outputInfo, isSupported);
4210*3e777be0SXin Li }
4211*3e777be0SXin Li
4212*3e777be0SXin Li if (!isSupported)
4213*3e777be0SXin Li {
4214*3e777be0SXin Li return false;
4215*3e777be0SXin Li }
4216*3e777be0SXin Li
    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the SpaceToBatchNdLayer", __func__);
    }
    layer->SetBackendId(setBackend);
4223*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
4224*3e777be0SXin Li
4225*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
4226*3e777be0SXin Li }
4227*3e777be0SXin Li
4228*3e777be0SXin Li } // namespace armnn_driver
4229*3e777be0SXin Li #ifdef __clang__
4230*3e777be0SXin Li #pragma clang diagnostic pop
4231*3e777be0SXin Li #endif
4232