1*3e777be0SXin Li //
2*3e777be0SXin Li // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3*3e777be0SXin Li // SPDX-License-Identifier: MIT
4*3e777be0SXin Li //
5*3e777be0SXin Li
6*3e777be0SXin Li #include "HalPolicy.hpp"
7*3e777be0SXin Li
8*3e777be0SXin Li #include <armnn/Optional.hpp>
9*3e777be0SXin Li
10*3e777be0SXin Li #include "FullyConnected.hpp"
11*3e777be0SXin Li #include "Utils.hpp"
12*3e777be0SXin Li
13*3e777be0SXin Li namespace armnn_driver
14*3e777be0SXin Li {
15*3e777be0SXin Li namespace hal_1_0
16*3e777be0SXin Li {
17*3e777be0SXin Li
ConvertOperation(const Operation & operation,const Model & model,ConversionData & data)18*3e777be0SXin Li bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
19*3e777be0SXin Li {
20*3e777be0SXin Li switch (operation.type)
21*3e777be0SXin Li {
22*3e777be0SXin Li case V1_0::OperationType::ADD:
23*3e777be0SXin Li return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Add);
24*3e777be0SXin Li case V1_0::OperationType::AVERAGE_POOL_2D:
25*3e777be0SXin Li return ConvertAveragePool2d(operation, model, data);
26*3e777be0SXin Li case V1_0::OperationType::CONCATENATION:
27*3e777be0SXin Li return ConvertConcatenation(operation, model, data);
28*3e777be0SXin Li case V1_0::OperationType::CONV_2D:
29*3e777be0SXin Li return ConvertConv2d(operation, model, data);
30*3e777be0SXin Li case V1_0::OperationType::DEPTH_TO_SPACE:
31*3e777be0SXin Li return ConvertDepthToSpace(operation, model, data);
32*3e777be0SXin Li case V1_0::OperationType::DEPTHWISE_CONV_2D:
33*3e777be0SXin Li return ConvertDepthwiseConv2d(operation, model, data);
34*3e777be0SXin Li case V1_0::OperationType::DEQUANTIZE:
35*3e777be0SXin Li return ConvertDequantize(operation, model, data);
36*3e777be0SXin Li case V1_0::OperationType::FLOOR:
37*3e777be0SXin Li return ConvertFloor(operation, model, data);
38*3e777be0SXin Li case V1_0::OperationType::FULLY_CONNECTED:
39*3e777be0SXin Li return ConvertFullyConnected(operation, model, data);
40*3e777be0SXin Li case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
41*3e777be0SXin Li return ConvertLocalResponseNormalization(operation, model, data);
42*3e777be0SXin Li case V1_0::OperationType::LOGISTIC:
43*3e777be0SXin Li return ConvertLogistic(operation, model, data);
44*3e777be0SXin Li case V1_0::OperationType::LSTM:
45*3e777be0SXin Li return ConvertLstm(operation, model, data);
46*3e777be0SXin Li case V1_0::OperationType::L2_NORMALIZATION:
47*3e777be0SXin Li return ConvertL2Normalization(operation, model, data);
48*3e777be0SXin Li case V1_0::OperationType::L2_POOL_2D:
49*3e777be0SXin Li return ConvertL2Pool2d(operation, model, data);
50*3e777be0SXin Li case V1_0::OperationType::MAX_POOL_2D:
51*3e777be0SXin Li return ConvertMaxPool2d(operation, model, data);
52*3e777be0SXin Li case V1_0::OperationType::MUL:
53*3e777be0SXin Li return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Mul);
54*3e777be0SXin Li case V1_0::OperationType::RELU:
55*3e777be0SXin Li return ConvertReLu(operation, model, data);
56*3e777be0SXin Li case V1_0::OperationType::RELU1:
57*3e777be0SXin Li return ConvertReLu1(operation, model, data);
58*3e777be0SXin Li case V1_0::OperationType::RELU6:
59*3e777be0SXin Li return ConvertReLu6(operation, model, data);
60*3e777be0SXin Li case V1_0::OperationType::SOFTMAX:
61*3e777be0SXin Li return ConvertSoftmax(operation, model, data);
62*3e777be0SXin Li case V1_0::OperationType::SPACE_TO_DEPTH:
63*3e777be0SXin Li return ConvertSpaceToDepth(operation, model, data);
64*3e777be0SXin Li case V1_0::OperationType::TANH:
65*3e777be0SXin Li return ConvertTanH(operation, model, data);
66*3e777be0SXin Li case V1_0::OperationType::RESHAPE:
67*3e777be0SXin Li return ConvertReshape(operation, model, data);
68*3e777be0SXin Li case V1_0::OperationType::RESIZE_BILINEAR:
69*3e777be0SXin Li return ConvertResizeBilinear(operation, model, data);
70*3e777be0SXin Li default:
71*3e777be0SXin Li return Fail("%s: Operation type %s not supported in ArmnnDriver",
72*3e777be0SXin Li __func__, toString(operation.type).c_str());
73*3e777be0SXin Li }
74*3e777be0SXin Li }
75*3e777be0SXin Li
ConvertAveragePool2d(const Operation & operation,const Model & model,ConversionData & data)76*3e777be0SXin Li bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
77*3e777be0SXin Li {
78*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertAveragePool2d()");
79*3e777be0SXin Li return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
80*3e777be0SXin Li }
81*3e777be0SXin Li
ConvertConcatenation(const Operation & operation,const Model & model,ConversionData & data)82*3e777be0SXin Li bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
83*3e777be0SXin Li {
84*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertConcatenation()");
85*3e777be0SXin Li return ::ConvertConcatenation<hal_1_0::HalPolicy>(operation, model, data);
86*3e777be0SXin Li }
87*3e777be0SXin Li
ConvertConv2d(const Operation & operation,const Model & model,ConversionData & data)88*3e777be0SXin Li bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
89*3e777be0SXin Li {
90*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertConv2d()");
91*3e777be0SXin Li return ::ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
92*3e777be0SXin Li }
93*3e777be0SXin Li
ConvertDepthToSpace(const Operation & operation,const Model & model,ConversionData & data)94*3e777be0SXin Li bool HalPolicy::ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data)
95*3e777be0SXin Li {
96*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertDepthToSpace()");
97*3e777be0SXin Li return ::ConvertDepthToSpace<hal_1_0::HalPolicy>(operation, model, data);
98*3e777be0SXin Li }
99*3e777be0SXin Li
ConvertDepthwiseConv2d(const Operation & operation,const Model & model,ConversionData & data)100*3e777be0SXin Li bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
101*3e777be0SXin Li {
102*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertDepthwiseConv2d()");
103*3e777be0SXin Li return ::ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
104*3e777be0SXin Li }
105*3e777be0SXin Li
ConvertDequantize(const Operation & operation,const Model & model,ConversionData & data)106*3e777be0SXin Li bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
107*3e777be0SXin Li {
108*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertDequantize()");
109*3e777be0SXin Li return ::ConvertDequantize<hal_1_0::HalPolicy>(operation, model, data);
110*3e777be0SXin Li }
111*3e777be0SXin Li
ConvertElementwiseBinary(const Operation & operation,const Model & model,ConversionData & data,armnn::BinaryOperation binaryOperation)112*3e777be0SXin Li bool HalPolicy::ConvertElementwiseBinary(const Operation& operation,
113*3e777be0SXin Li const Model& model,
114*3e777be0SXin Li ConversionData& data,
115*3e777be0SXin Li armnn::BinaryOperation binaryOperation)
116*3e777be0SXin Li {
117*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertElementwiseBinary()");
118*3e777be0SXin Li return ::ConvertElementwiseBinary<hal_1_0::HalPolicy>(operation, model, data, binaryOperation);
119*3e777be0SXin Li }
120*3e777be0SXin Li
ConvertFloor(const Operation & operation,const Model & model,ConversionData & data)121*3e777be0SXin Li bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
122*3e777be0SXin Li {
123*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertFloor()");
124*3e777be0SXin Li return ::ConvertFloor<hal_1_0::HalPolicy>(operation, model, data);
125*3e777be0SXin Li }
126*3e777be0SXin Li
ConvertFullyConnected(const Operation & operation,const Model & model,ConversionData & data)127*3e777be0SXin Li bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
128*3e777be0SXin Li {
129*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertFullyConnected()");
130*3e777be0SXin Li return ::ConvertFullyConnected<hal_1_0::HalPolicy>(operation, model, data);
131*3e777be0SXin Li }
132*3e777be0SXin Li
ConvertLocalResponseNormalization(const Operation & operation,const Model & model,ConversionData & data)133*3e777be0SXin Li bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
134*3e777be0SXin Li const Model& model,
135*3e777be0SXin Li ConversionData& data)
136*3e777be0SXin Li {
137*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertLocalResponseNormalization()");
138*3e777be0SXin Li return ::ConvertLocalResponseNormalization<hal_1_0::HalPolicy>(operation, model, data);
139*3e777be0SXin Li }
140*3e777be0SXin Li
ConvertLogistic(const Operation & operation,const Model & model,ConversionData & data)141*3e777be0SXin Li bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
142*3e777be0SXin Li {
143*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertLogistic()");
144*3e777be0SXin Li return ::ConvertLogistic<hal_1_0::HalPolicy>(operation, model, data);
145*3e777be0SXin Li }
146*3e777be0SXin Li
// Converts an NNAPI LSTM operation (HAL 1.0 operand layout: inputs 0-22,
// outputs 0-3) into an armnn LSTM layer. Reads the three dynamic inputs
// (input, output state, cell state) and the constant weight/bias tensors,
// infers the CIFG / peephole / projection configuration from which optional
// tensors are present, validates the resulting combination, queries backend
// support, and finally adds and wires up the layer.
// Returns true on success; false (via Fail, which logs) on any invalid or
// unsupported input.
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLstm()");

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);

    // All of the mandatory constant tensors must have resolved successfully.
    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // (The trailing arguments — g_DontPermute, nullptr, true — request no permutation,
    //  no shape override, and mark the operand as optional, so a missing operand
    //  yields an "optional but absent" pin rather than an error.)
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  5,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  9,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  10,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  11,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  12,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  16,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  17,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // An optional pin is acceptable either as a valid tensor or as "optional and
    // absent"; anything else (present but unreadable) is an error.
    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
    //     with CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // set the params structure for the AddLstmLayer call
    // (GetConstTensorPtr() yields nullptr for absent optional tensors, which is
    //  what the descriptor inference below keys off.)
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();

    // set the layer descriptor
    // CIFG (coupled input-forget gate) is enabled when any of the input-gate
    // tensors is missing; peephole when any cell-to-gate weights are supplied;
    // projection when projection weights are supplied.
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // validate the optional input groups
    // CIFG implies all three input-gate tensors are absent; a partial set is invalid.
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Basic parameters
    // (Mandatory pointers are known non-null here: validated above.)
    armnn::LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());

    // Optional parameters
    if(!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if(desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if(desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    // Query the configured backends; setBackend records which backend accepted
    // the layer so it can be pinned below.
    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsLstmSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               inputInfo,
                               outputStateInInfo,
                               cellStateInInfo,
                               scratchBufferInfo,
                               outputStateOutInfo,
                               cellStateOutInfo,
                               outputInfo,
                               desc,
                               paramsInfo);
    if (!isSupported)
    {
        return false;
    }

    // Add the layer
    // NOTE(review): the return value of AddLstmLayer is dereferenced without a
    // null check — presumably it cannot fail here; confirm against the
    // INetwork::AddLstmLayer contract.
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
    layer->SetBackendId(setBackend);

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    // Register all four outputs (scratch buffer, output state, cell state, output).
    return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
}
503*3e777be0SXin Li
ConvertL2Normalization(const Operation & operation,const Model & model,ConversionData & data)504*3e777be0SXin Li bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
505*3e777be0SXin Li {
506*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertL2Normalization()");
507*3e777be0SXin Li return ::ConvertL2Normalization<hal_1_0::HalPolicy>(operation, model, data);
508*3e777be0SXin Li }
509*3e777be0SXin Li
ConvertL2Pool2d(const Operation & operation,const Model & model,ConversionData & data)510*3e777be0SXin Li bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
511*3e777be0SXin Li {
512*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertL2Pool2d()");
513*3e777be0SXin Li return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
514*3e777be0SXin Li }
515*3e777be0SXin Li
ConvertMaxPool2d(const Operation & operation,const Model & model,ConversionData & data)516*3e777be0SXin Li bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
517*3e777be0SXin Li {
518*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertMaxPool2d()");
519*3e777be0SXin Li return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
520*3e777be0SXin Li }
521*3e777be0SXin Li
ConvertReLu(const Operation & operation,const Model & model,ConversionData & data)522*3e777be0SXin Li bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
523*3e777be0SXin Li {
524*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertReLu()");
525*3e777be0SXin Li return ::ConvertReLu<hal_1_0::HalPolicy>(operation, model, data);
526*3e777be0SXin Li }
527*3e777be0SXin Li
ConvertReLu1(const Operation & operation,const Model & model,ConversionData & data)528*3e777be0SXin Li bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
529*3e777be0SXin Li {
530*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertReLu1()");
531*3e777be0SXin Li return ::ConvertReLu1<hal_1_0::HalPolicy>(operation, model, data);
532*3e777be0SXin Li }
533*3e777be0SXin Li
ConvertReLu6(const Operation & operation,const Model & model,ConversionData & data)534*3e777be0SXin Li bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
535*3e777be0SXin Li {
536*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertReLu6()");
537*3e777be0SXin Li return ::ConvertReLu6<hal_1_0::HalPolicy>(operation, model, data);
538*3e777be0SXin Li }
539*3e777be0SXin Li
ConvertSoftmax(const Operation & operation,const Model & model,ConversionData & data)540*3e777be0SXin Li bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
541*3e777be0SXin Li {
542*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertSoftmax()");
543*3e777be0SXin Li
544*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
545*3e777be0SXin Li if (!input.IsValid())
546*3e777be0SXin Li {
547*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
548*3e777be0SXin Li }
549*3e777be0SXin Li
550*3e777be0SXin Li const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
551*3e777be0SXin Li if (!outputOperand)
552*3e777be0SXin Li {
553*3e777be0SXin Li return Fail("%s: Operation has no outputs", __func__);
554*3e777be0SXin Li }
555*3e777be0SXin Li
556*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
557*3e777be0SXin Li if (IsDynamicTensor(outputInfo))
558*3e777be0SXin Li {
559*3e777be0SXin Li return Fail("%s: Dynamic output tensors are not supported", __func__);
560*3e777be0SXin Li }
561*3e777be0SXin Li
562*3e777be0SXin Li armnn::SoftmaxDescriptor desc;
563*3e777be0SXin Li if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
564*3e777be0SXin Li {
565*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
566*3e777be0SXin Li }
567*3e777be0SXin Li
568*3e777be0SXin Li bool isSupported = false;
569*3e777be0SXin Li armnn::BackendId setBackend;
570*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
571*3e777be0SXin Li IsSoftmaxSupported,
572*3e777be0SXin Li data.m_Backends,
573*3e777be0SXin Li isSupported,
574*3e777be0SXin Li setBackend,
575*3e777be0SXin Li input.GetTensorInfo(),
576*3e777be0SXin Li outputInfo,
577*3e777be0SXin Li desc);
578*3e777be0SXin Li if (!isSupported)
579*3e777be0SXin Li {
580*3e777be0SXin Li return false;
581*3e777be0SXin Li }
582*3e777be0SXin Li
583*3e777be0SXin Li armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
584*3e777be0SXin Li layer->SetBackendId(setBackend);
585*3e777be0SXin Li if (!layer)
586*3e777be0SXin Li {
587*3e777be0SXin Li return Fail("%s: Could not add the SoftmaxLayer", __func__);
588*3e777be0SXin Li }
589*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
590*3e777be0SXin Li
591*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
592*3e777be0SXin Li }
593*3e777be0SXin Li
ConvertSpaceToDepth(const Operation & operation,const Model & model,ConversionData & data)594*3e777be0SXin Li bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
595*3e777be0SXin Li {
596*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertSpaceToDepth()");
597*3e777be0SXin Li
598*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
599*3e777be0SXin Li if (!input.IsValid() )
600*3e777be0SXin Li {
601*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
602*3e777be0SXin Li }
603*3e777be0SXin Li
604*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
605*3e777be0SXin Li unsigned int rank = inputInfo.GetNumDimensions();
606*3e777be0SXin Li
607*3e777be0SXin Li if (rank != 4)
608*3e777be0SXin Li {
609*3e777be0SXin Li return Fail("%s: Only inputs with rank 4 are supported", __func__);
610*3e777be0SXin Li }
611*3e777be0SXin Li
612*3e777be0SXin Li armnn::SpaceToDepthDescriptor desc;
613*3e777be0SXin Li
614*3e777be0SXin Li GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
615*3e777be0SXin Li
616*3e777be0SXin Li if (desc.m_BlockSize <= 1)
617*3e777be0SXin Li {
618*3e777be0SXin Li return Fail("%s: Block size must be at least 1 in all dimensions");
619*3e777be0SXin Li }
620*3e777be0SXin Li
621*3e777be0SXin Li const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
622*3e777be0SXin Li if (!output)
623*3e777be0SXin Li {
624*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
625*3e777be0SXin Li }
626*3e777be0SXin Li
627*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
628*3e777be0SXin Li if (IsDynamicTensor(outputInfo))
629*3e777be0SXin Li {
630*3e777be0SXin Li return Fail("%s: Dynamic output tensors are not supported", __func__);
631*3e777be0SXin Li }
632*3e777be0SXin Li
633*3e777be0SXin Li bool isSupported = false;
634*3e777be0SXin Li armnn::BackendId setBackend;
635*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
636*3e777be0SXin Li IsSpaceToDepthSupported,
637*3e777be0SXin Li data.m_Backends,
638*3e777be0SXin Li isSupported,
639*3e777be0SXin Li setBackend,
640*3e777be0SXin Li inputInfo,
641*3e777be0SXin Li outputInfo,
642*3e777be0SXin Li desc);
643*3e777be0SXin Li if (!isSupported)
644*3e777be0SXin Li {
645*3e777be0SXin Li return false;
646*3e777be0SXin Li }
647*3e777be0SXin Li
648*3e777be0SXin Li armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
649*3e777be0SXin Li layer->SetBackendId(setBackend);
650*3e777be0SXin Li if (!layer)
651*3e777be0SXin Li {
652*3e777be0SXin Li return Fail("%s: Could not add the SpaceToDepthLayer", __func__);
653*3e777be0SXin Li }
654*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
655*3e777be0SXin Li
656*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
657*3e777be0SXin Li }
658*3e777be0SXin Li
ConvertTanH(const Operation & operation,const Model & model,ConversionData & data)659*3e777be0SXin Li bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
660*3e777be0SXin Li {
661*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertTanH()");
662*3e777be0SXin Li return ::ConvertTanH<hal_1_0::HalPolicy>(operation, model, data);
663*3e777be0SXin Li }
664*3e777be0SXin Li
ConvertReshape(const Operation & operation,const Model & model,ConversionData & data)665*3e777be0SXin Li bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
666*3e777be0SXin Li {
667*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertReshape()");
668*3e777be0SXin Li return ::ConvertReshape<hal_1_0::HalPolicy>(operation, model, data);
669*3e777be0SXin Li }
670*3e777be0SXin Li
ConvertResizeBilinear(const Operation & operation,const Model & model,ConversionData & data)671*3e777be0SXin Li bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
672*3e777be0SXin Li {
673*3e777be0SXin Li ALOGV("hal_1_0::HalPolicy::ConvertResizeBilinear()");
674*3e777be0SXin Li
675*3e777be0SXin Li LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
676*3e777be0SXin Li if (!input.IsValid())
677*3e777be0SXin Li {
678*3e777be0SXin Li return Fail("%s: Could not read input 0", __func__);
679*3e777be0SXin Li }
680*3e777be0SXin Li
681*3e777be0SXin Li const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
682*3e777be0SXin Li if (!output)
683*3e777be0SXin Li {
684*3e777be0SXin Li return Fail("%s: Could not read output 0", __func__);
685*3e777be0SXin Li }
686*3e777be0SXin Li
687*3e777be0SXin Li const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
688*3e777be0SXin Li const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
689*3e777be0SXin Li
690*3e777be0SXin Li if (IsDynamicTensor(outputInfo))
691*3e777be0SXin Li {
692*3e777be0SXin Li return Fail("%s: Dynamic output tensors are not supported", __func__);
693*3e777be0SXin Li }
694*3e777be0SXin Li
695*3e777be0SXin Li armnn::ResizeDescriptor desc;
696*3e777be0SXin Li desc.m_Method = armnn::ResizeMethod::Bilinear;
697*3e777be0SXin Li desc.m_DataLayout = armnn::DataLayout::NHWC;
698*3e777be0SXin Li
699*3e777be0SXin Li bool isSupported = false;
700*3e777be0SXin Li armnn::BackendId setBackend;
701*3e777be0SXin Li FORWARD_LAYER_SUPPORT_FUNC(__func__,
702*3e777be0SXin Li IsResizeSupported,
703*3e777be0SXin Li data.m_Backends,
704*3e777be0SXin Li isSupported,
705*3e777be0SXin Li setBackend,
706*3e777be0SXin Li inputInfo,
707*3e777be0SXin Li outputInfo,
708*3e777be0SXin Li desc);
709*3e777be0SXin Li if (!isSupported)
710*3e777be0SXin Li {
711*3e777be0SXin Li return false;
712*3e777be0SXin Li }
713*3e777be0SXin Li
714*3e777be0SXin Li if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
715*3e777be0SXin Li !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
716*3e777be0SXin Li {
717*3e777be0SXin Li return Fail("%s: Operation has invalid inputs", __func__);
718*3e777be0SXin Li }
719*3e777be0SXin Li
720*3e777be0SXin Li armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
721*3e777be0SXin Li layer->SetBackendId(setBackend);
722*3e777be0SXin Li if (!layer)
723*3e777be0SXin Li {
724*3e777be0SXin Li return Fail("%s: Could not add the ResizeLayer", __func__);
725*3e777be0SXin Li }
726*3e777be0SXin Li layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
727*3e777be0SXin Li input.Connect(layer->GetInputSlot(0));
728*3e777be0SXin Li
729*3e777be0SXin Li return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
730*3e777be0SXin Li
731*3e777be0SXin Li }
732*3e777be0SXin Li
733*3e777be0SXin Li } // namespace hal_1_0
734*3e777be0SXin Li } // namespace armnn_driver
735