//
// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "ConversionUtils_1_2.hpp"

using Half = half_float::half;

namespace armnn_driver
{

using namespace armnn;
using namespace android::nn;

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertElu(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input0.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Determine data type of input tensor
    HalOperandType inputType;
    if (!GetOperandType<HalPolicy>(operation, 0, model, inputType))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    ActivationDescriptor desc;
    desc.m_Function = ActivationFunction::Elu;

    // Read alpha
    if (inputType == HalOperandType::TENSOR_FLOAT16)
    {
        Half alpha;

        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::FLOAT16, alpha, model, data))
        {
            return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
        }

        desc.m_A = static_cast<float>(alpha);
    }
    else if (inputType == HalOperandType::TENSOR_FLOAT32)
    {
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::FLOAT32, desc.m_A, model, data))
        {
            return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
    }

    return ::ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

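// A minimal sketch of the descriptor produced above (illustrative values only; ELU computes
// f(x) = x for x >= 0 and alpha * (exp(x) - 1) for x < 0):
//
//     ActivationDescriptor desc;
//     desc.m_Function = ActivationFunction::Elu;
//     desc.m_A        = 1.0f; // alpha, read from operand 1 as FLOAT16 or FLOAT32
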
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertFill(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const TensorInfo& inputInfo  = input.GetTensorInfo();
    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Determine data type of output tensor
    HalOperandType outputType = output->type;
    FillDescriptor descriptor;
    // Read the scalar fill value
    if (outputType == HalOperandType::TENSOR_FLOAT16)
    {
        Half value;

        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::FLOAT16, value, model, data))
        {
            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
        }

        descriptor.m_Value = static_cast<float>(value);
    }
    else if (outputType == HalOperandType::TENSOR_FLOAT32)
    {
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::FLOAT32, descriptor.m_Value, model, data))
        {
            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
        }
    }
    else if (outputType == HalOperandType::TENSOR_INT32)
    {
        int32_t value;

        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, value, model, data))
        {
            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
        }

        descriptor.m_Value = static_cast<float>(value);
    }
    else
    {
        return Fail("%s: Unsupported output tensor type: %d", __func__, outputType);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFillSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the FillLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

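// Worked example of the conversion above (a sketch, not part of the driver): for FILL with
// input 0 = { 2, 3 } (the shape tensor) and input 1 = 1.5f, the converter produces
//
//     FillDescriptor descriptor;
//     descriptor.m_Value = 1.5f; // always held as float, even when the output type is TENSOR_INT32
//
// and the resulting layer emits a 2x3 tensor in which every element is 1.5.
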
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertLogicalBinary(const HalOperation& operation,
                          const HalModel& model,
                          ConversionData& data,
                          LogicalBinaryOperation logicalOperation)
{
    using HalOperand = typename HalPolicy::Operand;

    ALOGV("HalPolicy::ConvertLogicalBinary()");
    ALOGV("logicalOperation = %s", GetLogicalBinaryOperationAsCString(logicalOperation));

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!(input0.IsValid() && input1.IsValid()))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const TensorInfo& inputInfo1 = input1.GetTensorInfo();
    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    LogicalBinaryDescriptor descriptor(logicalOperation);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsLogicalBinarySupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo0,
                                   inputInfo1,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the LogicalBinaryLayer", __func__);
    }
    layer->SetBackendId(setBackend);

    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

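// Broadcast sketch for the BroadcastTensor call above (illustrative shapes): for LOGICAL_AND with
//
//     input0: BOOL8 [4, 2]
//     input1: BOOL8 [2]      // reshaped to [1, 2], then broadcast along dimension 0
//     output: BOOL8 [4, 2]
//
// the lower-rank input is expanded with a reshape so both inputs reach the layer at equal rank.
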
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertQuantizedLstm(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    ALOGV("HalPolicy::ConvertQuantizedLstm()");

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape
    //     [numBatches, inputSize], specifying the input to the LSTM cell. The tensor is quantized
    //     with a fixed range of [-1, 127/128].
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape
    //     [batch_size, output_size].
    LayerInputHandle outputStatePrevTimeStep = ConvertToLayerInputHandle<HalPolicy>(operation, 18, model, data);
    if (!outputStatePrevTimeStep.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStatePrevTimeStep", __func__);
    }

    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
    LayerInputHandle cellStatePrevTimeStep = ConvertToLayerInputHandle<HalPolicy>(operation, 19, model, data);
    if (!cellStatePrevTimeStep.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStatePrevTimeStep", __func__);
    }

    // Get the mandatory input tensors:

    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 3, model, data);

    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 4, model, data);

    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 6, model, data);

    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 7, model, data);

    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 8, model, data);

    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 13, model, data);

    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 14, model, data);

    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 15, model, data);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         g_DontPermute,
                                                         nullptr,
                                                         true);

    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         5,
                                                         model,
                                                         data,
                                                         g_DontPermute,
                                                         nullptr,
                                                         true);

    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
    //     [num_units].
    const ConstTensorPin cellToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         9,
                                                         model,
                                                         data,
                                                         g_DontPermute,
                                                         nullptr,
                                                         true);

    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
    //     [num_units].
    const ConstTensorPin cellToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         10,
                                                         model,
                                                         data,
                                                         g_DontPermute,
                                                         nullptr,
                                                         true);

    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
    //     [num_units].
    const ConstTensorPin cellToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         11,
                                                         model,
                                                         data,
                                                         g_DontPermute,
                                                         nullptr,
                                                         true);

    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         12,
                                                         model,
                                                         data,
                                                         g_DontPermute,
                                                         nullptr,
                                                         true);

    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         16,
                                                         model,
                                                         data,
                                                         g_DontPermute,
                                                         nullptr,
                                                         true);

    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         17,
                                                         model,
                                                         data,
                                                         g_DontPermute,
                                                         nullptr,
                                                         true);

    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional())
        || (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional())
        || (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional())
        || (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional())
        || (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional())
        || (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional())
        || (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional())
        || (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

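// Note on the optional-pin checks above and below: each pin is requested with the trailing
// arguments (g_DontPermute, nullptr, true), i.e. no permutation, no shape override, optional.
// An absent operand is expected to yield a pin that is invalid but flagged optional (accepted),
// whereas a present but unreadable operand yields a pin that is invalid and not optional (rejected).
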
    // Get the optional normalization tensors:

    // 20: The input layer normalization weights. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
    //     [num_units]. Used to rescale normalized inputs to activation at the input gate.
    const ConstTensorPin inputLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         20,
                                                         model,
                                                         data,
                                                         g_DontPermute,
                                                         nullptr,
                                                         true);

    // 21: The forget layer normalization weights. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
    //     [num_units]. Used to rescale normalized inputs to activation at the forget gate.
    const ConstTensorPin forgetLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         21,
                                                         model,
                                                         data,
                                                         g_DontPermute,
                                                         nullptr,
                                                         true);

    // 22: The cell layer normalization weights. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
    //     [num_units]. Used to rescale normalized inputs to activation at the cell gate.
    const ConstTensorPin cellLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         22,
                                                         model,
                                                         data,
                                                         g_DontPermute,
                                                         nullptr,
                                                         true);

    // 23: The output layer normalization weights. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
    //     [num_units]. Used to rescale normalized inputs to activation at the output gate.
    const ConstTensorPin outputLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         23,
                                                         model,
                                                         data,
                                                         g_DontPermute,
                                                         nullptr,
                                                         true);

    if ((!inputLayerNormWeightsPin.IsValid() && !inputLayerNormWeightsPin.IsOptional())
        || (!forgetLayerNormWeightsPin.IsValid() && !forgetLayerNormWeightsPin.IsOptional())
        || (!cellLayerNormWeightsPin.IsValid() && !cellLayerNormWeightsPin.IsOptional())
        || (!outputLayerNormWeightsPin.IsValid() && !outputLayerNormWeightsPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input scalars:
    // 24: The cell clip: If provided, the cell state is clipped by this value prior to the cell output activation.
    // 25: The projection clip: If provided and projection is enabled, this is used for clipping the projected values.

    // Get the mandatory input scalars:
    // 26: The scale of the intermediate result of matmul, i.e. input to layer normalization, at the input gate.
    // 27: The scale of the intermediate result of matmul, i.e. input to layer normalization, at the forget gate.
    // 28: The scale of the intermediate result of matmul, i.e. input to layer normalization, at the cell gate.
    // 29: The scale of the intermediate result of matmul, i.e. input to layer normalization, at the output gate.
    // 30: The zero point of the hidden state, i.e. input to projection.
    // 31: The scale of the hidden state, i.e. input to projection.
    float cellClip, projClip, matMulInputGate, matMulForgetGate, matMulCellGate, matMulOutputGate, projInputScale;
    int projInputZeroPoint;

    if (!GetInputScalar<HalPolicy>(operation, 24, HalOperandType::FLOAT32, cellClip, model, data, true) ||
        !GetInputScalar<HalPolicy>(operation, 25, HalOperandType::FLOAT32, projClip, model, data, true) ||
        !GetInputScalar<HalPolicy>(operation, 26, HalOperandType::FLOAT32, matMulInputGate, model, data) ||
        !GetInputScalar<HalPolicy>(operation, 27, HalOperandType::FLOAT32, matMulForgetGate, model, data) ||
        !GetInputScalar<HalPolicy>(operation, 28, HalOperandType::FLOAT32, matMulCellGate, model, data) ||
        !GetInputScalar<HalPolicy>(operation, 29, HalOperandType::FLOAT32, matMulOutputGate, model, data) ||
        !GetInputScalar<HalPolicy>(operation, 30, HalOperandType::INT32, projInputZeroPoint, model, data) ||
        !GetInputScalar<HalPolicy>(operation, 31, HalOperandType::FLOAT32, projInputScale, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 0: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape
    //    [batch_size, output_size].
    const HalOperand* outputStateOut = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 0: outputStateOut", __func__);
    }

    // 1: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
    const HalOperand* cellStateOut = GetOutputOperand<HalPolicy>(operation, 1, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 1: cellStateOut", __func__);
    }

    // 2: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size, output_size].
    //    This is effectively the same as the current “output state (out)” value.
    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 2, model);
    if (!output)
    {
        return Fail("%s: Could not read output 2: output", __func__);
    }

    // Set the params structure for the AddQLstmLayer call
    LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
    params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
    params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
    params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
    params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();

    // Set the layer descriptor
    QLstmDescriptor desc;
    desc.m_CellClip = cellClip;
    desc.m_ProjectionClip = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
    desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
                               params.m_ForgetLayerNormWeights != nullptr ||
                               params.m_CellLayerNormWeights != nullptr ||
                               params.m_OutputLayerNormWeights != nullptr);
    desc.m_InputIntermediateScale = matMulInputGate;
    desc.m_ForgetIntermediateScale = matMulForgetGate;
    desc.m_CellIntermediateScale = matMulCellGate;
    desc.m_OutputIntermediateScale = matMulOutputGate;
    desc.m_HiddenStateScale = projInputScale;
    desc.m_HiddenStateZeroPoint = projInputZeroPoint;

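// Example of the flag inference above (a sketch): a CIFG model omits operands 1, 5 and 12, so the
// three corresponding pins hold null tensor pointers and m_CifgEnabled becomes true; a model that
// supplies only some of that group is rejected by the validation below.
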
    // Validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    if (desc.m_LayerNormEnabled &&
        (params.m_ForgetLayerNormWeights == nullptr ||
         params.m_CellLayerNormWeights == nullptr ||
         params.m_OutputLayerNormWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
    {
        return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
                    " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
    }

    // Basic parameters
    LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights     = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights       = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights     = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights   = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias           = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias                 = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());

    // Inputs
    const TensorInfo& inputInfo = input.GetTensorInfo();
    const TensorInfo& outputStatePrevTimeStepInfo = outputStatePrevTimeStep.GetTensorInfo();
    const TensorInfo& cellStatePrevTimeStepInfo = cellStatePrevTimeStep.GetTensorInfo();

    // Outputs
    TensorInfo outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);

    // Optional parameters
    if (!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (desc.m_PeepholeEnabled)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if (desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }
    else
    {
        // If projection is disabled, the hidden state feeds the outputs directly, so override the
        // non-const output infos with the hidden-state quantization parameters before creating the
        // const TensorInfos used for validation below
        outputStateOutInfo.SetQuantizationScale(projInputScale);
        outputStateOutInfo.SetQuantizationOffset(projInputZeroPoint);
        outputInfo.SetQuantizationScale(projInputScale);
        outputInfo.SetQuantizationOffset(projInputZeroPoint);
    }

    const TensorInfo constOutputStateOutInfo(outputStateOutInfo);
    const TensorInfo constOutputInfo(outputInfo);

    if (desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (desc.m_LayerNormEnabled)
    {
        if (!desc.m_CifgEnabled)
        {
            paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
        }
        paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
        paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
        paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
    }

    // Check if the layer is supported
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& cellStateOutInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsQLstmSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputStatePrevTimeStepInfo,
                                   cellStatePrevTimeStepInfo,
                                   constOutputStateOutInfo,
                                   cellStateOutInfo,
                                   constOutputInfo,
                                   desc,
                                   paramsInfo);
    };

    bool isDynamic = false;
    if (!IsDynamicTensor(constOutputStateOutInfo) &&
        !IsDynamicTensor(cellStateOutInfo) &&
        !IsDynamicTensor(constOutputInfo))
    {
        validateFunc(cellStateOutInfo, isSupported);
    }
    else
    {
        isDynamic = true;
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    // Add the layer
    IConnectableLayer* layer = data.m_Network->AddQLstmLayer(desc, params, "QLstm");
    layer->SetBackendId(setBackend);

    input.Connect(layer->GetInputSlot(0));
    outputStatePrevTimeStep.Connect(layer->GetInputSlot(1));
    cellStatePrevTimeStep.Connect(layer->GetInputSlot(2));

    if (!isDynamic)
    {
        return ( SetupAndTrackLayerOutputSlot<HalPolicy>(
                       operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
                 SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 1, *layer, 1, model, data) &&
                 SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 2, *layer, 2, model, data, &constOutputInfo));
    }
    else
    {
        return ( SetupAndTrackLayerOutputSlot<HalPolicy>(
                       operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
                 SetupAndTrackLayerOutputSlot<HalPolicy>(
                       operation, 1, *layer, 1, model, data, nullptr, validateFunc,
                       ActivationFn::kActivationNone, true) &&
                 SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 2, *layer, 2, model, data, &constOutputInfo));
    }
}

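// Worked quantization example (derived from the fixed input range quoted above): mapping the
// range [-1, 127/128] onto signed 8-bit values [-128, 127] gives
//
//     scale     = ((127/128) - (-1)) / 255 = 1/128
//     zeroPoint = 0
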
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertRank(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);
    const Shape outputOperandShape = GetOperandShape(*outputOperand);

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsRankSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               input.GetTensorInfo(),
                               outInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddRankLayer();
    if (!layer)
    {
        return Fail("%s: Could not add the RankLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, &outInfo);
}

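// Example (illustrative only): for an input of shape [2, 3, 4] the RankLayer emits the scalar 3;
// the output operand is expected to be a rank-0 INT32 tensor, which is why a dynamic output is
// rejected above.
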
} // armnn_driver namespace