xref: /aosp_15_r20/external/armnn/src/backends/backendsCommon/WorkloadFactory.cpp (revision 89c4ff92f2867872bb9e2354d150bf0c8c502810)
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <Layer.hpp>
7 #include <LayersFwd.hpp>
8 
9 #include <armnn/Types.hpp>
10 #include <armnn/backends/IBackendInternal.hpp>
11 #include <armnn/backends/ILayerSupport.hpp>
12 #include <armnn/BackendHelper.hpp>
13 #include <armnn/BackendRegistry.hpp>
14 #include <armnn/utility/PolymorphicDowncast.hpp>
15 #include <armnn/utility/TransformIterator.hpp>
16 
17 #include <armnn/backends/WorkloadFactory.hpp>
18 
19 #include <sstream>
20 
21 namespace armnn
22 {
23 
24 namespace
25 {
26 using LayerList = std::list<Layer*>;
27 using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
28 
/// Returns a copy of @p info with its data type replaced by @p type.
/// If @p type is empty the original TensorInfo is returned unchanged.
/// Shape, quantization parameters and constness are always preserved.
const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
{
    // No override requested: hand the caller's info straight back.
    if (!type.has_value())
    {
        return info;
    }

    // Rebuild the tensor description around the requested data type.
    return TensorInfo(info.GetShape(),
                      type.value(),
                      info.GetQuantizationScale(),
                      info.GetQuantizationOffset(),
                      info.IsConstant());
}
42 
43 } // anonymous namespace
44 
GetBiasTypeFromWeightsType(armnn::Optional<armnn::DataType> weightsType)45 inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Optional<armnn::DataType> weightsType)
46 {
47     if (!weightsType)
48     {
49         return weightsType;
50     }
51 
52     switch(weightsType.value())
53     {
54         case armnn::DataType::BFloat16:
55         case armnn::DataType::Float16:
56         case armnn::DataType::Float32:
57             return weightsType;
58         case armnn::DataType::QAsymmS8:
59         case armnn::DataType::QAsymmU8:
60         case armnn::DataType::QSymmS8:
61         case armnn::DataType::QSymmS16:
62             return armnn::DataType::Signed32;
63         default:
64             ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
65     }
66     return armnn::EmptyOptional();
67 }
68 
69 
IsLayerConfigurationSupported(const BackendId & backendId,const IConnectableLayer & connectableLayer,Optional<DataType> dataType,std::string & outReasonIfUnsupported,const ModelOptions & modelOptions)70 bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
71                                                      const IConnectableLayer& connectableLayer,
72                                                      Optional<DataType> dataType,
73                                                      std::string& outReasonIfUnsupported,
74                                                      const ModelOptions& modelOptions)
75 {
76     Optional<std::string&> reason = outReasonIfUnsupported;
77     bool result;
78     const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));
79 
80     auto const& backendRegistry = BackendRegistryInstance();
81     if (!backendRegistry.IsBackendRegistered(backendId))
82     {
83         std::stringstream ss;
84         ss << connectableLayer.GetName() << " is not supported on " << backendId
85            << " because this backend is not registered.";
86 
87         outReasonIfUnsupported = ss.str();
88         return false;
89     }
90 
91     auto backendFactory = backendRegistry.GetFactory(backendId);
92     auto backendObject = backendFactory();
93     auto layerSupport = backendObject->GetLayerSupport(modelOptions);
94     auto layerSupportObject = LayerSupportHandle(layerSupport, backendId);
95 
96     switch(layer.GetType())
97     {
98         case LayerType::Activation:
99         {
100             auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
101             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
102             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
103             result = layerSupportObject.IsActivationSupported(
104                                            OverrideDataType(input, dataType),
105                                            OverrideDataType(output, dataType),
106                                            cLayer->GetParameters(),
107                                            reason);
108             break;
109         }
110         case LayerType::Addition:
111         {
112             ARMNN_NO_DEPRECATE_WARN_BEGIN
113             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
114             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
115             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
116             result = layerSupportObject.IsAdditionSupported(
117                                         OverrideDataType(input0, dataType),
118                                         OverrideDataType(input1, dataType),
119                                         OverrideDataType(output, dataType),
120                                         reason);
121             ARMNN_NO_DEPRECATE_WARN_END
122             break;
123         }
124         case LayerType::ArgMinMax:
125         {
126             auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
127             const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
128 
129             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
130             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
131             result = layerSupportObject.IsArgMinMaxSupported(
132                     OverrideDataType(input, dataType),
133                     OverrideDataType(output, DataType::Signed32),
134                     descriptor,
135                     reason);
136             break;
137         }
138         case LayerType::BatchMatMul:
139         {
140             auto cLayer = PolymorphicDowncast<const BatchMatMulLayer*>(&layer);
141             const BatchMatMulDescriptor& descriptor = cLayer->GetParameters();
142 
143             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
144             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
145             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
146             result = layerSupportObject.IsBatchMatMulSupported(
147                             OverrideDataType(input0, dataType),
148                             OverrideDataType(input1, dataType),
149                             OverrideDataType(output, dataType),
150                             descriptor,
151                             reason);
152             break;
153         }
154         case LayerType::BatchNormalization:
155         {
156             auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
157             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
158             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
159             const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
160             const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
161             const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
162             const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
163             result = layerSupportObject.IsBatchNormalizationSupported(
164                                                    OverrideDataType(input, dataType),
165                                                    OverrideDataType(output, dataType),
166                                                    OverrideDataType(mean, dataType),
167                                                    OverrideDataType(var, dataType),
168                                                    OverrideDataType(beta, dataType),
169                                                    OverrideDataType(gamma, dataType),
170                                                    cLayer->GetParameters(),
171                                                    reason);
172             break;
173         }
174         case LayerType::BatchToSpaceNd:
175         {
176             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
177             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
178             auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);
179 
180             result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
181                                                                   OverrideDataType(output, dataType),
182                                                                   cLayer->GetParameters(),
183                                                                   reason);
184             break;
185         }
186         case LayerType::Cast:
187         {
188             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
189             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
190 
191             result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
192                                                         OverrideDataType(output, dataType),
193                                                         reason);
194             break;
195         }
196         case LayerType::ChannelShuffle:
197         {
198             auto cLayer = PolymorphicDowncast<const ChannelShuffleLayer*>(&layer);
199 
200             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
201             const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
202 
203             const ChannelShuffleDescriptor descriptor = cLayer->GetParameters();
204 
205             result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType),
206                                                                   OverrideDataType(output, dataType),
207                                                                   descriptor,
208                                                                   reason);
209             break;
210         }
211         case LayerType::Comparison:
212         {
213             auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
214 
215             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
216             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
217             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
218 
219             result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType),
220                                                               OverrideDataType(input1, dataType),
221                                                               OverrideDataType(output, DataType::Boolean),
222                                                               cLayer->GetParameters(),
223                                                               reason);
224             break;
225         }
226         case LayerType::Constant:
227         {
228             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
229             result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
230             break;
231         }
232         case LayerType::ConvertFp16ToFp32:
233         {
234             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
235             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
236             result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
237             break;
238         }
239         case LayerType::ConvertFp32ToFp16:
240         {
241             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
242             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
243             result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason);
244             break;
245         }
246         case LayerType::Convolution2d:
247         {
248             auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);
249 
250             const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
251                                                        dataType);
252             const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
253             ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
254                              "Convolution2dLayer: Weights should be connected as a Constant Layer.");
255             const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
256                                                         dataType);
257 
258             const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
259 
260             // Construct optional biases object based on the value of m_BiasEnabled
261             Optional<TensorInfo> biases;
262             if (descriptor.m_BiasEnabled)
263             {
264                 ARMNN_ASSERT_MSG(layer.GetInputSlot(2).GetConnection(),
265                                  "Convolution2dLayer: Bias should be connected as a Constant Layer.");
266                 biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
267                                           GetBiasTypeFromWeightsType(dataType));
268             }
269 
270             result = layerSupportObject.IsConvolution2dSupported(
271                                               input,
272                                               output,
273                                               descriptor,
274                                               weights,
275                                               biases,
276                                               reason);
277             break;
278         }
279         case LayerType::Convolution3d:
280         {
281             auto cLayer = PolymorphicDowncast<const Convolution3dLayer*>(&layer);
282 
283             const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
284                                                        dataType);
285             const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
286 
287             ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
288                              "Convolution3dLayer: Weights should be connected as a Constant Layer.");
289             const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
290                                                         dataType);
291 
292             const Convolution3dDescriptor& descriptor = cLayer->GetParameters();
293 
294             // Construct optional biases object based on the value of m_BiasEnabled
295             Optional<TensorInfo> biases;
296             if (descriptor.m_BiasEnabled)
297             {
298                 biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
299                                           GetBiasTypeFromWeightsType(dataType));
300             }
301 
302             result = layerSupportObject.IsConvolution3dSupported(
303                                               input,
304                                               output,
305                                               descriptor,
306                                               weights,
307                                               biases,
308                                               reason);
309             break;
310         }
311         case LayerType::Debug:
312         {
313             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
314             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
315 
316             result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType),
317                                                           OverrideDataType(output, dataType),
318                                                           reason);
319             break;
320         }
321         case LayerType::DepthToSpace:
322         {
323             auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);
324 
325             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
326             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
327 
328             result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType),
329                                                                  OverrideDataType(output, dataType),
330                                                                  cLayer->GetParameters(),
331                                                                  reason);
332             break;
333         }
334         case LayerType::DepthwiseConvolution2d:
335         {
336             auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
337             const TensorInfo& input   = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
338                                                          dataType);
339             const TensorInfo& output  = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
340             const TensorInfo& weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
341                                                          dataType);
342 
343             ARMNN_ASSERT(cLayer->GetInputSlot(1).GetConnection() != nullptr);
344 
345             const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
346 
347             // Construct optional biases object based on the value of m_BiasEnabled
348             Optional<TensorInfo> biases;
349             if (descriptor.m_BiasEnabled)
350             {
351                 biases = OverrideDataType(cLayer->GetInputSlot(2).GetConnection()->GetTensorInfo(),
352                                           GetBiasTypeFromWeightsType(dataType));
353             }
354 
355             result = layerSupportObject.IsDepthwiseConvolutionSupported(input,
356                                                                         output,
357                                                                         descriptor,
358                                                                         weights,
359                                                                         biases,
360                                                                         reason);
361             break;
362         }
363         case LayerType::Dequantize:
364         {
365             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
366             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
367 
368             result = layerSupportObject.IsDequantizeSupported(input,
369                                                               OverrideDataType(output, dataType),
370                                                               reason);
371             break;
372         }
373         case LayerType::DetectionPostProcess:
374         {
375             auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
376             const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
377             const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
378             const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
379 
380             const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
381             const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
382             const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
383             const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
384 
385             const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
386             result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings,
387                                                                         scores,
388                                                                         anchors,
389                                                                         detectionBoxes,
390                                                                         detectionClasses,
391                                                                         detectionScores,
392                                                                         numDetections,
393                                                                         descriptor,
394                                                                         reason);
395             break;
396         }
397         case LayerType::ElementwiseBinary:
398         {
399             auto cLayer = PolymorphicDowncast<const ElementwiseBinaryLayer*>(&layer);
400 
401             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
402             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
403             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
404             std::vector<TensorInfo> infos = { OverrideDataType(input0, dataType),
405                                               OverrideDataType(input1, dataType),
406                                               OverrideDataType(output, dataType) };
407             result = layerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
408                                                     infos,
409                                                     cLayer->GetParameters(),
410                                                     EmptyOptional(),
411                                                     EmptyOptional(),
412                                                     reason);
413             break;
414         }
415         case LayerType::ElementwiseUnary:
416         {
417             auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
418 
419             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
420             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
421 
422             result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType),
423                                                                     OverrideDataType(output, dataType),
424                                                                     cLayer->GetParameters(),
425                                                                     reason);
426             break;
427         }
428         case LayerType::Fill:
429         {
430             auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
431             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
432             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
433             const FillDescriptor& descriptor = cLayer->GetParameters();
434 
435             result = layerSupportObject.IsFillSupported(
436                 OverrideDataType(input, dataType),
437                 OverrideDataType(output, dataType),
438                 descriptor,
439                 reason);
440             break;
441         }
442         case LayerType::FakeQuantization:
443         {
444             auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
445             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
446             result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType),
447                                                                     cLayer->GetParameters(),
448                                                                     reason);
449             break;
450         }
451         case LayerType::Floor:
452         {
453             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
454             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
455             result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType),
456                                                          OverrideDataType(output, dataType),
457                                                          reason);
458             break;
459         }
460         case LayerType::FullyConnected:
461         {
462             auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
463             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
464             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
465 
466             const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
467             TensorInfo weightsInfo;
468             const TensorInfo* weightsInfoPtr = nullptr;
469 
470             weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType);
471             weightsInfoPtr = &weightsInfo;
472 
473             TensorInfo biasInfo;
474             const TensorInfo* biasInfoPtr = nullptr;
475             static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
476             static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
477             static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
478             static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
479 
480             if (descriptor.m_BiasEnabled)
481             {
482                 biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType);
483                 biasInfoPtr = &biasInfo;
484             }
485             else
486             {
487                 // If biases are not enabled pass a dummy tensorinfo for the validation
488                 switch(input.GetDataType())
489                 {
490                     case DataType::BFloat16:
491                     {
492                         biasInfoPtr = &dummyBFloat16Bias;
493                         break;
494                     }
495                     case DataType::Float16:
496                     {
497                         biasInfoPtr = &dummyFloat16Bias;
498                         break;
499                     }
500                     case DataType::Float32:
501                     {
502                         biasInfoPtr = &dummyFloat32Bias;
503                         break;
504                     }
505                     case DataType::QAsymmU8:
506                     case DataType::QAsymmS8:
507                     case DataType::QSymmS8:
508                     case DataType::QSymmS16:
509                     {
510                         biasInfoPtr = &dummyQA8Bias;
511                         break;
512                     }
513                     default:
514                     {
515                         ARMNN_ASSERT_MSG(false, "Unexpected bias type");
516                     }
517                 }
518             }
519             result = layerSupportObject.IsFullyConnectedSupported(
520                                                OverrideDataType(input, dataType),
521                                                OverrideDataType(output, dataType),
522                                                *weightsInfoPtr,
523                                                *biasInfoPtr,
524                                                descriptor,
525                                                reason);
526             break;
527         }
528         case LayerType::Gather:
529         {
530             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
531             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
532             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
533             auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
534             const GatherDescriptor& descriptor = cLayer->GetParameters();
535             result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType),
536                                                           input1,
537                                                           OverrideDataType(output, dataType),
538                                                           descriptor,
539                                                           reason);
540             break;
541         }
542         case LayerType::GatherNd:
543         {
544             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
545             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
546             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
547             result = layerSupportObject.IsGatherNdSupported(OverrideDataType(input0, dataType),
548                                                             input1,
549                                                             OverrideDataType(output, dataType),
550                                                             reason);
551             break;
552         }
553         case LayerType::Input:
554         {
555             const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
556             result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason);
557             break;
558         }
559         case LayerType::InstanceNormalization:
560         {
561             auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
562             const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
563 
564             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
565             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
566 
567             result = layerSupportObject.IsInstanceNormalizationSupported(
568                 OverrideDataType(input, dataType),
569                 OverrideDataType(output, dataType),
570                 descriptor,
571                 reason);
572             break;
573         }
574         case LayerType::L2Normalization:
575         {
576             auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
577             const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
578 
579             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
580             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
581 
582             result = layerSupportObject.IsL2NormalizationSupported(
583                                                 OverrideDataType(input, dataType),
584                                                 OverrideDataType(output, dataType),
585                                                 descriptor,
586                                                 reason);
587             break;
588         }
        case LayerType::LogicalBinary:
        {
            auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);

            // Two operands and one result for the binary logical operation.
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            // NOTE(review): unlike most cases in this switch, the tensor infos are
            // passed without OverrideDataType - presumably because logical ops work
            // on Boolean tensors regardless of the requested dataType; confirm.
            result = layerSupportObject.IsLogicalBinarySupported(input0,
                                                                 input1,
                                                                 output,
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
604         case LayerType::LogSoftmax:
605         {
606             auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
607 
608             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
609             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
610 
611             result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType),
612                                                               OverrideDataType(output, dataType),
613                                                               cLayer->GetParameters(),
614                                                               reason);
615             break;
616         }
        case LayerType::Lstm:
        {
            auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
            const LstmDescriptor& descriptor = cLayer->GetParameters();

            // All inputs.
            // OverrideDataType returns by value; binding each temporary to a const
            // reference is valid C++ (the temporary's lifetime is extended to the
            // end of this scope).
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                               dataType);
            const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                                             dataType);
            // All outputs
            const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
            const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);

            // Basic parameters
            const TensorInfo& inputToForgetWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToCellWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToOutputWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToForgetWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToCellWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToOutputWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& forgetGateBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
            const TensorInfo& cellBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
            const TensorInfo& outputGateBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);

            // paramsInfo stores non-owning pointers to the locals above, so the
            // locals must stay alive until after the IsLstmSupported call below.
            LstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToForgetWeights     = &inputToForgetWeights;
            paramsInfo.m_InputToCellWeights       = &inputToCellWeights;
            paramsInfo.m_InputToOutputWeights     = &inputToOutputWeights;
            paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
            paramsInfo.m_RecurrentToCellWeights   = &recurrentToCellWeights;
            paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
            paramsInfo.m_ForgetGateBias           = &forgetGateBias;
            paramsInfo.m_CellBias                 = &cellBias;
            paramsInfo.m_OutputGateBias           = &outputGateBias;


            // Optional parameters - only filled in (and only attached to
            // paramsInfo) when the corresponding feature is enabled below.
            TensorInfo optInputToInputWeights;
            TensorInfo optRecurrentToInputWeights;
            TensorInfo optCellToInputWeights;
            TensorInfo optInputGateBias;
            TensorInfo optProjectionWeights;
            TensorInfo optProjectionBias;
            TensorInfo optCellToForgetWeights;
            TensorInfo optCellToOutputWeights;
            TensorInfo optInputLayerNormWeights;
            TensorInfo optForgetLayerNormWeights;
            TensorInfo optCellLayerNormWeights;
            TensorInfo optOutputLayerNormWeights;

            // CIFG disabled => the input gate exists and needs its own weights/bias.
            if(!descriptor.m_CifgEnabled)
            {
                optInputToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_InputToInputWeights = &optInputToInputWeights;

                optRecurrentToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
                optInputGateBias =
                       OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
                paramsInfo.m_InputGateBias = &optInputGateBias;
            }

            if(descriptor.m_ProjectionEnabled)
            {
                optProjectionWeights =
                    OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ProjectionWeights = &optProjectionWeights;
                // Projection bias is optional even when projection itself is enabled.
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    optProjectionBias =
                        OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
                    paramsInfo.m_ProjectionBias = &optProjectionBias;
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                // Cell-to-input peephole weights only exist when CIFG is disabled.
                if(!descriptor.m_CifgEnabled)
                {
                    optCellToInputWeights =
                            OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
                                             dataType);
                    paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
                }
                optCellToForgetWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
                optCellToOutputWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
            }

            if(descriptor.m_LayerNormEnabled)
            {
                // Input-gate layer-norm weights only exist when CIFG is disabled.
                if (!descriptor.m_CifgEnabled)
                {
                    optInputLayerNormWeights = OverrideDataType(
                            cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
                    paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
                }

                optForgetLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;

                optCellLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;

                optOutputLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
            }

            result = layerSupportObject.IsLstmSupported(
                                     input,
                                     outputStateIn,
                                     cellStateIn,
                                     scratchBuffer,
                                     outputStateOut,
                                     cellStateOut,
                                     output,
                                     descriptor,
                                     paramsInfo,
                                     reason);
            break;
        }
        case LayerType::Maximum:
        {
            // The warning-suppression macros indicate this path calls a
            // deprecated API (presumably IsMaximumSupported); kept as-is for
            // backward compatibility.
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType),
                                                           OverrideDataType(input1, dataType),
                                                           OverrideDataType(output, dataType),
                                                           reason);
            ARMNN_NO_DEPRECATE_WARN_END
            break;
        }
775         case LayerType::MemCopy:
776         {
777             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
778             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
779 
780             result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType),
781                                                            OverrideDataType(output, dataType),
782                                                            reason);
783             break;
784         }
785         case LayerType::MemImport:
786         {
787             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
788             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
789 
790             result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType),
791                                                              OverrideDataType(output, dataType),
792                                                              reason);
793             break;
794         }
795         case LayerType::Merge:
796         {
797             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
798             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
799             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
800 
801             result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType),
802                                                          OverrideDataType(input1, dataType),
803                                                          OverrideDataType(output, dataType),
804                                                          reason);
805             break;
806         }
        case LayerType::Concat:
        {
            auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
                {
                    return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
                };

            // Materialise the per-input TensorInfos into a vector...
            auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

            // ...then build a parallel vector of pointers into `inputs`
            // (IsConcatSupported takes pointers). `inputs` must outlive
            // `inputPtrs` - both live until the end of this case.
            auto getTensorInfoPtr = [](const TensorInfo& info)
                {
                    return &info;
                };

            auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            // NOTE(review): the inputs are dtype-overridden by the lambda above but
            // `output` is passed without OverrideDataType, unlike sibling cases -
            // confirm whether this asymmetry is intentional.
            result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);


            break;
        }
        case LayerType::Multiplication:
        {
            // The warning-suppression macros indicate this path calls a
            // deprecated API (presumably IsMultiplicationSupported); kept as-is
            // for backward compatibility.
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsMultiplicationSupported(
                                               OverrideDataType(input0, dataType),
                                               OverrideDataType(input1, dataType),
                                               OverrideDataType(output, dataType),
                                               reason);
            ARMNN_NO_DEPRECATE_WARN_END
            break;
        }
851         case LayerType::Normalization:
852         {
853             auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
854             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
855             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
856             result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType),
857                                                                  OverrideDataType(output, dataType),
858                                                                  cLayer->GetParameters(),
859                                                                  reason);
860             break;
861         }
862         case LayerType::Output:
863         {
864             const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
865             result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason);
866             break;
867         }
        case LayerType::Permute:
        {
            // Permute: support depends on the permutation descriptor.
            auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::Pad:
        {
            // Pad: the descriptor carries the padding configuration.
            auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPadSupported(
                                    OverrideDataType(input, dataType),
                                    OverrideDataType(output, dataType),
                                    cLayer->GetParameters(),
                                    reason);
            break;
        }
        case LayerType::Pooling2d:
        {
            // Pooling2d: input/output infos plus the pooling descriptor.
            auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType),
                                                             OverrideDataType(output, dataType),
                                                             cLayer->GetParameters(),
                                                             reason);
            break;
        }
        case LayerType::Pooling3d:
        {
            // Pooling3d: same pattern as Pooling2d with a 3D pooling descriptor.
            auto cLayer = PolymorphicDowncast<const Pooling3dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPooling3dSupported(OverrideDataType(input, dataType),
                                                             OverrideDataType(output, dataType),
                                                             cLayer->GetParameters(),
                                                             reason);
            break;
        }
        case LayerType::PreCompiled:
        {
            // PreCompiled: only the input info and the layer's parameters are
            // checked here (no output info is passed).
            auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType),
                                                               cLayer->GetParameters(),
                                                               reason);
            break;
        }
        case LayerType::Quantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            // NOTE(review): dataType is deliberately not applied here - Quantize
            // converts between data types, so the original infos are checked
            // unchanged; confirm.
            result = layerSupportObject.IsQuantizeSupported(input, output, reason);
            break;
        }
        case LayerType::QLstm:
        {
            auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
            const QLstmDescriptor& descriptor = cLayer->GetParameters();

            // Inputs
            // No OverrideDataType here: the tensor infos are used as-is for the
            // quantized LSTM support query.
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();

            // Outputs
            const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();

            // Lstm parameters
            // paramsInfo holds non-owning pointers into the layer's constant
            // tensor handles; the layer outlives this support query.
            LstmInputParamsInfo paramsInfo;

            // Basic parameters
            ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr);
            ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr);
            ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr);
            paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
            paramsInfo.m_InputToCellWeights   = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
            paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();

            paramsInfo.m_RecurrentToForgetWeights =
                    &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToCellWeights   =
                    &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToOutputWeights =
                    &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();

            paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
            paramsInfo.m_CellBias       = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
            paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();

            // CIFG disabled => the input gate exists and needs its own weights/bias.
            if(!descriptor.m_CifgEnabled)
            {
                paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
                paramsInfo.m_RecurrentToInputWeights =
                        &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
                paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
            }

            if(descriptor.m_ProjectionEnabled)
            {
                paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();

                // Projection bias is optional even if projection is enabled
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                // Cell-to-input peephole weights only exist when CIFG is disabled.
                if (!descriptor.m_CifgEnabled)
                {
                    paramsInfo.m_CellToInputWeights =
                            &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
                }

                paramsInfo.m_CellToForgetWeights =
                        &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
                paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
            }

            if(descriptor.m_LayerNormEnabled)
            {
                // Input-gate layer-norm weights only exist when CIFG is disabled.
                if (!descriptor.m_CifgEnabled)
                {
                    paramsInfo.m_InputLayerNormWeights =
                            &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
                }

                paramsInfo.m_ForgetLayerNormWeights =
                        &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
                paramsInfo.m_CellLayerNormWeights =
                        &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
                paramsInfo.m_OutputLayerNormWeights =
                        &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
            }

            result = layerSupportObject.IsQLstmSupported(input,
                                                         previousOutputIn,
                                                         previousCellStateIn,
                                                         outputStateOut,
                                                         cellStateOut,
                                                         output,
                                                         descriptor,
                                                         paramsInfo,
                                                         reason);
            break;
        }
1025         case LayerType::QuantizedLstm:
1026         {
1027             auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);
1028 
1029             // Inputs
1030             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1031             const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1032             const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
1033 
1034             // Outputs
1035             const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
1036             const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
1037 
1038             // QuantizedLstm parameters
1039             QuantizedLstmInputParamsInfo paramsInfo;
1040 
1041             paramsInfo.m_InputToInputWeights      =
1042                     &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
1043             paramsInfo.m_InputToForgetWeights     =
1044                     &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
1045             paramsInfo.m_InputToCellWeights       =
1046                     &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
1047             paramsInfo.m_InputToOutputWeights     =
1048                     &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
1049 
1050             paramsInfo.m_RecurrentToInputWeights  =
1051                     &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
1052             paramsInfo.m_RecurrentToForgetWeights =
1053                     &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
1054             paramsInfo.m_RecurrentToCellWeights   =
1055                     &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
1056             paramsInfo.m_RecurrentToOutputWeights =
1057                     &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
1058 
1059             paramsInfo.m_InputGateBias            =
1060                     &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
1061             paramsInfo.m_ForgetGateBias           =
1062                     &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
1063             paramsInfo.m_CellBias                 =
1064                     &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
1065             paramsInfo.m_OutputGateBias           =
1066                     &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
1067 
1068             result = layerSupportObject.IsQuantizedLstmSupported(input,
1069                                                                  previousCellStateIn,
1070                                                                  previousOutputIn,
1071                                                                  cellStateOut,
1072                                                                  output,
1073                                                                  paramsInfo,
1074                                                                  reason);
1075             break;
1076         }
        case LayerType::Division:
        {
            // The warning-suppression macros indicate this path calls a
            // deprecated API (presumably IsDivisionSupported); kept as-is for
            // backward compatibility.
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsDivisionSupported(
                                         OverrideDataType(input0, dataType),
                                         OverrideDataType(input1, dataType),
                                         OverrideDataType(output, dataType),
                                         reason);
            ARMNN_NO_DEPRECATE_WARN_END
            break;
        }
        case LayerType::Rank:
        {
            // Rank: no descriptor; only the input/output infos are checked.
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType),
                                                        OverrideDataType(output, dataType),
                                                        reason);
            break;
        }
        case LayerType::Reshape:
        {
            // Reshape: the descriptor carries the target shape.
            auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::Resize:
        {
            // Resize: the descriptor carries the resize configuration.
            auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          cLayer->GetParameters(),
                                                          reason);
            break;
        }
        case LayerType::Shape:
        {
            // Shape: no descriptor; only the input/output infos are checked.
            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsShapeSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
1132         case LayerType::Slice:
1133         {
1134             auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);
1135 
1136             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1137             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1138 
1139             result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType),
1140                                                          OverrideDataType(output, dataType),
1141                                                          cLayer->GetParameters(),
1142                                                          reason);
1143             break;
1144         }
1145         case LayerType::Softmax:
1146         {
1147             auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
1148             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1149             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1150             result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType),
1151                                                            OverrideDataType(output, dataType),
1152                                                            cLayer->GetParameters(),
1153                                                            reason);
1154             break;
1155         }
1156         case LayerType::SpaceToBatchNd:
1157         {
1158             auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
1159             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1160             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1161             result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
1162                                                                   OverrideDataType(output, dataType),
1163                                                                   cLayer->GetParameters(),
1164                                                                   reason);
1165             break;
1166         }
1167         case LayerType::SpaceToDepth:
1168         {
1169             auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);
1170 
1171             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1172             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1173 
1174             result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType),
1175                                                                 OverrideDataType(output, dataType),
1176                                                                 cLayer->GetParameters(),
1177                                                                 reason);
1178             break;
1179         }
1180         case LayerType::Splitter:
1181         {
1182             auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
1183             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1184 
1185             // Get vector of all outputs.
1186             auto getTensorInfo = [&dataType](const OutputSlot& slot)
1187             {
1188                 return OverrideDataType(slot.GetTensorInfo(), dataType);
1189             };
1190             auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
1191             auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
1192             std::vector<TensorInfo> outputs(beginI, endI);
1193 
1194             const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
1195 
1196             result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType),
1197                                                             outputPtrs,
1198                                                             cLayer->GetParameters(),
1199                                                             reason);
1200             break;
1201         }
1202         case LayerType::Stack:
1203         {
1204             auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);
1205 
1206             // Get vector of all inputs.
1207             auto getTensorInfo = [&dataType](const InputSlot& slot)
1208                 {
1209                     return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1210                 };
1211             auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
1212             auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
1213             std::vector<TensorInfo> inputs(beginI, endI);
1214 
1215             auto getTensorInfoPtr = [](const TensorInfo& info)
1216                 {
1217                     return &info;
1218                 };
1219             auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1220             auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1221             std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
1222 
1223             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1224 
1225             result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
1226 
1227             break;
1228         }
1229         case LayerType::StandIn:
1230         {
1231             auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
1232 
1233             // Get vector of all inputs.
1234             auto getTensorInfoIn = [&dataType](const InputSlot& slot)
1235                 {
1236                     return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1237                 };
1238             auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
1239                 {
1240                     return OverrideDataType(slot.GetTensorInfo(), dataType);
1241                 };
1242             auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
1243             auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
1244             std::vector<TensorInfo> inputs(beginI, endI);
1245 
1246             auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
1247             auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
1248             std::vector<TensorInfo> outputs(beginO, endO);
1249 
1250 
1251             auto getTensorInfoPtr = [](const TensorInfo& info)
1252                 {
1253                     return &info;
1254                 };
1255             auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1256             auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1257             std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
1258 
1259             auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
1260             auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
1261             std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
1262 
1263 
1264             result = layerSupportObject.IsStandInSupported(inputPtrs,
1265                                                            outputPtrs,
1266                                                            cLayer->GetParameters(),
1267                                                            reason);
1268             break;
1269         }
1270         case LayerType::StridedSlice:
1271         {
1272             auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
1273             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1274             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1275             result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType),
1276                                                                 OverrideDataType(output, dataType),
1277                                                                 cLayer->GetParameters(),
1278                                                                 reason);
1279             break;
1280         }
1281         case LayerType::Subtraction:
1282         {
1283             ARMNN_NO_DEPRECATE_WARN_BEGIN
1284             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1285             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1286             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1287             result = layerSupportObject.IsSubtractionSupported(
1288                                             OverrideDataType(input0, dataType),
1289                                             OverrideDataType(input1, dataType),
1290                                             OverrideDataType(output, dataType),
1291                                             reason);
1292             ARMNN_NO_DEPRECATE_WARN_END
1293             break;
1294         }
1295         case LayerType::Switch:
1296         {
1297             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1298             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1299             const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
1300             const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
1301             result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType),
1302                                                           OverrideDataType(input1, dataType),
1303                                                           OverrideDataType(output0, dataType),
1304                                                           OverrideDataType(output1, dataType),
1305                                                           reason);
1306             break;
1307         }
1308         case LayerType::Mean:
1309         {
1310             auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
1311             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1312             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1313             result = layerSupportObject.IsMeanSupported(
1314                                      OverrideDataType(input, dataType),
1315                                      OverrideDataType(output, dataType),
1316                                      cLayer->GetParameters(),
1317                                      reason);
1318             break;
1319         }
1320         case LayerType::Minimum:
1321         {
1322             ARMNN_NO_DEPRECATE_WARN_BEGIN
1323             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1324             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1325             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1326             result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType),
1327                                                            OverrideDataType(input1, dataType),
1328                                                            OverrideDataType(output, dataType),
1329                                                            reason);
1330             ARMNN_NO_DEPRECATE_WARN_END
1331             break;
1332         }
1333         case LayerType::Prelu:
1334         {
1335             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1336             const TensorInfo& alpha  = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1337             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1338             result = layerSupportObject.IsPreluSupported(OverrideDataType(input,  dataType),
1339                                                          OverrideDataType(alpha,  dataType),
1340                                                          OverrideDataType(output, dataType),
1341                                                          reason);
1342             break;
1343         }
1344         case LayerType::Transpose:
1345         {
1346             auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
1347             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1348             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1349             result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType),
1350                                                              OverrideDataType(output, dataType),
1351                                                              cLayer->GetParameters(),
1352                                                              reason);
1353             break;
1354         }
1355         case LayerType::TransposeConvolution2d:
1356         {
1357             auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);
1358 
1359             const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1360                                                        dataType);
1361             const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1362 
1363             const TransposeConvolution2dDescriptor& descriptor  = cLayer->GetParameters();
1364 
1365             Optional<TensorInfo> biases;
1366             if (descriptor.m_BiasEnabled)
1367             {
1368                 ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
1369                 biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1370                                           GetBiasTypeFromWeightsType(dataType));
1371             }
1372 
1373             ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
1374             const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1375 
1376             result = layerSupportObject.IsTransposeConvolution2dSupported(input,
1377                                                                           output,
1378                                                                           descriptor,
1379                                                                           weights,
1380                                                                           biases,
1381                                                                           reason);
1382 
1383             break;
1384         }
1385         case LayerType::Reduce:
1386         {
1387             auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
1388             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1389             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1390 
1391             result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
1392                                                           OverrideDataType(output, dataType),
1393                                                           cLayer->GetParameters(),
1394                                                           reason);
1395             break;
1396         }
1397         case LayerType::UnidirectionalSequenceLstm:
1398         {
1399             auto cLayer = PolymorphicDowncast<const UnidirectionalSequenceLstmLayer*>(&layer);
1400             const UnidirectionalSequenceLstmDescriptor& descriptor = cLayer->GetParameters();
1401 
1402             // All inputs.
1403             const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1404                                                        dataType);
1405             const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
1406                                                                dataType);
1407             const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
1408                                                              dataType);
1409             // Outputs
1410             const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1411             const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
1412             const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
1413 
1414             // Basic parameters
1415             const TensorInfo& inputToForgetWeights
1416                     = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
1417             const TensorInfo& inputToCellWeights
1418                     = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
1419             const TensorInfo& inputToOutputWeights
1420                     = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
1421             const TensorInfo& recurrentToForgetWeights
1422                     = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
1423             const TensorInfo& recurrentToCellWeights
1424                     = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
1425             const TensorInfo& recurrentToOutputWeights
1426                     = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
1427             const TensorInfo& forgetGateBias
1428                     = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
1429             const TensorInfo& cellBias
1430                     = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
1431             const TensorInfo& outputGateBias
1432                     = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
1433 
1434             LstmInputParamsInfo paramsInfo;
1435 
1436             paramsInfo.m_InputToForgetWeights     = &inputToForgetWeights;
1437             paramsInfo.m_InputToCellWeights       = &inputToCellWeights;
1438             paramsInfo.m_InputToOutputWeights     = &inputToOutputWeights;
1439             paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1440             paramsInfo.m_RecurrentToCellWeights   = &recurrentToCellWeights;
1441             paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1442             paramsInfo.m_ForgetGateBias           = &forgetGateBias;
1443             paramsInfo.m_CellBias                 = &cellBias;
1444             paramsInfo.m_OutputGateBias           = &outputGateBias;
1445 
1446             // Optional parameters
1447             TensorInfo optInputToInputWeights;
1448             TensorInfo optRecurrentToInputWeights;
1449             TensorInfo optCellToInputWeights;
1450             TensorInfo optInputGateBias;
1451             TensorInfo optProjectionWeights;
1452             TensorInfo optProjectionBias;
1453             TensorInfo optCellToForgetWeights;
1454             TensorInfo optCellToOutputWeights;
1455             TensorInfo optInputLayerNormWeights;
1456             TensorInfo optForgetLayerNormWeights;
1457             TensorInfo optCellLayerNormWeights;
1458             TensorInfo optOutputLayerNormWeights;
1459 
1460             if(!descriptor.m_CifgEnabled)
1461             {
1462                 optInputToInputWeights =
1463                     OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
1464                 paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
1465 
1466                 optRecurrentToInputWeights =
1467                     OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
1468                 paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
1469                 optInputGateBias =
1470                        OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
1471                 paramsInfo.m_InputGateBias = &optInputGateBias;
1472             }
1473 
1474             if(descriptor.m_ProjectionEnabled)
1475             {
1476                 optProjectionWeights =
1477                     OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
1478                 paramsInfo.m_ProjectionWeights = &optProjectionWeights;
1479                 if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
1480                 {
1481                     optProjectionBias =
1482                         OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
1483                     paramsInfo.m_ProjectionBias = &optProjectionBias;
1484                 }
1485             }
1486 
1487             if(descriptor.m_PeepholeEnabled)
1488             {
1489                 if(!descriptor.m_CifgEnabled)
1490                 {
1491                     optCellToInputWeights =
1492                             OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
1493                                              dataType);
1494                     paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
1495                 }
1496                 optCellToForgetWeights =
1497                     OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
1498                 paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
1499                 optCellToOutputWeights =
1500                     OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
1501                 paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
1502             }
1503 
1504             if(descriptor.m_LayerNormEnabled)
1505             {
1506                 if (!descriptor.m_CifgEnabled)
1507                 {
1508                     optInputLayerNormWeights = OverrideDataType(
1509                             cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
1510                     paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
1511                 }
1512 
1513                 optForgetLayerNormWeights = OverrideDataType(
1514                         cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
1515                 paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
1516 
1517                 optCellLayerNormWeights = OverrideDataType(
1518                         cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
1519                 paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
1520 
1521                 optOutputLayerNormWeights = OverrideDataType(
1522                         cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
1523                 paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
1524             }
1525 
1526             result = layerSupportObject.IsUnidirectionalSequenceLstmSupported(input,
1527                                                                               outputStateIn,
1528                                                                               cellStateIn,
1529                                                                               outputStateOut,
1530                                                                               cellStateOut,
1531                                                                               output,
1532                                                                               descriptor,
1533                                                                               paramsInfo,
1534                                                                               reason);
1535             break;
1536         }
1537         default:
1538         {
1539             ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1540             reason.value() = "Unrecognised layer type";
1541             result = false;
1542             break;
1543         }
1544     }
1545     return result;
1546 }
1547 
IsLayerSupported(const BackendId & backendId,const IConnectableLayer & connectableLayer,Optional<DataType> dataType,std::string & outReasonIfUnsupported)1548 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
1549                                         const IConnectableLayer& connectableLayer,
1550                                         Optional<DataType> dataType,
1551                                         std::string& outReasonIfUnsupported)
1552 {
1553     return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1554 }
1555 
IsLayerSupported(const IConnectableLayer & connectableLayer,Optional<DataType> dataType,std::string & outReasonIfUnsupported)1556 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
1557                                         Optional<DataType> dataType,
1558                                         std::string& outReasonIfUnsupported)
1559 {
1560     auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1561     return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1562 }
1563 
1564 // TODO merge with defaulted modelOptions above
IsLayerSupported(const IConnectableLayer & connectableLayer,Optional<DataType> dataType,std::string & outReasonIfUnsupported,const ModelOptions & modelOptions)1565 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
1566                                         Optional<DataType> dataType,
1567                                         std::string& outReasonIfUnsupported,
1568                                         const ModelOptions& modelOptions)
1569 {
1570     auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1571     return IsLayerConfigurationSupported(layer->GetBackendId(),
1572                                          connectableLayer,
1573                                          dataType,
1574                                          outReasonIfUnsupported,
1575                                          modelOptions);
1576 }
1577 
IsLayerSupported(const BackendId & backendId,const IConnectableLayer & connectableLayer,Optional<DataType> dataType,std::string & outReasonIfUnsupported,const ModelOptions & modelOptions)1578 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
1579                                         const IConnectableLayer& connectableLayer,
1580                                         Optional<DataType> dataType,
1581                                         std::string& outReasonIfUnsupported,
1582                                         const ModelOptions& modelOptions)
1583 {
1584     return IsLayerConfigurationSupported(backendId,
1585                                          connectableLayer,
1586                                          dataType,
1587                                          outReasonIfUnsupported,
1588                                          modelOptions);
1589 }
1590 ARMNN_NO_DEPRECATE_WARN_BEGIN
CreateWorkload(LayerType type,const QueueDescriptor & descriptor,const WorkloadInfo & info) const1591 std::unique_ptr<IWorkload> IWorkloadFactory::CreateWorkload(LayerType type,
1592                                                             const QueueDescriptor& descriptor,
1593                                                             const WorkloadInfo& info) const
1594 {
1595     switch(type)
1596     {
1597         case LayerType::Activation :
1598         {
1599             auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
1600             return CreateActivation(*activationQueueDescriptor, info);
1601         }
1602         case LayerType::Addition :
1603         {
1604             auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
1605             return CreateAddition(*additionQueueDescriptor, info);
1606         }
1607         case LayerType::ArgMinMax :
1608         {
1609             auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
1610             return CreateArgMinMax(*argMinMaxQueueDescriptor, info);
1611         }
1612         case LayerType::BatchNormalization :
1613         {
1614             auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
1615             return CreateBatchNormalization(*batchNormQueueDescriptor, info);
1616         }
1617         case LayerType::BatchToSpaceNd :
1618         {
1619             auto batchToSpaceNdQueueDescriptor
1620                     = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
1621             return CreateBatchToSpaceNd(*batchToSpaceNdQueueDescriptor, info);
1622         }
1623         case LayerType::Cast :
1624         {
1625             auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
1626             return CreateCast(*castQueueDescriptor, info);
1627         }
1628         case LayerType::ChannelShuffle :
1629         {
1630             auto channelShuffleQueueDescriptor
1631                     = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
1632             return CreateChannelShuffle(*channelShuffleQueueDescriptor, info);
1633         }
1634         case LayerType::Comparison :
1635         {
1636             auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
1637             return CreateComparison(*comparisonQueueDescriptor, info);
1638         }
1639         case LayerType::Concat :
1640         {
1641             auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
1642             return CreateConcat(*concatQueueDescriptor, info);
1643         }
1644         case LayerType::Constant :
1645         {
1646             auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
1647             return CreateConstant(*constantQueueDescriptor, info);
1648         }
1649         case LayerType::ConvertFp16ToFp32:
1650         {
1651             auto convertFp16ToFp32QueueDescriptor
1652                     = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
1653             return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info);
1654         }
1655         case LayerType::ConvertFp32ToFp16:
1656         {
1657             auto convertFp32ToFp16QueueDescriptor
1658                     = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
1659             return CreateConvertFp32ToFp16(*convertFp32ToFp16QueueDescriptor, info);
1660         }
1661         case LayerType::Convolution2d:
1662         {
1663             auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
1664             return CreateConvolution2d(*convolution2dQueueDescriptor, info);
1665         }
1666         case LayerType::Convolution3d:
1667         {
1668             auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
1669             return CreateConvolution3d(*convolution3dQueueDescriptor, info);
1670         }
1671         case LayerType::Debug:
1672         {
1673             auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
1674             return CreateDebug(*debugQueueDescriptor, info);
1675         }
1676         case LayerType::DepthToSpace:
1677         {
1678             auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
1679             return CreateDepthToSpace(*depthToSpaceQueueDescriptor, info);
1680         }
1681         case LayerType::DepthwiseConvolution2d:
1682         {
1683             auto depthwiseConvolution2DQueueDescriptor
1684                     = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
1685             return CreateDepthwiseConvolution2d(*depthwiseConvolution2DQueueDescriptor, info);
1686         }
1687         case LayerType::Dequantize:
1688         {
1689             auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
1690             return CreateDequantize(*dequantizeQueueDescriptor, info);
1691         }
1692         case LayerType::DetectionPostProcess:
1693         {
1694             auto detectionPostProcessQueueDescriptor
1695                     = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
1696             return CreateDetectionPostProcess(*detectionPostProcessQueueDescriptor, info);
1697         }
1698         case LayerType::Division:
1699         {
1700             auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
1701             return CreateDivision(*divisionQueueDescriptor, info);
1702         }
1703         case LayerType::ElementwiseBinary:
1704         {
1705             auto queueDescriptor = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
1706             return CreateWorkload(LayerType::ElementwiseBinary, *queueDescriptor, info);
1707         }
1708         case LayerType::ElementwiseUnary:
1709         {
1710             auto elementwiseUnaryQueueDescriptor
1711                     = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
1712             return CreateElementwiseUnary(*elementwiseUnaryQueueDescriptor, info);
1713 
1714         }
1715         case LayerType::FakeQuantization:
1716         {
1717             auto fakeQuantizationQueueDescriptor
1718                     = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
1719             return CreateFakeQuantization(*fakeQuantizationQueueDescriptor, info);
1720         }
1721         case LayerType::Fill:
1722         {
1723             auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
1724             return CreateFill(*fillQueueDescriptor, info);
1725         }
1726         case LayerType::Floor:
1727         {
1728             auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
1729             return CreateFloor(*floorQueueDescriptor, info);
1730         }
1731         case LayerType::FullyConnected:
1732         {
1733             auto fullyConnectedQueueDescriptor
1734                     = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
1735             return CreateFullyConnected(*fullyConnectedQueueDescriptor, info);
1736         }
1737         case LayerType::Gather:
1738         {
1739             auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
1740             return CreateGather(*gatherQueueDescriptor, info);
1741         }
1742         case LayerType::Input:
1743         {
1744             auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
1745             return CreateInput(*inputQueueDescriptor, info);
1746         }
1747         case LayerType::InstanceNormalization:
1748         {
1749             auto instanceNormalizationQueueDescriptor
1750                     = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
1751             return CreateInstanceNormalization(*instanceNormalizationQueueDescriptor, info);
1752         }
1753         case LayerType::L2Normalization:
1754         {
1755             auto l2NormalizationQueueDescriptor
1756                     = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
1757             return CreateL2Normalization(*l2NormalizationQueueDescriptor, info);
1758         }
1759         case LayerType::LogicalBinary:
1760         {
1761             auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
1762             return CreateLogicalBinary(*logicalBinaryQueueDescriptor, info);
1763         }
1764         case LayerType::LogSoftmax:
1765         {
1766             auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
1767             return CreateLogSoftmax(*logSoftmaxQueueDescriptor, info);
1768         }
1769         case LayerType::Lstm:
1770         {
1771             auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
1772             return CreateLstm(*lstmQueueDescriptor, info);
1773         }
1774         case LayerType::Maximum:
1775         {
1776             auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
1777             return CreateMaximum(*maximumQueueDescriptor, info);
1778         }
1779         case LayerType::Mean:
1780         {
1781             auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
1782             return CreateMean(*meanQueueDescriptor, info);
1783         }
1784         case LayerType::MemCopy:
1785         {
1786             auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
1787             return CreateMemCopy(*memCopyQueueDescriptor, info);
1788         }
1789         case LayerType::MemImport:
1790         {
1791             auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
1792             return CreateMemImport(*memImportQueueDescriptor, info);
1793         }
1794         case LayerType::Minimum:
1795         {
1796             auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
1797             return CreateMinimum(*minimumQueueDescriptor, info);
1798         }
1799         case LayerType::Multiplication:
1800         {
1801             auto multiplicationQueueDescriptor
1802                     = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
1803             return CreateMultiplication(*multiplicationQueueDescriptor, info);
1804         }
1805         case LayerType::Normalization:
1806         {
1807             auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
1808             return CreateNormalization(*normalizationQueueDescriptor, info);
1809         }
1810         case LayerType::Output:
1811         {
1812             auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
1813             return CreateOutput(*outputQueueDescriptor, info);
1814         }
1815         case LayerType::Pad:
1816         {
1817             auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
1818             return CreatePad(*padQueueDescriptor, info);
1819         }
1820         case LayerType::Permute:
1821         {
1822             auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
1823             return CreatePermute(*permuteQueueDescriptor, info);
1824         }
1825         case LayerType::Pooling2d:
1826         {
1827             auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
1828             return CreatePooling2d(*pooling2dQueueDescriptor, info);
1829         }
1830         case LayerType::Pooling3d:
1831         {
1832             auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
1833             return CreatePooling3d(*pooling3dQueueDescriptor, info);
1834         }
1835         case LayerType::PreCompiled:
1836         {
1837             auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
1838             return CreatePreCompiled(*preCompiledQueueDescriptor, info);
1839         }
1840         case LayerType::Prelu:
1841         {
1842             auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
1843             return CreatePrelu(*preluQueueDescriptor, info);
1844         }
1845         case LayerType::QLstm:
1846         {
1847             auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
1848             return CreateQLstm(*qlstmQueueDescriptor, info);
1849         }
1850         case LayerType::Quantize:
1851         {
1852             auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
1853             return CreateQuantize(*quantizeQueueDescriptor, info);
1854         }
1855         case LayerType::Rank:
1856         {
1857             auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
1858             return CreateRank(*rankQueueDescriptor, info);
1859         }
1860         case LayerType::Reduce:
1861         {
1862             auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
1863             return CreateReduce(*reduceQueueDescriptor, info);
1864         }
1865         case LayerType::Reshape:
1866         {
1867             auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
1868             return CreateReshape(*reshapeQueueDescriptor, info);
1869         }
1870         case LayerType::Resize:
1871         {
1872             auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
1873             return CreateResize(*resizeQueueDescriptor, info);
1874         }
1875         case LayerType::Shape:
1876         {
1877             auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
1878             return CreateShape(*shapeQueueDescriptor, info);
1879         }
1880         case LayerType::Slice:
1881         {
1882             auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
1883             return CreateSlice(*sliceQueueDescriptor, info);
1884         }
1885         case LayerType::Softmax:
1886         {
1887             auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
1888             return CreateSoftmax(*softmaxQueueDescriptor, info);
1889         }
1890         case LayerType::SpaceToBatchNd:
1891         {
1892             auto spaceToBatchNdQueueDescriptor
1893                     = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
1894             return CreateSpaceToBatchNd(*spaceToBatchNdQueueDescriptor, info);
1895         }
1896         case LayerType::SpaceToDepth:
1897         {
1898             auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
1899             return CreateSpaceToDepth(*spaceToDepthQueueDescriptor, info);
1900         }
1901         case LayerType::Splitter:
1902         {
1903             auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
1904             return CreateSplitter(*splitterQueueDescriptor, info);
1905         }
1906         case LayerType::Stack:
1907         {
1908             auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
1909             return CreateStack(*stackQueueDescriptor, info);
1910         }
1911         case LayerType::StridedSlice:
1912         {
1913             auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
1914             return CreateStridedSlice(*stridedSliceQueueDescriptor, info);
1915         }
1916         case LayerType::Subtraction:
1917         {
1918             auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
1919             return CreateSubtraction(*subtractionQueueDescriptor, info);
1920         }
1921         case LayerType::Transpose:
1922         {
1923             auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
1924             return CreateTranspose(*transposeQueueDescriptor, info);
1925         }
1926         case LayerType::TransposeConvolution2d:
1927         {
1928             auto transposeConvolution2dQueueDescriptor
1929                     = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
1930             return CreateTransposeConvolution2d(*transposeConvolution2dQueueDescriptor, info);
1931         }
1932         case LayerType::UnidirectionalSequenceLstm:
1933         {
1934             auto unidirectionalSequenceLstmQueueDescriptor
1935                     = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
1936             return CreateUnidirectionalSequenceLstm(*unidirectionalSequenceLstmQueueDescriptor, info);
1937         }
1938         default:
1939             return nullptr;
1940     }
1941 }
1942 ARMNN_NO_DEPRECATE_WARN_END
1943 
CreateActivation(const ActivationQueueDescriptor &,const WorkloadInfo &) const1944 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
1945                                                               const WorkloadInfo& /*info*/) const
1946 {
1947     return std::unique_ptr<IWorkload>();
1948 }
1949 
CreateAddition(const AdditionQueueDescriptor &,const WorkloadInfo &) const1950 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
1951                                                             const WorkloadInfo& /*info*/) const
1952 {
1953     return std::unique_ptr<IWorkload>();
1954 }
1955 
CreateArgMinMax(const ArgMinMaxQueueDescriptor &,const WorkloadInfo &) const1956 std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
1957                                                              const WorkloadInfo& /*info*/) const
1958 {
1959     return std::unique_ptr<IWorkload>();
1960 }
1961 
CreateBatchNormalization(const BatchNormalizationQueueDescriptor &,const WorkloadInfo &) const1962 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
1963     const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1964 {
1965     return std::unique_ptr<IWorkload>();
1966 }
1967 
CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor &,const WorkloadInfo &) const1968 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*desc*/,
1969                                                                   const WorkloadInfo& /*Info*/) const
1970 {
1971     return std::unique_ptr<IWorkload>();
1972 }
1973 
CreateCast(const CastQueueDescriptor &,const WorkloadInfo &) const1974 std::unique_ptr<IWorkload> IWorkloadFactory::CreateCast(const CastQueueDescriptor& /*descriptor*/,
1975                                                        const WorkloadInfo& /*info*/) const
1976 {
1977     return std::unique_ptr<IWorkload>();
1978 }
1979 
CreateChannelShuffle(const ChannelShuffleQueueDescriptor &,const WorkloadInfo &) const1980 std::unique_ptr<IWorkload> IWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& /*descriptor*/,
1981                                                                   const WorkloadInfo& /*info*/) const
1982 {
1983     return std::unique_ptr<IWorkload>();
1984 }
1985 
CreateComparison(const ComparisonQueueDescriptor &,const WorkloadInfo &) const1986 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
1987                                                               const WorkloadInfo& /*info*/) const
1988 {
1989     return std::unique_ptr<IWorkload>();
1990 }
1991 
CreateConcat(const ConcatQueueDescriptor &,const WorkloadInfo &) const1992 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
1993                                                           const WorkloadInfo& /*info*/) const
1994 {
1995     return std::unique_ptr<IWorkload>();
1996 }
1997 
CreateConstant(const ConstantQueueDescriptor &,const WorkloadInfo &) const1998 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
1999                                                             const WorkloadInfo& /*info*/) const
2000 {
2001     return std::unique_ptr<IWorkload>();
2002 }
2003 
CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor &,const WorkloadInfo &) const2004 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
2005                                                                      const WorkloadInfo& /*info*/) const
2006 {
2007     return std::unique_ptr<IWorkload>();
2008 }
2009 
CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor &,const WorkloadInfo &) const2010 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
2011                                                                      const WorkloadInfo& /*info*/) const
2012 {
2013     return std::unique_ptr<IWorkload>();
2014 }
2015 
CreateConvolution2d(const Convolution2dQueueDescriptor &,const WorkloadInfo &) const2016 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
2017                                                                  const WorkloadInfo& /*info*/) const
2018 {
2019     return std::unique_ptr<IWorkload>();
2020 }
2021 
CreateConvolution3d(const Convolution3dQueueDescriptor &,const WorkloadInfo &) const2022 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& /*descriptor*/,
2023                                                                  const WorkloadInfo& /*info*/) const
2024 {
2025     return std::unique_ptr<IWorkload>();
2026 }
2027 
CreateDebug(const DebugQueueDescriptor &,const WorkloadInfo &) const2028 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
2029                                                          const WorkloadInfo& /*info*/) const
2030 {
2031     return std::unique_ptr<IWorkload>();
2032 }
2033 
CreateDepthToSpace(const DepthToSpaceQueueDescriptor &,const WorkloadInfo &) const2034 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
2035                                                                 const WorkloadInfo& /*info*/) const
2036 {
2037     return std::unique_ptr<IWorkload>();
2038 }
2039 
CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &,const WorkloadInfo &) const2040 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
2041     const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2042 {
2043     return std::unique_ptr<IWorkload>();
2044 }
2045 
CreateDequantize(const DequantizeQueueDescriptor &,const WorkloadInfo &) const2046 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
2047     const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2048 {
2049     return std::unique_ptr<IWorkload>();
2050 }
2051 
CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor &,const WorkloadInfo &) const2052 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
2053     const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2054 {
2055     return std::unique_ptr<IWorkload>();
2056 }
2057 
CreateDivision(const DivisionQueueDescriptor &,const WorkloadInfo &) const2058 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
2059                                                             const WorkloadInfo& /*info*/) const
2060 {
2061     return std::unique_ptr<IWorkload>();
2062 }
2063 
CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor &,const WorkloadInfo &) const2064 std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
2065                                                                     const WorkloadInfo& /*info*/) const
2066 {
2067     return std::unique_ptr<IWorkload>();
2068 }
2069 
CreateFakeQuantization(const FakeQuantizationQueueDescriptor &,const WorkloadInfo &) const2070 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
2071                                                                     const WorkloadInfo& /*info*/) const
2072 {
2073     return std::unique_ptr<IWorkload>();
2074 }
2075 
CreateFill(const FillQueueDescriptor &,const WorkloadInfo &) const2076 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFill(const FillQueueDescriptor& /*descriptor*/,
2077                                                         const WorkloadInfo& /*info*/) const
2078 {
2079     return std::unique_ptr<IWorkload>();
2080 }
2081 
CreateFloor(const FloorQueueDescriptor &,const WorkloadInfo &) const2082 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
2083                                                          const WorkloadInfo& /*info*/) const
2084 {
2085     return std::unique_ptr<IWorkload>();
2086 }
2087 
CreateFullyConnected(const FullyConnectedQueueDescriptor &,const WorkloadInfo &) const2088 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
2089                                                                   const WorkloadInfo& /*info*/) const
2090 {
2091     return std::unique_ptr<IWorkload>();
2092 }
2093 
CreateGather(const GatherQueueDescriptor &,const WorkloadInfo &) const2094 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
2095                                                           const WorkloadInfo& /*info*/) const
2096 {
2097     return std::unique_ptr<IWorkload>();
2098 }
2099 
CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor &,const WorkloadInfo &) const2100 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
2101     const InstanceNormalizationQueueDescriptor& /*descriptor*/,
2102     const WorkloadInfo& /*info*/) const
2103 {
2104     return std::unique_ptr<IWorkload>();
2105 }
2106 
CreateL2Normalization(const L2NormalizationQueueDescriptor &,const WorkloadInfo &) const2107 std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*desc*/,
2108                                                                    const WorkloadInfo& /*info*/) const
2109 {
2110     return std::unique_ptr<IWorkload>();
2111 }
2112 
CreateLogicalBinary(const LogicalBinaryQueueDescriptor &,const WorkloadInfo &) const2113 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
2114                                                                  const WorkloadInfo& /*info*/) const
2115 {
2116     return std::unique_ptr<IWorkload>();
2117 }
2118 
CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor &,const WorkloadInfo &) const2119 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
2120                                                                 const WorkloadInfo& /*info*/) const
2121 {
2122     return std::unique_ptr<IWorkload>();
2123 }
2124 
CreateLogSoftmax(const LogSoftmaxQueueDescriptor &,const WorkloadInfo &) const2125 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
2126                                                               const WorkloadInfo& /*info*/) const
2127 {
2128     return std::unique_ptr<IWorkload>();
2129 }
2130 
CreateLstm(const LstmQueueDescriptor &,const WorkloadInfo &) const2131 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
2132                                                         const WorkloadInfo& /*info*/) const
2133 {
2134     return std::unique_ptr<IWorkload>();
2135 }
2136 
CreateMaximum(const MaximumQueueDescriptor &,const WorkloadInfo &) const2137 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
2138                                                            const WorkloadInfo& /*info*/) const
2139 {
2140     return std::unique_ptr<IWorkload>();
2141 }
2142 
CreateMean(const MeanQueueDescriptor &,const WorkloadInfo &) const2143 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
2144                                                         const WorkloadInfo& /*Info*/) const
2145 {
2146     return std::unique_ptr<IWorkload>();
2147 }
2148 
CreateMemCopy(const MemCopyQueueDescriptor &,const WorkloadInfo &) const2149 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
2150                                                            const WorkloadInfo& /*info*/) const
2151 {
2152     return std::unique_ptr<IWorkload>();
2153 }
2154 
CreateMemImport(const MemImportQueueDescriptor &,const WorkloadInfo &) const2155 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
2156                                                              const WorkloadInfo& /*info*/) const
2157 {
2158     return std::unique_ptr<IWorkload>();
2159 }
2160 
CreateMerge(const MergeQueueDescriptor &,const WorkloadInfo &) const2161 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
2162                                                          const WorkloadInfo& /*info*/) const
2163 {
2164     return std::unique_ptr<IWorkload>();
2165 }
2166 
CreateMinimum(const MinimumQueueDescriptor &,const WorkloadInfo &) const2167 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
2168                                                            const WorkloadInfo& /*info*/) const
2169 {
2170     return std::unique_ptr<IWorkload>();
2171 }
2172 
CreateMultiplication(const MultiplicationQueueDescriptor &,const WorkloadInfo &) const2173 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
2174                                                                   const WorkloadInfo& /*info*/) const
2175 {
2176     return std::unique_ptr<IWorkload>();
2177 }
2178 
CreateNormalization(const NormalizationQueueDescriptor &,const WorkloadInfo &) const2179 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
2180                                                                  const WorkloadInfo& /*info*/) const
2181 {
2182     return std::unique_ptr<IWorkload>();
2183 }
2184 
CreateOutput(const OutputQueueDescriptor &,const WorkloadInfo &) const2185 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
2186                                                           const WorkloadInfo& /*info*/) const
2187 {
2188     return std::unique_ptr<IWorkload>();
2189 }
2190 
CreatePad(const PadQueueDescriptor &,const WorkloadInfo &) const2191 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
2192                                                        const WorkloadInfo& /*Info*/) const
2193 {
2194     return std::unique_ptr<IWorkload>();
2195 }
2196 
CreatePermute(const PermuteQueueDescriptor &,const WorkloadInfo &) const2197 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
2198                                                            const WorkloadInfo& /*info*/) const
2199 {
2200     return std::unique_ptr<IWorkload>();
2201 }
2202 
CreatePooling2d(const Pooling2dQueueDescriptor &,const WorkloadInfo &) const2203 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
2204                                                              const WorkloadInfo& /*info*/) const
2205 {
2206     return std::unique_ptr<IWorkload>();
2207 }
2208 
CreatePooling3d(const Pooling3dQueueDescriptor &,const WorkloadInfo &) const2209 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling3d(const Pooling3dQueueDescriptor& /*descriptor*/,
2210                                                              const WorkloadInfo& /*info*/) const
2211 {
2212     return std::unique_ptr<IWorkload>();
2213 }
2214 
CreatePreCompiled(const PreCompiledQueueDescriptor &,const WorkloadInfo &) const2215 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
2216                                                                const WorkloadInfo& /*info*/) const
2217 {
2218     return std::unique_ptr<IWorkload>();
2219 }
2220 
CreatePrelu(const PreluQueueDescriptor &,const WorkloadInfo &) const2221 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
2222                                                          const WorkloadInfo &/*info*/) const
2223 {
2224     return std::unique_ptr<IWorkload>();
2225 }
2226 
CreateQuantize(const QuantizeQueueDescriptor &,const WorkloadInfo &) const2227 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
2228                                                             const WorkloadInfo& /*Info*/) const
2229 {
2230     return std::unique_ptr<IWorkload>();
2231 }
2232 
CreateQLstm(const QLstmQueueDescriptor &,const WorkloadInfo &) const2233 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/,
2234                                                          const WorkloadInfo& /*info*/) const
2235 {
2236     return std::unique_ptr<IWorkload>();
2237 }
2238 
CreateQuantizedLstm(const QuantizedLstmQueueDescriptor &,const WorkloadInfo &) const2239 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
2240                                                                  const WorkloadInfo& /*info*/) const
2241 {
2242     return std::unique_ptr<IWorkload>();
2243 }
CreateRank(const RankQueueDescriptor &,const WorkloadInfo &) const2244 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
2245                                                         const WorkloadInfo& /*info*/) const
2246 {
2247     return std::unique_ptr<IWorkload>();
2248 }
2249 
CreateReduce(const ReduceQueueDescriptor &,const WorkloadInfo &) const2250 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& /*descriptor*/,
2251                                                           const WorkloadInfo& /*info*/) const
2252 {
2253     return std::unique_ptr<IWorkload>();
2254 }
2255 
CreateReshape(const ReshapeQueueDescriptor &,const WorkloadInfo &) const2256 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
2257                                                            const WorkloadInfo& /*info*/) const
2258 {
2259     return std::unique_ptr<IWorkload>();
2260 }
2261 
CreateResize(const ResizeQueueDescriptor &,const WorkloadInfo &) const2262 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
2263                                                             const WorkloadInfo& /*info*/) const
2264 {
2265     return std::unique_ptr<IWorkload>();
2266 }
2267 
CreateShape(const ShapeQueueDescriptor &,const WorkloadInfo &) const2268 std::unique_ptr<IWorkload> IWorkloadFactory::CreateShape(const ShapeQueueDescriptor& /*descriptor*/,
2269                                                          const WorkloadInfo& /*info*/) const
2270 {
2271     return std::unique_ptr<IWorkload>();
2272 }
2273 
CreateSlice(const SliceQueueDescriptor &,const WorkloadInfo &) const2274 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
2275                                                          const WorkloadInfo& /*info*/) const
2276 {
2277     return std::unique_ptr<IWorkload>();
2278 }
2279 
CreateSoftmax(const SoftmaxQueueDescriptor &,const WorkloadInfo &) const2280 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
2281                                                            const WorkloadInfo& /*info*/) const
2282 {
2283     return std::unique_ptr<IWorkload>();
2284 }
2285 
CreateSplitter(const SplitterQueueDescriptor &,const WorkloadInfo &) const2286 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
2287                                                             const WorkloadInfo& /*info*/) const
2288 {
2289     return std::unique_ptr<IWorkload>();
2290 }
2291 
CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor &,const WorkloadInfo &) const2292 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
2293                                                                   const WorkloadInfo& /*info*/) const
2294 {
2295     return std::unique_ptr<IWorkload>();
2296 }
2297 
CreateSpaceToDepth(const SpaceToDepthQueueDescriptor &,const WorkloadInfo &) const2298 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
2299                                                                 const WorkloadInfo& /*info*/) const
2300 {
2301     return std::unique_ptr<IWorkload>();
2302 }
2303 
CreateStack(const StackQueueDescriptor &,const WorkloadInfo &) const2304 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
2305                                                          const WorkloadInfo& /*info*/) const
2306 {
2307     return std::unique_ptr<IWorkload>();
2308 }
2309 
CreateStridedSlice(const StridedSliceQueueDescriptor &,const WorkloadInfo &) const2310 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
2311                                                                 const WorkloadInfo& /*info*/) const
2312 {
2313     return std::unique_ptr<IWorkload>();
2314 }
2315 
CreateSubtraction(const SubtractionQueueDescriptor &,const WorkloadInfo &) const2316 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
2317                                                                const WorkloadInfo& /*info*/) const
2318 {
2319     return std::unique_ptr<IWorkload>();
2320 }
2321 
CreateSwitch(const SwitchQueueDescriptor &,const WorkloadInfo &) const2322 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
2323                                                           const WorkloadInfo& /*info*/) const
2324 {
2325     return std::unique_ptr<IWorkload>();
2326 }
2327 
CreateTranspose(const TransposeQueueDescriptor &,const WorkloadInfo &) const2328 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
2329                                                              const WorkloadInfo& /*info*/) const
2330 {
2331     return std::unique_ptr<IWorkload>();
2332 }
2333 
CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor &,const WorkloadInfo &) const2334 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
2335     const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
2336     const WorkloadInfo& /*info*/) const
2337 {
2338     return std::unique_ptr<IWorkload>();
2339 }
2340 
CreateUnidirectionalSequenceLstm(const UnidirectionalSequenceLstmQueueDescriptor &,const WorkloadInfo &) const2341 std::unique_ptr<IWorkload> IWorkloadFactory::CreateUnidirectionalSequenceLstm(
2342         const UnidirectionalSequenceLstmQueueDescriptor& /*descriptor*/,
2343         const WorkloadInfo& /*info*/) const
2344 {
2345     return std::unique_ptr<IWorkload>();
2346 }
2347 
CreateInput(const InputQueueDescriptor &,const WorkloadInfo &) const2348 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInput(
2349         const InputQueueDescriptor& /*descriptor*/,
2350         const WorkloadInfo& /*info*/) const
2351 {
2352     return std::unique_ptr<IWorkload>();
2353 }
2354 
} // namespace armnn
2356