//
// Copyright © 2021-2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "TensorHandle.hpp"

#include <armnn/Deprecated.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
#include <common/include/ProfilingGuid.hpp>

namespace armnn
{

// A helper function that returns the bias data type required for a given input data type.
DataType GetBiasDataType(DataType inputDataType);
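
/// Example (an illustrative sketch, not part of the API; the exact mapping lives in
/// WorkloadData.cpp): quantized integer input types map to a 32-bit integer bias type,
/// while float input types use a matching float bias type.
/// @code
/// armnn::DataType biasType = armnn::GetBiasDataType(armnn::DataType::QAsymmU8);
/// // biasType is expected to be armnn::DataType::Signed32 for quantized inputs, while
/// // armnn::GetBiasDataType(armnn::DataType::Float32) returns armnn::DataType::Float32.
/// @endcode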

struct WorkloadInfo;

struct QueueDescriptor
{
    std::vector<ITensorHandle*> m_Inputs;
    std::vector<ITensorHandle*> m_Outputs;
    void* m_AdditionalInfoObject;

    virtual ~QueueDescriptor() = default;

    void ValidateTensorNumDimensions(const TensorInfo& tensor,
                                     std::string const& descName,
                                     unsigned int numDimensions,
                                     std::string const& tensorName) const;

    void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
                                     unsigned int numDimension,
                                     unsigned int numElements,
                                     std::string const& tensorName) const;

    void ValidateInputsOutputs(const std::string& descName,
                               unsigned int numExpectedIn,
                               unsigned int numExpectedOut) const;

    template<typename T>
    const T* GetAdditionalInformation() const
    {
        return static_cast<T*>(m_AdditionalInfoObject);
    }

    bool m_AllowExpandedDims = false;

protected:
    QueueDescriptor()
        : m_AdditionalInfoObject(nullptr)
    {}
    QueueDescriptor(QueueDescriptor const&) = default;
    QueueDescriptor& operator=(QueueDescriptor const&) = default;
};
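
/// Example of populating a queue descriptor (an illustrative sketch; 'inputHandle' and
/// 'outputHandle' are assumed to be valid ITensorHandle pointers owned by the caller, and
/// 'MyAdditionalInfo' is a hypothetical type a backend might attach):
/// @code
/// armnn::ActivationQueueDescriptor descriptor; // declared further below in this header
/// descriptor.m_Inputs.push_back(inputHandle);
/// descriptor.m_Outputs.push_back(outputHandle);
/// // Retrieve backend-specific data, if any, as a typed pointer:
/// const MyAdditionalInfo* info = descriptor.GetAdditionalInformation<MyAdditionalInfo>();
/// @endcode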

// Base class for queue descriptors which contain parameters.
template <typename LayerDescriptor>
struct QueueDescriptorWithParameters : public QueueDescriptor
{
    LayerDescriptor m_Parameters;

    virtual ~QueueDescriptorWithParameters() = default;

protected:
    QueueDescriptorWithParameters() = default;
    QueueDescriptorWithParameters(QueueDescriptorWithParameters const&) = default;
    QueueDescriptorWithParameters& operator=(QueueDescriptorWithParameters const&) = default;
};

struct MapQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct UnmapQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct MemCopyQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

using InputQueueDescriptor = MemCopyQueueDescriptor;
using OutputQueueDescriptor = MemCopyQueueDescriptor;

struct MemImportQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct MemSyncQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Softmax layer workload data.
struct SoftmaxQueueDescriptor : QueueDescriptorWithParameters<SoftmaxDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Splitter layer workload data.
struct SplitterQueueDescriptor : QueueDescriptorWithParameters<ViewsDescriptor>
{
    struct ViewOrigin
    {
        ViewOrigin() {}
        ViewOrigin(std::vector<unsigned int> const& origin) : m_Origin(origin) {}

        // View origin (the size of the vector is the same as the number of dimensions of the view).
        std::vector<unsigned int> m_Origin;
    };

    // A view defines a tensor that will be carved from the input tensor.
    // View origins are stored here; the extents are defined by the sizes of the output tensors.
    std::vector<ViewOrigin> m_ViewOrigins;

    void Validate(const WorkloadInfo& workloadInfo) const;
};
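
/// Example (an illustrative sketch): splitting a [4, 2] input into two [2, 2] outputs
/// along dimension 0. Each origin has one entry per dimension of the view; the extent of
/// each view comes from the corresponding output TensorInfo.
/// @code
/// armnn::SplitterQueueDescriptor splitter;
/// splitter.m_ViewOrigins.emplace_back(std::vector<unsigned int>{0, 0}); // first view starts at row 0
/// splitter.m_ViewOrigins.emplace_back(std::vector<unsigned int>{2, 0}); // second view starts at row 2
/// @endcode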

// Concat layer workload data.
struct ConcatQueueDescriptor : QueueDescriptorWithParameters<OriginsDescriptor>
{
    struct ViewOrigin
    {
        ViewOrigin() {}
        ViewOrigin(const std::vector<unsigned int>& origin) : m_Origin(origin) {}

        // View origin (the size of the vector is the same as the number of dimensions of the view).
        std::vector<unsigned int> m_Origin;
    };

    // A view defines a sub-area of the output tensor that will be filled with the corresponding input tensor.
    // View origins are stored here; the extents are defined by the sizes of the input tensors.
    std::vector<ViewOrigin> m_ViewOrigins;

    void Validate(const WorkloadInfo& workloadInfo) const;
};
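
/// Example (an illustrative sketch): concatenating two [2, 2] inputs into a [4, 2] output
/// along dimension 0, the mirror image of the splitter example above. The extent of each
/// view comes from the corresponding input TensorInfo.
/// @code
/// armnn::ConcatQueueDescriptor concat;
/// concat.m_ViewOrigins.emplace_back(std::vector<unsigned int>{0, 0}); // first input fills rows 0-1
/// concat.m_ViewOrigins.emplace_back(std::vector<unsigned int>{2, 0}); // second input fills rows 2-3
/// @endcode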

// Deprecated. Use ConcatQueueDescriptor instead.
using MergerQueueDescriptor = ConcatQueueDescriptor;

// Stack layer workload data.
struct StackQueueDescriptor : QueueDescriptorWithParameters<StackDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Activation layer workload data.
struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ArgMinMaxQueueDescriptor : QueueDescriptorWithParameters<ArgMinMaxDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct CastQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Fill layer workload data.
struct FillQueueDescriptor : QueueDescriptorWithParameters<FillDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Fully connected layer workload data.
struct FullyConnectedQueueDescriptor : QueueDescriptorWithParameters<FullyConnectedDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Permute layer workload data.
struct PermuteQueueDescriptor : QueueDescriptorWithParameters<PermuteDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Pooling 2D layer workload data.
struct Pooling2dQueueDescriptor : QueueDescriptorWithParameters<Pooling2dDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Pooling 3D layer workload data.
struct Pooling3dQueueDescriptor : QueueDescriptorWithParameters<Pooling3dDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Convolution 2D layer workload data.
struct Convolution2dQueueDescriptor : QueueDescriptorWithParameters<Convolution2dDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Convolution 3D layer workload data.
struct Convolution3dQueueDescriptor : QueueDescriptorWithParameters<Convolution3dDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

/// Depthwise Convolution 2D layer workload data.
///
/// @note
/// The weights are in the format [1, H, W, I*M], where I is the input channel size, M is the depthwise
/// multiplier, and H and W are the height and width of the filter kernel. If per-channel quantization is
/// applied, the weights are quantized along the last dimension/axis (I*M), which corresponds to the output
/// channel size, and the weights tensor has I*M scales, one for each channel along the quantization axis.
/// You have to be aware of this when reshaping the weights tensor: splitting the I*M axis, e.g.
/// [1, H, W, I*M] --> [H, W, I, M], won't work without taking care of the corresponding quantization scales.
/// If no per-channel quantization is applied, reshaping the weights tensor won't cause any issues.
/// Preconfigured permutation functions are available @link WorkloadUtils.hpp here.
///
struct DepthwiseConvolution2dQueueDescriptor : QueueDescriptorWithParameters<DepthwiseConvolution2dDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};
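
/// Example of the per-channel quantization caveat above (an illustrative sketch): with
/// I = 2 input channels, depthwise multiplier M = 2 and a 3x3 kernel, the weights are
/// [1, 3, 3, 4] and carry 4 scales along the last axis. A reshape such as
/// [1, 3, 3, 4] --> [3, 3, 2, 2] moves the quantized channels onto different axes, so the
/// quantization dimension and scales must be kept consistent with the new layout.
/// @code
/// armnn::TensorInfo weightsInfo({1, 3, 3, 4}, armnn::DataType::QSymmS8);
/// weightsInfo.SetQuantizationScales({0.1f, 0.2f, 0.3f, 0.4f}); // one scale per output channel (I*M)
/// weightsInfo.SetQuantizationDim(3);                           // quantized along the I*M axis
/// @endcode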

struct DetectionPostProcessQueueDescriptor : QueueDescriptorWithParameters<DetectionPostProcessDescriptor>
{
    DetectionPostProcessQueueDescriptor()
        : m_Anchors(nullptr)
    {
    }

    const ConstTensorHandle* m_Anchors;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Normalization layer workload data.
struct NormalizationQueueDescriptor : QueueDescriptorWithParameters<NormalizationDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Add layer workload data.
struct AdditionQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Multiplication layer workload data.
struct MultiplicationQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Division layer workload data.
struct DivisionQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Subtraction layer workload data.
struct SubtractionQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Maximum layer workload data.
struct MaximumQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Mean layer workload data.
struct MeanQueueDescriptor : QueueDescriptorWithParameters<MeanDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Pad layer workload data.
struct PadQueueDescriptor : QueueDescriptorWithParameters<PadDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct QuantizeQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Deprecated. Use ComparisonQueueDescriptor instead.
struct EqualQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Batch norm layer workload data.
struct BatchNormalizationQueueDescriptor : QueueDescriptorWithParameters<BatchNormalizationDescriptor>
{
    BatchNormalizationQueueDescriptor()
        : m_Mean(nullptr)
        , m_Variance(nullptr)
        , m_Beta(nullptr)
        , m_Gamma(nullptr)
    {
    }

    const ConstTensorHandle* m_Mean;
    const ConstTensorHandle* m_Variance;
    const ConstTensorHandle* m_Beta;
    const ConstTensorHandle* m_Gamma;

    void Validate(const WorkloadInfo& workloadInfo) const;
};
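
/// Example of supplying the constant tensors (an illustrative sketch; 'channels' and the
/// 'meanData'/'varianceData' float arrays are assumed caller-owned, and ScopedTensorHandle
/// from TensorHandle.hpp is one way to wrap a ConstTensor):
/// @code
/// armnn::TensorInfo paramInfo({channels}, armnn::DataType::Float32);
/// paramInfo.SetConstant();
/// armnn::ScopedTensorHandle meanHandle(armnn::ConstTensor(paramInfo, meanData));
/// armnn::ScopedTensorHandle varianceHandle(armnn::ConstTensor(paramInfo, varianceData));
/// armnn::BatchNormalizationQueueDescriptor descriptor;
/// descriptor.m_Mean     = &meanHandle;
/// descriptor.m_Variance = &varianceHandle;
/// // m_Beta and m_Gamma are populated the same way before Validate() runs.
/// @endcode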

struct RankQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ResizeQueueDescriptor : QueueDescriptorWithParameters<ResizeDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct FakeQuantizationQueueDescriptor : QueueDescriptorWithParameters<FakeQuantizationDescriptor>
{
    FakeQuantizationQueueDescriptor()
        : m_Min(nullptr)
        , m_Max(nullptr)
    {
    }

    const ConstTensorHandle* m_Min;
    const ConstTensorHandle* m_Max;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct InstanceNormalizationQueueDescriptor : QueueDescriptorWithParameters<InstanceNormalizationDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct L2NormalizationQueueDescriptor : QueueDescriptorWithParameters<L2NormalizationDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct LogSoftmaxQueueDescriptor : QueueDescriptorWithParameters<LogSoftmaxDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ConstantQueueDescriptor : QueueDescriptor
{
    ConstantQueueDescriptor()
        : m_LayerOutput(nullptr)
    {
    }

    const ConstTensorHandle* m_LayerOutput;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ReshapeQueueDescriptor : QueueDescriptorWithParameters<ReshapeDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct SpaceToBatchNdQueueDescriptor : QueueDescriptorWithParameters<SpaceToBatchNdDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct SpaceToDepthQueueDescriptor : QueueDescriptorWithParameters<SpaceToDepthDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct FloorQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct LstmQueueDescriptor : QueueDescriptorWithParameters<LstmDescriptor>
{
    LstmQueueDescriptor()
        : m_InputToInputWeights(nullptr)
        , m_InputToForgetWeights(nullptr)
        , m_InputToCellWeights(nullptr)
        , m_InputToOutputWeights(nullptr)
        , m_RecurrentToInputWeights(nullptr)
        , m_RecurrentToForgetWeights(nullptr)
        , m_RecurrentToCellWeights(nullptr)
        , m_RecurrentToOutputWeights(nullptr)
        , m_CellToInputWeights(nullptr)
        , m_CellToForgetWeights(nullptr)
        , m_CellToOutputWeights(nullptr)
        , m_InputGateBias(nullptr)
        , m_ForgetGateBias(nullptr)
        , m_CellBias(nullptr)
        , m_OutputGateBias(nullptr)
        , m_ProjectionWeights(nullptr)
        , m_ProjectionBias(nullptr)
        , m_InputLayerNormWeights(nullptr)
        , m_ForgetLayerNormWeights(nullptr)
        , m_CellLayerNormWeights(nullptr)
        , m_OutputLayerNormWeights(nullptr)
    {
    }

    const ConstTensorHandle* m_InputToInputWeights;
    const ConstTensorHandle* m_InputToForgetWeights;
    const ConstTensorHandle* m_InputToCellWeights;
    const ConstTensorHandle* m_InputToOutputWeights;
    const ConstTensorHandle* m_RecurrentToInputWeights;
    const ConstTensorHandle* m_RecurrentToForgetWeights;
    const ConstTensorHandle* m_RecurrentToCellWeights;
    const ConstTensorHandle* m_RecurrentToOutputWeights;
    const ConstTensorHandle* m_CellToInputWeights;
    const ConstTensorHandle* m_CellToForgetWeights;
    const ConstTensorHandle* m_CellToOutputWeights;
    const ConstTensorHandle* m_InputGateBias;
    const ConstTensorHandle* m_ForgetGateBias;
    const ConstTensorHandle* m_CellBias;
    const ConstTensorHandle* m_OutputGateBias;
    const ConstTensorHandle* m_ProjectionWeights;
    const ConstTensorHandle* m_ProjectionBias;
    const ConstTensorHandle* m_InputLayerNormWeights;
    const ConstTensorHandle* m_ForgetLayerNormWeights;
    const ConstTensorHandle* m_CellLayerNormWeights;
    const ConstTensorHandle* m_OutputLayerNormWeights;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ConvertFp16ToFp32QueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ConvertFp32ToFp16QueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct BatchToSpaceNdQueueDescriptor : QueueDescriptorWithParameters<BatchToSpaceNdDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct StridedSliceQueueDescriptor : QueueDescriptorWithParameters<StridedSliceDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Minimum layer workload data.
struct MinimumQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Deprecated. Use ComparisonQueueDescriptor instead.
struct GreaterQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct DebugQueueDescriptor : QueueDescriptor
{
    DebugQueueDescriptor() : m_Guid(0) {}

    void Validate(const WorkloadInfo& workloadInfo) const;

    LayerGuid m_Guid;
    std::string m_LayerName;
    unsigned int m_SlotIndex;

    bool m_LayerOutputToFile = false;
};

struct RsqrtQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct GatherNdQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct GatherQueueDescriptor : QueueDescriptorWithParameters<GatherDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct PreCompiledQueueDescriptor : QueueDescriptorWithParameters<PreCompiledDescriptor>
{
    PreCompiledQueueDescriptor()
        : m_PreCompiledObject(nullptr)
    {
    }

    void* m_PreCompiledObject;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct DequantizeQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct MergeQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct SwitchQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct PreluQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct TransposeConvolution2dQueueDescriptor : QueueDescriptorWithParameters<TransposeConvolution2dDescriptor>
{
    TransposeConvolution2dQueueDescriptor() :
        m_Weight(nullptr),
        m_Bias(nullptr)
    {}

    const ConstTensorHandle* m_Weight;
    const ConstTensorHandle* m_Bias;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct TransposeQueueDescriptor : QueueDescriptorWithParameters<TransposeDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct QLstmQueueDescriptor : QueueDescriptorWithParameters<QLstmDescriptor>
{
    QLstmQueueDescriptor()
        : m_InputToInputWeights(nullptr)
        , m_InputToForgetWeights(nullptr)
        , m_InputToCellWeights(nullptr)
        , m_InputToOutputWeights(nullptr)
        , m_RecurrentToInputWeights(nullptr)
        , m_RecurrentToForgetWeights(nullptr)
        , m_RecurrentToCellWeights(nullptr)
        , m_RecurrentToOutputWeights(nullptr)
        , m_CellToInputWeights(nullptr)
        , m_CellToForgetWeights(nullptr)
        , m_CellToOutputWeights(nullptr)
        , m_InputGateBias(nullptr)
        , m_ForgetGateBias(nullptr)
        , m_CellBias(nullptr)
        , m_OutputGateBias(nullptr)
        , m_ProjectionWeights(nullptr)
        , m_ProjectionBias(nullptr)
        , m_InputLayerNormWeights(nullptr)
        , m_ForgetLayerNormWeights(nullptr)
        , m_CellLayerNormWeights(nullptr)
        , m_OutputLayerNormWeights(nullptr)
    {
    }

    const ConstTensorHandle* m_InputToInputWeights;
    const ConstTensorHandle* m_InputToForgetWeights;
    const ConstTensorHandle* m_InputToCellWeights;
    const ConstTensorHandle* m_InputToOutputWeights;
    const ConstTensorHandle* m_RecurrentToInputWeights;
    const ConstTensorHandle* m_RecurrentToForgetWeights;
    const ConstTensorHandle* m_RecurrentToCellWeights;
    const ConstTensorHandle* m_RecurrentToOutputWeights;
    const ConstTensorHandle* m_CellToInputWeights;
    const ConstTensorHandle* m_CellToForgetWeights;
    const ConstTensorHandle* m_CellToOutputWeights;
    const ConstTensorHandle* m_InputGateBias;
    const ConstTensorHandle* m_ForgetGateBias;
    const ConstTensorHandle* m_CellBias;
    const ConstTensorHandle* m_OutputGateBias;
    const ConstTensorHandle* m_ProjectionWeights;
    const ConstTensorHandle* m_ProjectionBias;
    const ConstTensorHandle* m_InputLayerNormWeights;
    const ConstTensorHandle* m_ForgetLayerNormWeights;
    const ConstTensorHandle* m_CellLayerNormWeights;
    const ConstTensorHandle* m_OutputLayerNormWeights;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct QuantizedLstmQueueDescriptor : QueueDescriptor
{
    QuantizedLstmQueueDescriptor()
        : m_InputToInputWeights(nullptr)
        , m_InputToForgetWeights(nullptr)
        , m_InputToCellWeights(nullptr)
        , m_InputToOutputWeights(nullptr)

        , m_RecurrentToInputWeights(nullptr)
        , m_RecurrentToForgetWeights(nullptr)
        , m_RecurrentToCellWeights(nullptr)
        , m_RecurrentToOutputWeights(nullptr)

        , m_InputGateBias(nullptr)
        , m_ForgetGateBias(nullptr)
        , m_CellBias(nullptr)
        , m_OutputGateBias(nullptr)
    {}

    const ConstTensorHandle* m_InputToInputWeights;
    const ConstTensorHandle* m_InputToForgetWeights;
    const ConstTensorHandle* m_InputToCellWeights;
    const ConstTensorHandle* m_InputToOutputWeights;

    const ConstTensorHandle* m_RecurrentToInputWeights;
    const ConstTensorHandle* m_RecurrentToForgetWeights;
    const ConstTensorHandle* m_RecurrentToCellWeights;
    const ConstTensorHandle* m_RecurrentToOutputWeights;

    const ConstTensorHandle* m_InputGateBias;
    const ConstTensorHandle* m_ForgetGateBias;
    const ConstTensorHandle* m_CellBias;
    const ConstTensorHandle* m_OutputGateBias;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct AbsQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct SliceQueueDescriptor : QueueDescriptorWithParameters<SliceDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct DepthToSpaceQueueDescriptor : QueueDescriptorWithParameters<DepthToSpaceDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ComparisonQueueDescriptor : QueueDescriptorWithParameters<ComparisonDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ElementwiseBinaryQueueDescriptor : QueueDescriptorWithParameters<ElementwiseBinaryDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ElementwiseUnaryQueueDescriptor : QueueDescriptorWithParameters<ElementwiseUnaryDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct LogicalBinaryQueueDescriptor : QueueDescriptorWithParameters<LogicalBinaryDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ReduceQueueDescriptor : QueueDescriptorWithParameters<ReduceDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ShapeQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct UnidirectionalSequenceLstmQueueDescriptor : QueueDescriptorWithParameters<LstmDescriptor>
{
    UnidirectionalSequenceLstmQueueDescriptor()
        : m_InputToInputWeights(nullptr)
        , m_InputToForgetWeights(nullptr)
        , m_InputToCellWeights(nullptr)
        , m_InputToOutputWeights(nullptr)
        , m_RecurrentToInputWeights(nullptr)
        , m_RecurrentToForgetWeights(nullptr)
        , m_RecurrentToCellWeights(nullptr)
        , m_RecurrentToOutputWeights(nullptr)
        , m_CellToInputWeights(nullptr)
        , m_CellToForgetWeights(nullptr)
        , m_CellToOutputWeights(nullptr)
        , m_InputGateBias(nullptr)
        , m_ForgetGateBias(nullptr)
        , m_CellBias(nullptr)
        , m_OutputGateBias(nullptr)
        , m_ProjectionWeights(nullptr)
        , m_ProjectionBias(nullptr)
        , m_InputLayerNormWeights(nullptr)
        , m_ForgetLayerNormWeights(nullptr)
        , m_CellLayerNormWeights(nullptr)
        , m_OutputLayerNormWeights(nullptr)
    {
    }

    const ConstTensorHandle* m_InputToInputWeights;
    const ConstTensorHandle* m_InputToForgetWeights;
    const ConstTensorHandle* m_InputToCellWeights;
    const ConstTensorHandle* m_InputToOutputWeights;
    const ConstTensorHandle* m_RecurrentToInputWeights;
    const ConstTensorHandle* m_RecurrentToForgetWeights;
    const ConstTensorHandle* m_RecurrentToCellWeights;
    const ConstTensorHandle* m_RecurrentToOutputWeights;
    const ConstTensorHandle* m_CellToInputWeights;
    const ConstTensorHandle* m_CellToForgetWeights;
    const ConstTensorHandle* m_CellToOutputWeights;
    const ConstTensorHandle* m_InputGateBias;
    const ConstTensorHandle* m_ForgetGateBias;
    const ConstTensorHandle* m_CellBias;
    const ConstTensorHandle* m_OutputGateBias;
    const ConstTensorHandle* m_ProjectionWeights;
    const ConstTensorHandle* m_ProjectionBias;
    const ConstTensorHandle* m_InputLayerNormWeights;
    const ConstTensorHandle* m_ForgetLayerNormWeights;
    const ConstTensorHandle* m_CellLayerNormWeights;
    const ConstTensorHandle* m_OutputLayerNormWeights;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ChannelShuffleQueueDescriptor : QueueDescriptorWithParameters<ChannelShuffleDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct BatchMatMulQueueDescriptor : QueueDescriptorWithParameters<BatchMatMulDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

} // namespace armnn