1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6
7 #include "Deprecated.hpp"
8 #include "DescriptorsFwd.hpp" // Required for class equivalence declarations.
9 #include "Tensor.hpp"
10 #include "Types.hpp"
11 #include <armnn/Exceptions.hpp>
12
13 #include <cstdint>
14 #include <iterator>
15 #include <utility>
16 #include <vector>
17
18 namespace armnn
19 {
20
21 /// Base class for all descriptors.
22 struct BaseDescriptor
23 {
24 virtual bool IsNull() const { return false; }
25 virtual ~BaseDescriptor() = default;
26 };
27
28 /// Null Descriptor used as a return value from the IConnectableLayer GetParameters method
29 /// by layers which do not have a descriptor
30 struct NullDescriptor : BaseDescriptor
31 {
32 bool IsNull() const override { return true; }
33 };
34
35 /// An ActivationDescriptor for the ActivationLayer.
36 struct ActivationDescriptor : BaseDescriptor
37 {
38 ActivationDescriptor()
39 : m_Function(ActivationFunction::Sigmoid)
40 , m_A(0)
41 , m_B(0)
42 {}
43
44 ActivationDescriptor(armnn::ActivationFunction activation,
45 float a = 0,
46 float b = 0)
47 : m_Function(activation)
48 , m_A(a)
49 , m_B(b)
50 {}
51
52 bool operator ==(const ActivationDescriptor &rhs) const
53 {
54 return m_Function == rhs.m_Function && m_A == rhs.m_A && m_B == rhs.m_B;
55 }
56
57 /// @brief The activation function to use
58 /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
59 ActivationFunction m_Function;
60 /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
61 float m_A;
62 /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
63 float m_B;
64 };
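// Example (editorial sketch, not part of the original header): how m_A and m_B map onto a
// bounded ReLU clamped to [0, 6]. The descriptor would then be passed to the matching
// INetwork::AddActivationLayer call, which is declared elsewhere.
//
//     armnn::ActivationDescriptor boundedRelu(armnn::ActivationFunction::BoundedReLu,
//                                             6.0f,  // m_A: upper bound
//                                             0.0f); // m_B: lower bound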
65
66 /// An ArgMinMaxDescriptor for ArgMinMaxLayer
67 struct ArgMinMaxDescriptor : BaseDescriptor
68 {
69 ArgMinMaxDescriptor()
70 : m_Function(ArgMinMaxFunction::Min)
71 , m_Axis(-1)
72 , m_Output_Type(armnn::DataType::Signed32)
73 {}
74
75 bool operator ==(const ArgMinMaxDescriptor &rhs) const
76 {
77 return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis && m_Output_Type == rhs.m_Output_Type;
78 }
79
80 /// Specify if the function is to find Min or Max.
81 ArgMinMaxFunction m_Function;
82 /// Axis to reduce across the input tensor.
83 int m_Axis;
84 /// Deprecated and will be removed in a future release.
85 armnn::DataType m_Output_Type;
86 };
87
88 /// A ComparisonDescriptor for the ComparisonLayer
89 struct ComparisonDescriptor : BaseDescriptor
90 {
91 ComparisonDescriptor()
92 : ComparisonDescriptor(ComparisonOperation::Equal)
93 {}
94
95 ComparisonDescriptor(ComparisonOperation operation)
96 : m_Operation(operation)
97 {}
98
99 bool operator ==(const ComparisonDescriptor &rhs) const
100 {
101 return m_Operation == rhs.m_Operation;
102 }
103
104 /// Specifies the comparison operation to execute
105 ComparisonOperation m_Operation;
106 };
107
108 /// A ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer
109 struct ElementwiseBinaryDescriptor : BaseDescriptor
110 {
111 ElementwiseBinaryDescriptor()
112 : ElementwiseBinaryDescriptor(BinaryOperation::Add)
113 {}
114
115 ElementwiseBinaryDescriptor(BinaryOperation operation)
116 : m_Operation(operation)
117 {}
118
119 bool operator ==(const ElementwiseBinaryDescriptor &rhs) const
120 {
121 return m_Operation == rhs.m_Operation;
122 }
123
124 /// Specifies the elementwiseBinary operation to execute
125 BinaryOperation m_Operation;
126 };
127
128 /// A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
129 struct ElementwiseUnaryDescriptor : BaseDescriptor
130 {
131 ElementwiseUnaryDescriptor()
132 : ElementwiseUnaryDescriptor(UnaryOperation::Abs)
133 {}
134
135 ElementwiseUnaryDescriptor(UnaryOperation operation)
136 : m_Operation(operation)
137 {}
138
139 bool operator ==(const ElementwiseUnaryDescriptor &rhs) const
140 {
141 return m_Operation == rhs.m_Operation;
142 }
143
144 /// Specifies the elementwiseUnary operation to execute
145 UnaryOperation m_Operation;
146 };
147
148 /// A PermuteDescriptor for the PermuteLayer.
149 struct PermuteDescriptor : BaseDescriptor
150 {
151 PermuteDescriptor()
152 : m_DimMappings{}
153 {}
154
155 PermuteDescriptor(const PermutationVector& dimMappings)
156 : m_DimMappings(dimMappings)
157 {}
158
159 bool operator ==(const PermuteDescriptor &rhs) const
160 {
161 return m_DimMappings.IsEqual(rhs.m_DimMappings);
162 }
163
164 /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
165 /// source and target potentially have different memory layouts e.g.
166 /// Input Shape {1, 1, 4, 4}
167 /// Permutation Vector {0, 2, 3, 1}
168 /// Output Shape {1, 4, 1, 4}
169 /// dim "0" goes into index 0 ([ 1, X, X, X ])
170 /// dim "1" goes into index 2 ([ 1, X, 1, X ])
171 /// dim "2" goes into index 3 ([ 1, X, 1, 4 ])
172 /// dim "3" goes into index 1 ([ 1, 4, 1, 4 ])
173 PermutationVector m_DimMappings;
174 };
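// Example (editorial sketch): building the mapping documented above, where entry i gives the
// output index that input dimension i moves to. PermutationVector is declared in Types.hpp,
// which this header already includes.
//
//     armnn::PermutationVector dimMappings({ 0, 2, 3, 1 }); // input dim 1 -> output index 2, etc.
//     armnn::PermuteDescriptor permuteDesc(dimMappings);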
175
176 /// A SoftmaxDescriptor for the SoftmaxLayer.
177 struct SoftmaxDescriptor : BaseDescriptor
178 {
179 SoftmaxDescriptor()
180 : m_Beta(1.0f)
181 , m_Axis(-1)
182 {}
183
184 bool operator ==(const SoftmaxDescriptor& rhs) const
185 {
186 return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
187 }
188
189 /// Exponentiation value.
190 float m_Beta;
191 /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
192 int m_Axis;
193 };
194
195 /// A LogSoftmaxDescriptor for the LogSoftmaxLayer
196 using LogSoftmaxDescriptor = SoftmaxDescriptor;
197
198 /// @brief An OriginsDescriptor for the ConcatLayer.
199 /// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
200 /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
201 struct OriginsDescriptor : BaseDescriptor
202 {
203 OriginsDescriptor();
204 OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
205 OriginsDescriptor(const OriginsDescriptor& other);
206 OriginsDescriptor(OriginsDescriptor&& other);
207
208 ~OriginsDescriptor();
209
210 OriginsDescriptor& operator=(OriginsDescriptor rhs);
211
212 bool operator ==(const OriginsDescriptor& rhs) const;
213
214 /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
215 /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
216 /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
217 Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
218 /// Get the number of views.
219 uint32_t GetNumViews() const;
220 /// Get the number of dimensions.
221 uint32_t GetNumDimensions() const;
222 /// Return the view origin at the int value idx.
223 const uint32_t* GetViewOrigin(uint32_t idx) const;
224 /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
225 /// The number of views must match number of elements in the new ordering array.
226 void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
227 /// Swap the OriginsDescriptor values first and second.
228 friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
229 /// Set the concatenation axis value.
230 void SetConcatAxis(unsigned int concatAxis);
231 /// Get the concatenation axis value.
232 unsigned int GetConcatAxis() const;
233
234 private:
235 unsigned int m_ConcatAxis;
236 uint32_t m_NumViews;
237 uint32_t m_NumDimensions;
238 uint32_t** m_ViewOrigins;
239 };
240
241 /// @brief A ViewsDescriptor for the SplitterLayer.
242 /// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
243 /// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
244 struct ViewsDescriptor : BaseDescriptor
245 {
246 ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
247 ViewsDescriptor(const ViewsDescriptor& other);
248 ViewsDescriptor();
249 ViewsDescriptor(ViewsDescriptor&& other);
250
251 ~ViewsDescriptor();
252
253 ViewsDescriptor& operator=(ViewsDescriptor rhs);
254
255 bool operator ==(const ViewsDescriptor& rhs) const;
256
257 /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
258 /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
259 /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
260 Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
261 /// @brief Set the size of the views. The arguments are: view, dimension, value.
262 /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
263 /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
264 Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
265
266 /// Get the number of views.
267 uint32_t GetNumViews() const;
268 /// Get the number of dimensions.
269 uint32_t GetNumDimensions() const;
270 /// Get the view origin at the int value idx.
271 const uint32_t* GetViewOrigin(uint32_t idx) const;
272 /// Get the view sizes at the int value idx.
273 const uint32_t* GetViewSizes(uint32_t idx) const;
274 /// Get the View Origins
275 const OriginsDescriptor& GetOrigins() const;
276
277 /// Swap the ViewsDescriptor values first and second.
278 friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
279 private:
280 OriginsDescriptor m_Origins;
281 uint32_t** m_ViewSizes;
282 };
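// Example (editorial sketch): splitting a hypothetical {1, 4, 2, 2} tensor into two
// {1, 2, 2, 2} views along dimension 1, to illustrate SetViewOriginCoord/SetViewSize.
//
//     armnn::ViewsDescriptor splitterDesc(2, 4); // 2 views, 4 dimensions each
//     for (uint32_t view = 0; view < 2; ++view)
//     {
//         splitterDesc.SetViewOriginCoord(view, 0, 0);
//         splitterDesc.SetViewOriginCoord(view, 1, view * 2); // views start at channel 0 and 2
//         splitterDesc.SetViewOriginCoord(view, 2, 0);
//         splitterDesc.SetViewOriginCoord(view, 3, 0);
//         splitterDesc.SetViewSize(view, 0, 1);
//         splitterDesc.SetViewSize(view, 1, 2);
//         splitterDesc.SetViewSize(view, 2, 2);
//         splitterDesc.SetViewSize(view, 3, 2);
//     }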
283
284
285 /// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
286 /// concatenation of a number of input tensors.
287 template <typename TensorShapeIt>
288 OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first,
289 TensorShapeIt last,
290 unsigned int concatenationDimension)
291 {
292 auto numInputs = std::distance(first, last);
293
294 if (numInputs < 2)
295 {
296 throw InvalidArgumentException("Concatenation requires at least 2 inputs");
297 }
298
299 const auto& firstInputShape = *first;
300
301 const unsigned int numDimensions = firstInputShape.GetNumDimensions();
302 for (auto it = first + 1; it != last; ++it)
303 {
304 if (it->GetNumDimensions() != numDimensions)
305 {
306 throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
307 }
308 }
309
310 if (concatenationDimension >= numDimensions)
311 {
312 throw InvalidArgumentException("concatenationDimension must be between 0 and the number of dimensions.");
313 }
314
315 for (auto it = first; it != last; ++it)
316 {
317 for (unsigned int d = 0; d < numDimensions; ++d)
318 {
319 const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
320 if (!dimSizeOk)
321 {
322 throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
323 " except the concatenation dimension");
324 }
325 }
326 }
327
328 OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
329 viewsDescriptor.SetConcatAxis(concatenationDimension);
330
331 uint32_t viewIndex = 0u;
332 uint32_t coordAlongConcatDim = 0u;
333 for (auto it = first; it != last; ++it)
334 {
335 const auto& inputShape = *it;
336
337 for (unsigned int i = 0; i < concatenationDimension; ++i)
338 {
339 viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
340 }
341
342 viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
343 unsigned int dimSize = inputShape[concatenationDimension];
344 coordAlongConcatDim += dimSize;
345
346
347 for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
348 {
349 viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
350 }
351
352 ++viewIndex;
353 }
354
355 return viewsDescriptor;
356 }
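// Example (editorial sketch): concatenating two {1, 2, 3, 3} tensors along the channel
// dimension. Any random-access range of TensorShape works; TensorShape comes from Tensor.hpp,
// which this header already includes.
//
//     std::vector<armnn::TensorShape> inputShapes{ armnn::TensorShape({ 1, 2, 3, 3 }),
//                                                  armnn::TensorShape({ 1, 2, 3, 3 }) };
//     armnn::OriginsDescriptor concatDesc =
//         armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), 1);
//     // The two views now have origins 0 and 2 along dimension 1.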
357
358 /// A Pooling2dDescriptor for the Pooling2dLayer.
359 struct Pooling2dDescriptor : BaseDescriptor
360 {
361 Pooling2dDescriptor()
362 : m_PoolType(PoolingAlgorithm::Max)
363 , m_PadLeft(0)
364 , m_PadRight(0)
365 , m_PadTop(0)
366 , m_PadBottom(0)
367 , m_PoolWidth(0)
368 , m_PoolHeight(0)
369 , m_StrideX(0)
370 , m_StrideY(0)
371 , m_OutputShapeRounding(OutputShapeRounding::Floor)
372 , m_PaddingMethod(PaddingMethod::Exclude)
373 , m_DataLayout(DataLayout::NCHW)
374 {}
375
376 bool operator ==(const Pooling2dDescriptor& rhs) const
377 {
378 return m_PoolType == rhs.m_PoolType &&
379 m_PadLeft == rhs.m_PadLeft &&
380 m_PadRight == rhs.m_PadRight &&
381 m_PadTop == rhs.m_PadTop &&
382 m_PadBottom == rhs.m_PadBottom &&
383 m_PoolWidth == rhs.m_PoolWidth &&
384 m_PoolHeight == rhs.m_PoolHeight &&
385 m_StrideX == rhs.m_StrideX &&
386 m_StrideY == rhs.m_StrideY &&
387 m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
388 m_PaddingMethod == rhs.m_PaddingMethod &&
389 m_DataLayout == rhs.m_DataLayout;
390 }
391
392 /// The pooling algorithm to use (Max, Average, L2).
393 PoolingAlgorithm m_PoolType;
394 /// Padding left value in the width dimension.
395 uint32_t m_PadLeft;
396 /// Padding right value in the width dimension.
397 uint32_t m_PadRight;
398 /// Padding top value in the height dimension.
399 uint32_t m_PadTop;
400 /// Padding bottom value in the height dimension.
401 uint32_t m_PadBottom;
402 /// Pooling width value.
403 uint32_t m_PoolWidth;
404 /// Pooling height value.
405 uint32_t m_PoolHeight;
406 /// Stride value when proceeding through input for the width dimension.
407 uint32_t m_StrideX;
408 /// Stride value when proceeding through input for the height dimension.
409 uint32_t m_StrideY;
410 /// The rounding method for the output shape. (Floor, Ceiling).
411 OutputShapeRounding m_OutputShapeRounding;
412 /// The padding method to be used. (Exclude, IgnoreValue).
413 PaddingMethod m_PaddingMethod;
414 /// The data layout to be used (NCHW, NHWC).
415 DataLayout m_DataLayout;
416 };
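// Example (editorial sketch): a common 2x2 max pooling with stride 2 and no padding,
// operating on NHWC data (values are illustrative only).
//
//     armnn::Pooling2dDescriptor poolDesc;
//     poolDesc.m_PoolType   = armnn::PoolingAlgorithm::Max;
//     poolDesc.m_PoolWidth  = 2;
//     poolDesc.m_PoolHeight = 2;
//     poolDesc.m_StrideX    = 2;
//     poolDesc.m_StrideY    = 2;
//     poolDesc.m_DataLayout = armnn::DataLayout::NHWC;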
417
418 /// A Pooling3dDescriptor for the Pooling3dLayer.
419 struct Pooling3dDescriptor : BaseDescriptor
420 {
421 Pooling3dDescriptor()
422 : m_PoolType(PoolingAlgorithm::Max)
423 , m_PadLeft(0)
424 , m_PadRight(0)
425 , m_PadTop(0)
426 , m_PadBottom(0)
427 , m_PadFront(0)
428 , m_PadBack(0)
429 , m_PoolWidth(0)
430 , m_PoolHeight(0)
431 , m_PoolDepth(0)
432 , m_StrideX(0)
433 , m_StrideY(0)
434 , m_StrideZ(0)
435 , m_OutputShapeRounding(OutputShapeRounding::Floor)
436 , m_PaddingMethod(PaddingMethod::Exclude)
437 , m_DataLayout(DataLayout::NCDHW)
438 {}
439
440 bool operator ==(const Pooling3dDescriptor& rhs) const
441 {
442 return m_PoolType == rhs.m_PoolType &&
443 m_PadLeft == rhs.m_PadLeft &&
444 m_PadRight == rhs.m_PadRight &&
445 m_PadTop == rhs.m_PadTop &&
446 m_PadBottom == rhs.m_PadBottom &&
447 m_PadFront == rhs.m_PadFront &&
448 m_PadBack == rhs.m_PadBack &&
449 m_PoolWidth == rhs.m_PoolWidth &&
450 m_PoolHeight == rhs.m_PoolHeight &&
451 m_PoolDepth == rhs.m_PoolDepth &&
452 m_StrideX == rhs.m_StrideX &&
453 m_StrideY == rhs.m_StrideY &&
454 m_StrideZ == rhs.m_StrideZ &&
455 m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
456 m_PaddingMethod == rhs.m_PaddingMethod &&
457 m_DataLayout == rhs.m_DataLayout;
458 }
459
460 /// The pooling algorithm to use (Max, Average, L2).
461 PoolingAlgorithm m_PoolType;
462 /// Padding left value in the width dimension.
463 uint32_t m_PadLeft;
464 /// Padding right value in the width dimension.
465 uint32_t m_PadRight;
466 /// Padding top value in the height dimension.
467 uint32_t m_PadTop;
468 /// Padding bottom value in the height dimension.
469 uint32_t m_PadBottom;
470 /// Padding front value in the depth dimension.
471 uint32_t m_PadFront;
472 /// Padding back value in the depth dimension.
473 uint32_t m_PadBack;
474 /// Pooling width value.
475 uint32_t m_PoolWidth;
476 /// Pooling height value.
477 uint32_t m_PoolHeight;
478 /// Pooling depth value.
479 uint32_t m_PoolDepth;
480 /// Stride value when proceeding through input for the width dimension.
481 uint32_t m_StrideX;
482 /// Stride value when proceeding through input for the height dimension.
483 uint32_t m_StrideY;
484 /// Stride value when proceeding through input for the depth dimension.
485 uint32_t m_StrideZ;
486 /// The rounding method for the output shape. (Floor, Ceiling).
487 OutputShapeRounding m_OutputShapeRounding;
488 /// The padding method to be used. (Exclude, IgnoreValue).
489 PaddingMethod m_PaddingMethod;
490 /// The data layout to be used (NCDHW, NDHWC).
491 DataLayout m_DataLayout;
492 };
493
494 /// A FullyConnectedDescriptor for the FullyConnectedLayer.
495 struct FullyConnectedDescriptor : BaseDescriptor
496 {
497 FullyConnectedDescriptor()
498 : m_BiasEnabled(false)
499 , m_TransposeWeightMatrix(false)
500 , m_ConstantWeights(true)
501 {}
502
503 bool operator ==(const FullyConnectedDescriptor& rhs) const
504 {
505 return m_BiasEnabled == rhs.m_BiasEnabled
506 && m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix
507 && m_ConstantWeights == rhs.m_ConstantWeights;
508 }
509
510 /// Get the number of inputs.
511 uint32_t GetNumInputs() const;
512
513 /// Enable/disable bias.
514 bool m_BiasEnabled;
515 /// Enable/disable transpose weight matrix.
516 bool m_TransposeWeightMatrix;
517 /// Enable/disable constant weights and biases.
518 bool m_ConstantWeights;
519 };
520
521 /// A Convolution2dDescriptor for the Convolution2dLayer.
522 struct Convolution2dDescriptor : BaseDescriptor
523 {
524 Convolution2dDescriptor()
525 : m_PadLeft(0)
526 , m_PadRight(0)
527 , m_PadTop(0)
528 , m_PadBottom(0)
529 , m_StrideX(1)
530 , m_StrideY(1)
531 , m_DilationX(1)
532 , m_DilationY(1)
533 , m_BiasEnabled(false)
534 , m_DataLayout(DataLayout::NCHW)
535 {}
536
537 bool operator ==(const Convolution2dDescriptor& rhs) const
538 {
539 return m_PadLeft == rhs.m_PadLeft &&
540 m_PadRight == rhs.m_PadRight &&
541 m_PadTop == rhs.m_PadTop &&
542 m_PadBottom == rhs.m_PadBottom &&
543 m_StrideX == rhs.m_StrideX &&
544 m_StrideY == rhs.m_StrideY &&
545 m_DilationX == rhs.m_DilationX &&
546 m_DilationY == rhs.m_DilationY &&
547 m_BiasEnabled == rhs.m_BiasEnabled &&
548 m_DataLayout == rhs.m_DataLayout;
549 }
550 uint32_t GetNumInputs() const;
551
552
553 /// Padding left value in the width dimension.
554 uint32_t m_PadLeft;
555 /// Padding right value in the width dimension.
556 uint32_t m_PadRight;
557 /// Padding top value in the height dimension.
558 uint32_t m_PadTop;
559 /// Padding bottom value in the height dimension.
560 uint32_t m_PadBottom;
561 /// Stride value when proceeding through input for the width dimension.
562 uint32_t m_StrideX;
563 /// Stride value when proceeding through input for the height dimension.
564 uint32_t m_StrideY;
565 /// Dilation along x axis
566 uint32_t m_DilationX;
567 /// Dilation along y axis
568 uint32_t m_DilationY;
569 /// Enable/disable bias.
570 bool m_BiasEnabled;
571 /// The data layout to be used (NCHW, NHWC).
572 DataLayout m_DataLayout;
573 };
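// Example (editorial sketch): a 3x3 "same" convolution with stride 1 and bias on NHWC data.
// One pixel of padding on every edge preserves the spatial size for a 3x3 kernel
// (values are illustrative only).
//
//     armnn::Convolution2dDescriptor convDesc;
//     convDesc.m_PadLeft     = 1;
//     convDesc.m_PadRight    = 1;
//     convDesc.m_PadTop      = 1;
//     convDesc.m_PadBottom   = 1;
//     convDesc.m_StrideX     = 1;
//     convDesc.m_StrideY     = 1;
//     convDesc.m_BiasEnabled = true;
//     convDesc.m_DataLayout  = armnn::DataLayout::NHWC;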
574
575 /// A Convolution3dDescriptor for the Convolution3dLayer.
576 struct Convolution3dDescriptor : BaseDescriptor
577 {
578 Convolution3dDescriptor()
579 : m_PadLeft(0)
580 , m_PadRight(0)
581 , m_PadTop(0)
582 , m_PadBottom(0)
583 , m_PadFront(0)
584 , m_PadBack(0)
585 , m_StrideX(1)
586 , m_StrideY(1)
587 , m_StrideZ(1)
588 , m_DilationX(1)
589 , m_DilationY(1)
590 , m_DilationZ(1)
591 , m_BiasEnabled(false)
592 , m_DataLayout(DataLayout::NDHWC)
593 {}
594
595 bool operator ==(const Convolution3dDescriptor& rhs) const
596 {
597 return m_PadLeft == rhs.m_PadLeft &&
598 m_PadRight == rhs.m_PadRight &&
599 m_PadTop == rhs.m_PadTop &&
600 m_PadBottom == rhs.m_PadBottom &&
601 m_PadFront == rhs.m_PadFront &&
602 m_PadBack == rhs.m_PadBack &&
603 m_StrideX == rhs.m_StrideX &&
604 m_StrideY == rhs.m_StrideY &&
605 m_StrideZ == rhs.m_StrideZ &&
606 m_DilationX == rhs.m_DilationX &&
607 m_DilationY == rhs.m_DilationY &&
608 m_DilationZ == rhs.m_DilationZ &&
609 m_BiasEnabled == rhs.m_BiasEnabled &&
610 m_DataLayout == rhs.m_DataLayout;
611 }
612
613 /// Get the number of views/inputs.
614 uint32_t GetNumInputs() const;
615
616 /// Padding left value in the width dimension.
617 uint32_t m_PadLeft;
618 /// Padding right value in the width dimension.
619 uint32_t m_PadRight;
620 /// Padding top value in the height dimension.
621 uint32_t m_PadTop;
622 /// Padding bottom value in the height dimension.
623 uint32_t m_PadBottom;
624 /// Padding front value in the depth dimension.
625 uint32_t m_PadFront;
626 /// Padding back value in the depth dimension.
627 uint32_t m_PadBack;
628 /// Stride value when proceeding through input for the width dimension.
629 uint32_t m_StrideX;
630 /// Stride value when proceeding through input for the height dimension.
631 uint32_t m_StrideY;
632 /// Stride value when proceeding through input for the depth dimension.
633 uint32_t m_StrideZ;
634 /// Dilation along x axis
635 uint32_t m_DilationX;
636 /// Dilation along y axis
637 uint32_t m_DilationY;
638 /// Dilation along z axis
639 uint32_t m_DilationZ;
640 /// Enable/disable bias.
641 bool m_BiasEnabled;
642 /// The data layout to be used (NDHWC, NCDHW).
643 DataLayout m_DataLayout;
644 };
645
646 /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
647 struct DepthwiseConvolution2dDescriptor : BaseDescriptor
648 {
649 DepthwiseConvolution2dDescriptor()
650 : m_PadLeft(0)
651 , m_PadRight(0)
652 , m_PadTop(0)
653 , m_PadBottom(0)
654 , m_StrideX(1)
655 , m_StrideY(1)
656 , m_DilationX(1)
657 , m_DilationY(1)
658 , m_BiasEnabled(false)
659 , m_DataLayout(DataLayout::NCHW)
660 {}
661
662 bool operator ==(const DepthwiseConvolution2dDescriptor& rhs) const
663 {
664 return m_PadLeft == rhs.m_PadLeft &&
665 m_PadRight == rhs.m_PadRight &&
666 m_PadTop == rhs.m_PadTop &&
667 m_PadBottom == rhs.m_PadBottom &&
668 m_StrideX == rhs.m_StrideX &&
669 m_StrideY == rhs.m_StrideY &&
670 m_DilationX == rhs.m_DilationX &&
671 m_DilationY == rhs.m_DilationY &&
672 m_BiasEnabled == rhs.m_BiasEnabled &&
673 m_DataLayout == rhs.m_DataLayout;
674 }
675
676 /// Get the number of views/inputs.
677 uint32_t GetNumInputs() const;
678
679 /// Padding left value in the width dimension.
680 uint32_t m_PadLeft;
681 /// Padding right value in the width dimension.
682 uint32_t m_PadRight;
683 /// Padding top value in the height dimension.
684 uint32_t m_PadTop;
685 /// Padding bottom value in the height dimension.
686 uint32_t m_PadBottom;
687 /// Stride value when proceeding through input for the width dimension.
688 uint32_t m_StrideX;
689 /// Stride value when proceeding through input for the height dimension.
690 uint32_t m_StrideY;
691 /// Dilation factor value for width dimension.
692 uint32_t m_DilationX;
693 /// Dilation factor value for height dimension.
694 uint32_t m_DilationY;
695 /// Enable/disable bias.
696 bool m_BiasEnabled;
697 /// The data layout to be used (NCHW, NHWC).
698 DataLayout m_DataLayout;
699 };
700
701 struct DetectionPostProcessDescriptor : BaseDescriptor
702 {
703 DetectionPostProcessDescriptor()
704 : m_MaxDetections(0)
705 , m_MaxClassesPerDetection(1)
706 , m_DetectionsPerClass(1)
707 , m_NmsScoreThreshold(0)
708 , m_NmsIouThreshold(0)
709 , m_NumClasses(0)
710 , m_UseRegularNms(false)
711 , m_ScaleX(0)
712 , m_ScaleY(0)
713 , m_ScaleW(0)
714 , m_ScaleH(0)
715 {}
716
717 bool operator ==(const DetectionPostProcessDescriptor& rhs) const
718 {
719 return m_MaxDetections == rhs.m_MaxDetections &&
720 m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection &&
721 m_DetectionsPerClass == rhs.m_DetectionsPerClass &&
722 m_NmsScoreThreshold == rhs.m_NmsScoreThreshold &&
723 m_NmsIouThreshold == rhs.m_NmsIouThreshold &&
724 m_NumClasses == rhs.m_NumClasses &&
725 m_UseRegularNms == rhs.m_UseRegularNms &&
726 m_ScaleX == rhs.m_ScaleX &&
727 m_ScaleY == rhs.m_ScaleY &&
728 m_ScaleW == rhs.m_ScaleW &&
729 m_ScaleH == rhs.m_ScaleH;
730 }
731
732 /// Maximum numbers of detections.
733 uint32_t m_MaxDetections;
734 /// Maximum numbers of classes per detection, used in Fast NMS.
735 uint32_t m_MaxClassesPerDetection;
736 /// Detections per classes, used in Regular NMS.
737 uint32_t m_DetectionsPerClass;
738 /// NMS score threshold.
739 float m_NmsScoreThreshold;
740 /// Intersection over union threshold.
741 float m_NmsIouThreshold;
742 /// Number of classes.
743 uint32_t m_NumClasses;
744 /// Use Regular NMS.
745 bool m_UseRegularNms;
746 /// Center size encoding scale x.
747 float m_ScaleX;
748 /// Center size encoding scale y.
749 float m_ScaleY;
750 /// Center size encoding scale weight.
751 float m_ScaleW;
752 /// Center size encoding scale height.
753 float m_ScaleH;
754 };
755
756 /// A NormalizationDescriptor for the NormalizationLayer.
757 struct NormalizationDescriptor : BaseDescriptor
758 {
759 NormalizationDescriptor()
760 : m_NormChannelType(NormalizationAlgorithmChannel::Across)
761 , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
762 , m_NormSize(0)
763 , m_Alpha(0.f)
764 , m_Beta(0.f)
765 , m_K(0.f)
766 , m_DataLayout(DataLayout::NCHW)
767 {}
768
769 bool operator ==(const NormalizationDescriptor& rhs) const
770 {
771 return m_NormChannelType == rhs.m_NormChannelType &&
772 m_NormMethodType == rhs.m_NormMethodType &&
773 m_NormSize == rhs.m_NormSize &&
774 m_Alpha == rhs.m_Alpha &&
775 m_Beta == rhs.m_Beta &&
776 m_K == rhs.m_K &&
777 m_DataLayout == rhs.m_DataLayout;
778 }
779
780 /// Normalization channel algorithm to use (Across, Within).
781 NormalizationAlgorithmChannel m_NormChannelType;
782 /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
783 NormalizationAlgorithmMethod m_NormMethodType;
784 /// Depth radius value.
785 uint32_t m_NormSize;
786 /// Alpha value for the normalization equation.
787 float m_Alpha;
788 /// Beta value for the normalization equation.
789 float m_Beta;
790 /// Kappa value used for the across channel normalization equation.
791 float m_K;
792 /// The data layout to be used (NCHW, NHWC).
793 DataLayout m_DataLayout;
794 };
795
796 /// A L2NormalizationDescriptor for the L2NormalizationLayer.
797 struct L2NormalizationDescriptor : BaseDescriptor
798 {
799 L2NormalizationDescriptor()
800 : m_Eps(1e-12f)
801 , m_DataLayout(DataLayout::NCHW)
802 {}
803
804 bool operator ==(const L2NormalizationDescriptor& rhs) const
805 {
806 return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
807 }
808
809 /// Used to avoid dividing by zero.
810 float m_Eps;
811 /// The data layout to be used (NCHW, NHWC).
812 DataLayout m_DataLayout;
813 };
814
815 /// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
816 struct BatchNormalizationDescriptor : BaseDescriptor
817 {
818 BatchNormalizationDescriptor()
819 : m_Eps(0.0001f)
820 , m_DataLayout(DataLayout::NCHW)
821 {}
822
823 bool operator ==(const BatchNormalizationDescriptor& rhs) const
824 {
825 return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
826 }
827
828 /// Value to add to the variance. Used to avoid dividing by zero.
829 float m_Eps;
830 /// The data layout to be used (NCHW, NHWC).
831 DataLayout m_DataLayout;
832 };
833
834 /// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
835 struct InstanceNormalizationDescriptor : BaseDescriptor
836 {
837 InstanceNormalizationDescriptor()
838 : m_Gamma(1.0f)
839 , m_Beta(0.0f)
840 , m_Eps(1e-12f)
841 , m_DataLayout(DataLayout::NCHW)
842 {}
843
844 bool operator ==(const InstanceNormalizationDescriptor& rhs) const
845 {
846 return m_Gamma == rhs.m_Gamma &&
847 m_Beta == rhs.m_Beta &&
848 m_Eps == rhs.m_Eps &&
849 m_DataLayout == rhs.m_DataLayout;
850 }
851
852 /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
853 float m_Gamma;
854 /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
855 float m_Beta;
856 /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
857 float m_Eps;
858 /// The data layout to be used (NCHW, NHWC).
859 DataLayout m_DataLayout;
860 };
861
862 /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
863 struct BatchToSpaceNdDescriptor : BaseDescriptor
864 {
865 BatchToSpaceNdDescriptor()
866 : m_BlockShape({1, 1})
867 , m_Crops({{0, 0}, {0, 0}})
868 , m_DataLayout(DataLayout::NCHW)
869 {}
870
871 BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
872 std::vector<std::pair<unsigned int, unsigned int>> crops)
873 : m_BlockShape(blockShape)
874 , m_Crops(crops)
875 , m_DataLayout(DataLayout::NCHW)
876 {}
877
878 bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
879 {
880 return m_BlockShape == rhs.m_BlockShape &&
881 m_Crops == rhs.m_Crops &&
882 m_DataLayout == rhs.m_DataLayout;
883 }
884
885 /// Block shape values.
886 std::vector<unsigned int> m_BlockShape;
887 /// The values to crop from the input dimension.
888 std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
889 /// The data layout to be used (NCHW, NHWC).
890 DataLayout m_DataLayout;
891 };
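// Example (editorial sketch): moving a 2x2 block factor from the batch dimension back into
// height and width without cropping (values are illustrative only).
//
//     armnn::BatchToSpaceNdDescriptor batchToSpaceDesc({ 2, 2 },                // block shape
//                                                      { { 0, 0 }, { 0, 0 } }); // no cropping
//     batchToSpaceDesc.m_DataLayout = armnn::DataLayout::NHWC;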
892
893 /// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
894 struct FakeQuantizationDescriptor : BaseDescriptor
895 {
896 FakeQuantizationDescriptor()
897 : m_Min(-6.0f)
898 , m_Max(6.0f)
899 {}
900
901 bool operator ==(const FakeQuantizationDescriptor& rhs) const
902 {
903 return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
904 }
905
906 /// Minimum value.
907 float m_Min;
908 /// Maximum value.
909 float m_Max;
910 };
911
912 /// A FillDescriptor for the FillLayer
913 struct FillDescriptor : BaseDescriptor
914 {
915 FillDescriptor()
916 : m_Value(0)
917 {}
918
919 FillDescriptor(const float& value)
920 : m_Value(value)
921 {}
922
923 bool operator ==(const FillDescriptor& rhs) const
924 {
925 return m_Value == rhs.m_Value;
926 }
927
928 float m_Value;
929 };
930
931 /// A GatherDescriptor for the GatherLayer.
932 struct GatherDescriptor : BaseDescriptor
933 {
934 GatherDescriptor()
935 : m_Axis(0)
936 {}
937
938 GatherDescriptor(int32_t axis)
939 : m_Axis(axis)
940 {}
941
942 bool operator ==(const GatherDescriptor& rhs) const
943 {
944 return m_Axis == rhs.m_Axis;
945 }
946
947 /// The axis in params to gather indices from
948 int32_t m_Axis;
949 };
950
951 /// A ResizeDescriptor for the ResizeLayer.
952 struct ResizeDescriptor : BaseDescriptor
953 {
954 ResizeDescriptor()
955 : m_TargetWidth(0)
956 , m_TargetHeight(0)
957 , m_Method(ResizeMethod::NearestNeighbor)
958 , m_DataLayout(DataLayout::NCHW)
959 , m_AlignCorners(false)
960 , m_HalfPixelCenters(false)
961 {}
962
963 bool operator ==(const ResizeDescriptor& rhs) const
964 {
965 return m_TargetWidth == rhs.m_TargetWidth &&
966 m_TargetHeight == rhs.m_TargetHeight &&
967 m_Method == rhs.m_Method &&
968 m_DataLayout == rhs.m_DataLayout &&
969 m_AlignCorners == rhs.m_AlignCorners &&
970 m_HalfPixelCenters == rhs.m_HalfPixelCenters;
971 }
972
973 /// Target width value.
974 uint32_t m_TargetWidth;
975 /// Target height value.
976 uint32_t m_TargetHeight;
977 /// The Interpolation method to use
978 /// (Bilinear, NearestNeighbor).
979 ResizeMethod m_Method;
980 /// The data layout to be used (NCHW, NHWC).
981 DataLayout m_DataLayout;
982 /// Aligned corners
983 bool m_AlignCorners;
984 /// Half Pixel Centers
985 bool m_HalfPixelCenters;
986 };
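// Example (editorial sketch): bilinear resize of an NHWC feature map to 224x224
// (values are illustrative only).
//
//     armnn::ResizeDescriptor resizeDesc;
//     resizeDesc.m_TargetWidth  = 224;
//     resizeDesc.m_TargetHeight = 224;
//     resizeDesc.m_Method       = armnn::ResizeMethod::Bilinear;
//     resizeDesc.m_DataLayout   = armnn::DataLayout::NHWC;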
987
988
989 /// A ReshapeDescriptor for the ReshapeLayer.
990 struct ReshapeDescriptor : BaseDescriptor
991 {
992 ReshapeDescriptor()
993 : m_TargetShape()
994 {}
995
996 ReshapeDescriptor(const TensorShape& shape)
997 : m_TargetShape(shape)
998 {}
999
1000 bool operator ==(const ReshapeDescriptor& rhs) const
1001 {
1002 return m_TargetShape == rhs.m_TargetShape;
1003 }
1004
1005 /// Target shape value.
1006 TensorShape m_TargetShape;
1007 };
1008
1009 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
1010 struct SpaceToBatchNdDescriptor : BaseDescriptor
1011 {
1012 SpaceToBatchNdDescriptor()
1013 : m_BlockShape({1, 1})
1014 , m_PadList({{0, 0}, {0, 0}})
1015 , m_DataLayout(DataLayout::NCHW)
1016 {}
1017
1018 SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
1019 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
1020 : m_BlockShape(blockShape)
1021 , m_PadList(padList)
1022 , m_DataLayout(DataLayout::NCHW)
1023 {}
1024
1025 bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
1026 {
1027 return m_BlockShape == rhs.m_BlockShape &&
1028 m_PadList == rhs.m_PadList &&
1029 m_DataLayout == rhs.m_DataLayout;
1030 }
1031
1032 /// Block shape value.
1033 std::vector<unsigned int> m_BlockShape;
1034 /// @brief Specifies the padding values for the input dimension:
1035 /// heightPad{top, bottom} widthPad{left, right}.
1036 std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1037 /// The data layout to be used (NCHW, NHWC).
1038 DataLayout m_DataLayout;
1039 };
1040
1041 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer
1042 struct SpaceToDepthDescriptor : BaseDescriptor
1043 {
1044 SpaceToDepthDescriptor()
1045 : SpaceToDepthDescriptor(1u, DataLayout::NHWC)
1046 {}
1047
1048 SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
1049 : m_BlockSize(blockSize)
1050 , m_DataLayout(dataLayout)
1051 {}
1052
1053 bool operator ==(const SpaceToDepthDescriptor& rhs) const
1054 {
1055 return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
1056 }
1057
1058 /// Scalar specifying the input block size. It must be >= 1
1059 unsigned int m_BlockSize;
1060
1061 /// The data layout to be used (NCHW, NHWC).
1062 DataLayout m_DataLayout;
1063 };
1064
1065 /// A DepthToSpaceDescriptor for the DepthToSpaceLayer
1066 using DepthToSpaceDescriptor = SpaceToDepthDescriptor;
1067
1068 /// An LstmDescriptor for the LstmLayer.
1069 struct LstmDescriptor : BaseDescriptor
1070 {
1071 LstmDescriptor()
1072 : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
1073 , m_ClippingThresCell(0.0)
1074 , m_ClippingThresProj(0.0)
1075 , m_CifgEnabled(true)
1076 , m_PeepholeEnabled(false)
1077 , m_ProjectionEnabled(false)
1078 , m_LayerNormEnabled(false)
1079 , m_TimeMajor(false)
1080 , m_InputIntermediateScale(0.0)
1081 , m_ForgetIntermediateScale(0.0)
1082 , m_CellIntermediateScale(0.0)
1083 , m_OutputIntermediateScale(0.0)
1084 , m_HiddenStateZeroPoint(0)
1085 , m_HiddenStateScale(0.0)
1086 {}
1087
1088 bool operator ==(const LstmDescriptor& rhs) const
1089 {
1090 return m_ActivationFunc == rhs.m_ActivationFunc &&
1091 m_ClippingThresCell == rhs.m_ClippingThresCell &&
1092 m_ClippingThresProj == rhs.m_ClippingThresProj &&
1093 m_CifgEnabled == rhs.m_CifgEnabled &&
1094 m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1095 m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1096 m_TimeMajor == rhs.m_TimeMajor &&
1097 m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
1098 m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
1099 m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
1100 m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
1101 m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
1102 m_HiddenStateScale == rhs.m_HiddenStateScale;
1103 }
1104
1105 /// @brief The activation function to use.
1106 /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
1107 uint32_t m_ActivationFunc;
1108 /// Clipping threshold value for the cell state.
1109 float m_ClippingThresCell;
1110 /// Clipping threshold value for the projection.
1111 float m_ClippingThresProj;
1112 /// Enable/disable cifg (coupled input & forget gate).
1113 bool m_CifgEnabled;
1114 /// Enable/disable peephole.
1115 bool m_PeepholeEnabled;
1116 /// Enable/disable the projection layer.
1117 bool m_ProjectionEnabled;
1118 /// Enable/disable layer normalization
1119 bool m_LayerNormEnabled;
1120 /// Enable/disable time major
1121 bool m_TimeMajor;
1122 /// Input intermediate quantization scale
1123 float m_InputIntermediateScale;
1124 /// Forget intermediate quantization scale
1125 float m_ForgetIntermediateScale;
1126 /// Cell intermediate quantization scale
1127 float m_CellIntermediateScale;
1128 /// Output intermediate quantization scale
1129 float m_OutputIntermediateScale;
1130 /// Hidden State zero point
1131 int32_t m_HiddenStateZeroPoint;
1132 /// Hidden State quantization scale
1133 float m_HiddenStateScale;
1134 };
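// Example (editorial sketch): a non-CIFG LSTM with Tanh activation and cell-state clipping.
// The values are illustrative only; the weights themselves are supplied separately through the
// LSTM layer API (LstmInputParams), which is not declared in this header.
//
//     armnn::LstmDescriptor lstmDesc;
//     lstmDesc.m_ActivationFunc    = 4;     // 4: Tanh, per the table above
//     lstmDesc.m_ClippingThresCell = 10.0f; // clip the cell state to [-10, 10]; 0 disables clipping
//     lstmDesc.m_CifgEnabled       = false; // use a separate input gate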
1135
1136 using UnidirectionalSequenceLstmDescriptor = LstmDescriptor;
1137
1138 /// A MeanDescriptor for the MeanLayer.
1139 struct MeanDescriptor : BaseDescriptor
1140 {
1141 MeanDescriptor()
1142 : m_Axis()
1143 , m_KeepDims(false)
1144 {}
1145
1146 MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
1147 : m_Axis(axis)
1148 , m_KeepDims(keepDims)
1149 {}
1150
1151 bool operator ==(const MeanDescriptor& rhs) const
1152 {
1153 return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
1154 }
1155
1156 /// Values for the dimensions to reduce.
1157 std::vector<unsigned int> m_Axis;
1158 /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
1159 bool m_KeepDims;
1160 };
1161
1162 /// A PadDescriptor for the PadLayer.
1163 struct PadDescriptor : BaseDescriptor
1164 {
1165 PadDescriptor() : m_PadValue(0), m_PaddingMode(PaddingMode::Constant)
1166 {}
1167
1168 PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList,
1169 const float& padValue = 0,
1170 const PaddingMode& paddingMode = PaddingMode::Constant)
1171 : m_PadList(padList)
1172 , m_PadValue(padValue)
1173 , m_PaddingMode(paddingMode)
1174 {}
1175
1176 bool operator ==(const PadDescriptor& rhs) const
1177 {
1178 return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue && m_PaddingMode == rhs.m_PaddingMode;
1179 }
1180
1181 /// @brief Specifies the padding for input dimension.
1182 /// First is the number of values to add before the tensor in the dimension.
1183 /// Second is the number of values to add after the tensor in the dimension.
1184 /// The number of pairs should match the number of dimensions in the input tensor.
1185 std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1186
1187 /// Optional value to use for padding, defaults to 0
1188 float m_PadValue;
1189
1190 /// Specifies the Padding mode (Constant, Reflect or Symmetric)
1191 PaddingMode m_PaddingMode;
1192 };
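// Example (editorial sketch): zero-padding an NCHW tensor by one element on each side of its
// height and width dimensions only (values are illustrative only).
//
//     std::vector<std::pair<unsigned int, unsigned int>> padList{ { 0, 0 },   // batch
//                                                                 { 0, 0 },   // channels
//                                                                 { 1, 1 },   // height
//                                                                 { 1, 1 } }; // width
//     armnn::PadDescriptor padDesc(padList, 0.0f, armnn::PaddingMode::Constant);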
1193
1194 /// A SliceDescriptor for the SliceLayer.
1195 struct SliceDescriptor : BaseDescriptor
1196 {
1197 SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
1198 : m_Begin(begin)
1199 , m_Size(size)
1200 {}
1201
1202 SliceDescriptor() : SliceDescriptor({}, {})
1203 {}
1204
1205 bool operator ==(const SliceDescriptor& rhs) const
1206 {
1207 return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
1208 }
1209
1210 /// Beginning indices of the slice in each dimension.
1211 std::vector<unsigned int> m_Begin;
1212
1213 /// Size of the slice in each dimension.
1214 std::vector<unsigned int> m_Size;
1215 };
1216
1217 /// A StackDescriptor for the StackLayer.
1218 struct StackDescriptor : BaseDescriptor
1219 {
1220 StackDescriptor()
1221 : m_Axis(0)
1222 , m_NumInputs(0)
1223 , m_InputShape()
1224 {}
1225
1226 StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
1227 : m_Axis(axis)
1228 , m_NumInputs(numInputs)
1229 , m_InputShape(inputShape)
1230 {}
1231
1232 bool operator ==(const StackDescriptor& rhs) const
1233 {
1234 return m_Axis == rhs.m_Axis &&
1235 m_NumInputs == rhs.m_NumInputs &&
1236 m_InputShape == rhs.m_InputShape;
1237 }
1238
1239 /// 0-based axis along which to stack the input tensors.
1240 uint32_t m_Axis;
1241 /// Number of input tensors.
1242 uint32_t m_NumInputs;
1243 /// Required shape of all input tensors.
1244 TensorShape m_InputShape;
1245 };
1246
1247 /// A StandInDescriptor for the StandIn layer
1248 struct StandInDescriptor : BaseDescriptor
1249 {
1250 StandInDescriptor() {};
1251
1252 StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
1253 : m_NumInputs(numInputs)
1254 , m_NumOutputs(numOutputs)
1255 {}
1256
1257 bool operator ==(const StandInDescriptor& rhs) const
1258 {
1259 return m_NumInputs == rhs.m_NumInputs &&
1260 m_NumOutputs == rhs.m_NumOutputs;
1261 }
1262
1263 /// Number of input tensors
1264 uint32_t m_NumInputs = 0;
1265 /// Number of output tensors
1266 uint32_t m_NumOutputs = 0;
1267 };
1268
1269 /// A StridedSliceDescriptor for the StridedSliceLayer.
1270 struct StridedSliceDescriptor : BaseDescriptor
1271 {
1272 StridedSliceDescriptor(const std::vector<int>& begin,
1273 const std::vector<int>& end,
1274 const std::vector<int>& stride)
1275 : m_Begin(begin)
1276 , m_End(end)
1277 , m_Stride(stride)
1278 , m_BeginMask(0)
1279 , m_EndMask(0)
1280 , m_ShrinkAxisMask(0)
1281 , m_EllipsisMask(0)
1282 , m_NewAxisMask(0)
1283 , m_DataLayout(DataLayout::NCHW)
1284 {}
1285
1286 StridedSliceDescriptor()
1287 : StridedSliceDescriptor({}, {}, {})
1288 {}
1289
1290 bool operator ==(const StridedSliceDescriptor& rhs) const
1291 {
1292 return m_Begin == rhs.m_Begin &&
1293 m_End == rhs.m_End &&
1294 m_Stride == rhs.m_Stride &&
1295 m_BeginMask == rhs.m_BeginMask &&
1296 m_EndMask == rhs.m_EndMask &&
1297 m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
1298 m_EllipsisMask == rhs.m_EllipsisMask &&
1299 m_NewAxisMask == rhs.m_NewAxisMask &&
1300 m_DataLayout == rhs.m_DataLayout;
1301 }
1302
1303 int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
1304 int GetStopForAxis(const TensorShape& inputShape,
1305 unsigned int axis,
1306 int startForAxis) const;
1307
1308 /// Begin values for the input that will be sliced.
1309 std::vector<int> m_Begin;
1310 /// End values for the input that will be sliced.
1311 std::vector<int> m_End;
1312 /// Stride values for the input that will be sliced.
1313 std::vector<int> m_Stride;
1314
1315 /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
1316 /// range is used for the dimension.
1317 int32_t m_BeginMask;
1318 /// @brief End mask value. If set, then the end is disregarded and the fullest range
1319 /// is used for the dimension.
1320 int32_t m_EndMask;
1321 /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
1322 int32_t m_ShrinkAxisMask;
1323 /// Ellipsis mask value.
1324 int32_t m_EllipsisMask;
1325 /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
1326 /// a new 1 dimension is inserted to this location of the output tensor.
1327 int32_t m_NewAxisMask;
1328
1329 /// The data layout to be used (NCHW, NHWC).
1330 DataLayout m_DataLayout;
1331 };
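// Example (editorial sketch): keeping every second element along dimension 2 of a 4D tensor
// while leaving the other dimensions untouched. Setting a bit in m_BeginMask/m_EndMask makes
// the corresponding begin/end value fall back to the full range (values are illustrative only).
//
//     armnn::StridedSliceDescriptor stridedSliceDesc({ 0, 0, 0, 0 },   // begin
//                                                    { 0, 0, 0, 0 },   // end (masked out below)
//                                                    { 1, 1, 2, 1 });  // stride
//     stridedSliceDesc.m_BeginMask = 0xF; // take the full start of all four dimensions
//     stridedSliceDesc.m_EndMask   = 0xF; // take the full end of all four dimensions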
1332
1333 /// A PreCompiledDescriptor for the PreCompiledLayer.
1334 struct PreCompiledDescriptor : BaseDescriptor
1335 {
1336 PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
1337 : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
1338 {}
1339
1340 ~PreCompiledDescriptor() = default;
1341
1342 unsigned int m_NumInputSlots;
1343 unsigned int m_NumOutputSlots;
1344 };
1345
1346 /// A QLstmDescriptor for the QLstmLayer.
1347 struct QLstmDescriptor : BaseDescriptor
1348 {
1349 QLstmDescriptor()
1350 : m_CellClip(0.0)
1351 , m_ProjectionClip(0.0)
1352 , m_CifgEnabled(true)
1353 , m_PeepholeEnabled(false)
1354 , m_ProjectionEnabled(false)
1355 , m_LayerNormEnabled(false)
1356 , m_InputIntermediateScale(0.0)
1357 , m_ForgetIntermediateScale(0.0)
1358 , m_CellIntermediateScale(0.0)
1359 , m_OutputIntermediateScale(0.0)
1360 , m_HiddenStateZeroPoint(0)
1361 , m_HiddenStateScale(0.0)
1362 {}
1363
1364 bool operator ==(const QLstmDescriptor& rhs) const
1365 {
1366 return m_CellClip == rhs.m_CellClip &&
1367 m_ProjectionClip == rhs.m_ProjectionClip &&
1368 m_CifgEnabled == rhs.m_CifgEnabled &&
1369 m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1370 m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
1371 m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1372 m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
1373 m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
1374 m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
1375 m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
1376 m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
1377 m_HiddenStateScale == rhs.m_HiddenStateScale;
1378 }
1379
1380 /// Clipping threshold value for the cell state
1381 float m_CellClip;
1382 /// Clipping threshold value for the projection
1383 float m_ProjectionClip;
1384 /// Enable/disable CIFG (coupled input & forget gate).
1385 bool m_CifgEnabled;
1386 /// Enable/disable peephole
1387 bool m_PeepholeEnabled;
1388 /// Enable/disable the projection layer
1389 bool m_ProjectionEnabled;
1390 /// Enable/disable layer normalization
1391 bool m_LayerNormEnabled;
1392 /// Input intermediate quantization scale
1393 float m_InputIntermediateScale;
1394 /// Forget intermediate quantization scale
1395 float m_ForgetIntermediateScale;
1396 /// Cell intermediate quantization scale
1397 float m_CellIntermediateScale;
1398 /// Output intermediate quantization scale
1399 float m_OutputIntermediateScale;
1400 /// Hidden State zero point
1401 int32_t m_HiddenStateZeroPoint;
1402 /// Hidden State quantization scale
1403 float m_HiddenStateScale;
1404 };
1405
1406 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
1407 struct TransposeConvolution2dDescriptor : BaseDescriptor
1408 {
1409 TransposeConvolution2dDescriptor() :
1410 m_PadLeft(0),
1411 m_PadRight(0),
1412 m_PadTop(0),
1413 m_PadBottom(0),
1414 m_StrideX(0),
1415 m_StrideY(0),
1416 m_BiasEnabled(false),
1417 m_DataLayout(DataLayout::NCHW),
1418 m_OutputShapeEnabled(false)
1419 {}
1420
1421 bool operator ==(const TransposeConvolution2dDescriptor& rhs) const
1422 {
1423 return m_PadLeft == rhs.m_PadLeft &&
1424 m_PadRight == rhs.m_PadRight &&
1425 m_PadTop == rhs.m_PadTop &&
1426 m_PadBottom == rhs.m_PadBottom &&
1427 m_StrideX == rhs.m_StrideX &&
1428 m_StrideY == rhs.m_StrideY &&
1429 m_BiasEnabled == rhs.m_BiasEnabled &&
1430 m_DataLayout == rhs.m_DataLayout &&
1431 m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
1432 m_OutputShape == rhs.m_OutputShape;
1433 }
1434
1435 /// Padding left value in the width dimension.
1436 uint32_t m_PadLeft;
1437 /// Padding right value in the width dimension.
1438 uint32_t m_PadRight;
1439 /// Padding top value in the height dimension.
1440 uint32_t m_PadTop;
1441 /// Padding bottom value in the height dimension.
1442 uint32_t m_PadBottom;
1443 /// Stride value when proceeding through input for the width dimension.
1444 uint32_t m_StrideX;
1445 /// Stride value when proceeding through input for the height dimension.
1446 uint32_t m_StrideY;
1447 /// Enable/disable bias.
1448 bool m_BiasEnabled;
1449 /// The data layout to be used (NCHW, NHWC).
1450 DataLayout m_DataLayout;
1451 /// Output shape if it has been specified.
1452 bool m_OutputShapeEnabled;
1453 std::vector<unsigned int> m_OutputShape;
1454 };
1455
1456 /// A TransposeDescriptor for the TransposeLayer.
1457 struct TransposeDescriptor : BaseDescriptor
1458 {
1459 TransposeDescriptor()
1460 : m_DimMappings{}
1461 {}
1462
1463 TransposeDescriptor(const PermutationVector& dimMappings)
1464 : m_DimMappings(dimMappings)
1465 {}
1466
1467 bool operator ==(const TransposeDescriptor &rhs) const
1468 {
1469 return m_DimMappings.IsEqual(rhs.m_DimMappings);
1470 }
1471
1472 /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
1473 /// source and target potentially have different memory layouts e.g.
1474 /// Input Shape {1, 1, 4, 4}
1475 /// Permutation Vector {0, 2, 3, 1}
1476 /// Output Shape {1, 4, 4, 1}
1477 /// dim "0" of input goes into index 0 ([ 1, X, X, X])
1478 /// dim "2" of input goes into index 1 ([ 1, 4, X, X ])
1479 /// dim "3" of input goes into index 2 ([ 1, 4, 4, X ])
1480 /// dim "1" of input goes into index 3 ([ 1, 4, 4, 1 ])
1481 PermutationVector m_DimMappings;
1482 };
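// Example (editorial sketch): the same {0, 2, 3, 1} vector as in the PermuteDescriptor example,
// but interpreted the opposite way: here entry i names the input dimension that supplies output
// dimension i, so this mapping converts an NCHW tensor to NHWC.
//
//     armnn::PermutationVector dimMappings({ 0, 2, 3, 1 }); // output dim 1 comes from input dim 2, etc.
//     armnn::TransposeDescriptor transposeDesc(dimMappings);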
1483
1484 /// A LogicalBinaryDescriptor for the LogicalBinaryLayer
1485 struct LogicalBinaryDescriptor : BaseDescriptor
1486 {
1487 LogicalBinaryDescriptor()
1488 : LogicalBinaryDescriptor(LogicalBinaryOperation::LogicalAnd)
1489 {}
1490
1491 LogicalBinaryDescriptor(LogicalBinaryOperation operation)
1492 : m_Operation(operation)
1493 {}
1494
1495 bool operator ==(const LogicalBinaryDescriptor &rhs) const
1496 {
1497 return m_Operation == rhs.m_Operation;
1498 }
1499
1500 /// Specifies the logical operation to execute
1501 LogicalBinaryOperation m_Operation;
1502 };
1503
1504 /// A ReduceDescriptor for the REDUCE operators.
1505 struct ReduceDescriptor : BaseDescriptor
1506 {
1507 ReduceDescriptor()
1508 : m_KeepDims(false)
1509 , m_vAxis()
1510 , m_ReduceOperation(ReduceOperation::Sum)
1511 {}
1512
1513 bool operator ==(const ReduceDescriptor& rhs) const
1514 {
1515 return m_KeepDims == rhs.m_KeepDims &&
1516 m_vAxis == rhs.m_vAxis &&
1517 m_ReduceOperation == rhs.m_ReduceOperation;
1518 }
1519
1520 /// If true, the reduced dimensions are retained with size 1, so the output rank matches the input rank.
1521 bool m_KeepDims;
1522 /// The indices of the dimensions to reduce.
1523 std::vector<uint32_t> m_vAxis;
1524 /// Specifies the reduction operation to execute
1525 ReduceOperation m_ReduceOperation;
1526 };
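// Example (editorial sketch): summing over the spatial dimensions of an NCHW tensor while
// keeping the reduced dimensions as size 1 (values are illustrative only).
//
//     armnn::ReduceDescriptor reduceDesc;
//     reduceDesc.m_vAxis           = { 2, 3 }; // reduce over H and W
//     reduceDesc.m_KeepDims        = true;
//     reduceDesc.m_ReduceOperation = armnn::ReduceOperation::Sum;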
1527
1528 /// A ChannelShuffleDescriptor for the ChannelShuffle operator
1529 struct ChannelShuffleDescriptor : BaseDescriptor
1530 {
1531 ChannelShuffleDescriptor()
1532 : m_NumGroups(0), m_Axis(0)
1533 {}
1534
1535 ChannelShuffleDescriptor(const uint32_t& numGroups, const uint32_t& axis)
1536 : m_NumGroups(numGroups), m_Axis(axis)
1537 {}
1538
1539 bool operator ==(const ChannelShuffleDescriptor& rhs) const
1540 {
1541 return m_NumGroups == rhs.m_NumGroups;
1542 }
1543
1544 /// Number of groups for the channel shuffle operation
1545 uint32_t m_NumGroups;
1546 /// Axis to apply channel shuffle operation on
1547 uint32_t m_Axis;
1548 };
1549
1550 /// A BatchMatMulDescriptor for the BatchMatMul operator
1551 struct BatchMatMulDescriptor : BaseDescriptor
1552 {
1553 BatchMatMulDescriptor(bool transposeX = false,
1554 bool transposeY = false,
1555 bool adjointX = false,
1556 bool adjointY = false,
1557 DataLayout dataLayoutX = DataLayout::NCHW,
1558 DataLayout dataLayoutY = DataLayout::NCHW)
1559 : m_TransposeX(transposeX)
1560 , m_TransposeY(transposeY)
1561 , m_AdjointX(adjointX)
1562 , m_AdjointY(adjointY)
1563 , m_DataLayoutX(dataLayoutX)
1564 , m_DataLayoutY(dataLayoutY)
1565 {}
1566
1567 bool operator ==(const BatchMatMulDescriptor &rhs) const
1568 {
1569 return m_TransposeX == rhs.m_TransposeX &&
1570 m_TransposeY == rhs.m_TransposeY &&
1571 m_AdjointX == rhs.m_AdjointX &&
1572 m_AdjointY == rhs.m_AdjointY &&
1573 m_DataLayoutX == rhs.m_DataLayoutX &&
1574 m_DataLayoutY == rhs.m_DataLayoutY;
1575 }
1576
1577 /// Transpose the slices of each input tensor
1578 /// Transpose and Adjoint cannot both be set to true for the same tensor at the same time.
1579 bool m_TransposeX;
1580 bool m_TransposeY;
1581
1582 /// Adjoint the slices of each input tensor
1583 /// Transpose and Adjoint cannot both be set to true for the same tensor at the same time.
1584 bool m_AdjointX;
1585 bool m_AdjointY;
1586
1587 /// Data layout of each input tensor, such as NHWC/NDHWC (leave as default for arbitrary layout)
1588 DataLayout m_DataLayoutX;
1589 DataLayout m_DataLayoutY;
1590
1591 ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use ABI Stable "
1592 "GetAxesToMul(DataLayout dataLayout, const TensorShape& tensorShape) instead.",
1593 "23.05")
1594 static std::pair<std::pair<unsigned int, unsigned int>, std::pair<unsigned int, unsigned int>> GetAxesToMul(
1595 const BatchMatMulDescriptor& desc,
1596 const TensorShape& tensorXShape,
1597 const TensorShape& tensorYShape);
1598
1599 ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use ABI Stable "
1600 "GetAxesNotMul(DataLayout dataLayout, const TensorShape& tensorShape) instead.",
1601 "23.05")
1602 static std::pair<std::vector<unsigned int>, std::vector<unsigned int>> GetAxesNotMul(
1603 const BatchMatMulDescriptor& desc,
1604 const TensorShape& inputXShape,
1605 const TensorShape& inputYShape);
1606
1607 /// Static helper to get the two axes (for each input) for multiplication
1608 static std::pair<unsigned int, unsigned int> GetAxesToMul(
1609 DataLayout dataLayout,
1610 const TensorShape& tensorShape);
1611
1612 /// Static helper to get the axes (for each input) that will not be multiplied together
1613 static std::vector<unsigned int> GetAxesNotMul(
1614 DataLayout dataLayout,
1615 const TensorShape& tensorShape);
1616
1617 /// Static helper to get the axes which will be transposed
1618 static PermutationVector GetPermuteVec(
1619 DataLayout dataLayout,
1620 const TensorShape& tensorShape);
1621 };
1622
1623 } // namespace armnn
1624