//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "armnn/Descriptors.hpp"
#include "armnn/Logging.hpp"

#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <algorithm>
#include <array>
#include <cstring>
#include <limits>
#include <vector>

#include <fmt/format.h>

namespace armnn
{
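// Validates and stores a permutation: the mapping must fit within MaxNumOfTensorDimensions,
// every destination index must lie in [0, numDimMappings - 1], and no two source dimensions
// may map to the same destination dimension.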
PermutationVector::PermutationVector(const ValueType *dimMappings, const SizeType numDimMappings)
{
    // Validation

    if (numDimMappings > MaxNumOfTensorDimensions)
    {
        throw InvalidArgumentException(
            fmt::format("The number of mappings ({0}) cannot be greater "
                        "than the maximum number of dimensions supported ({1})",
                        numDimMappings,
                        MaxNumOfTensorDimensions));
    }

    if ((dimMappings == nullptr) && (numDimMappings != 0))
    {
        throw InvalidArgumentException("Dimension mappings must not be NULL if the number of mappings is positive");
    }

    for (SizeType i = 0; i < numDimMappings; ++i)
    {
        const ValueType dstIndex = dimMappings[i];
        if (dstIndex >= numDimMappings)
        {
            throw InvalidArgumentException(
                fmt::format("Dimension mapping at index {0} is invalid: "
                            "{1} is outside of the valid range [0,{2}]",
                            i,
                            dstIndex,
                            (numDimMappings - 1)));
        }
    }

    // Validation: Detect duplicates
    {
        std::array<bool, MaxNumOfTensorDimensions> observedDims;
        observedDims.fill(false);

        for (SizeType i = 0; i < numDimMappings; ++i)
        {
            const ValueType dstIndex = dimMappings[i];
            if (observedDims[dstIndex])
            {
                throw InvalidArgumentException("Invalid dimension mappings: Two or more source dimensions are mapped "
                                               "to the same output dimension");
            }
            observedDims[dstIndex] = true;
        }
    }

    // Initialize
    for (SizeType i = 0; i < numDimMappings; ++i)
    {
        m_DimMappings[i] = dimMappings[i];
    }
    m_NumDimMappings = numDimMappings;
}

PermutationVector::PermutationVector(std::initializer_list<ValueType> dimMappings)
    : PermutationVector(dimMappings.begin(), armnn::numeric_cast<SizeType>(dimMappings.size()))
{
}

OriginsDescriptor::OriginsDescriptor()
    : m_ConcatAxis(1)
    , m_NumViews(0)
    , m_NumDimensions(0)
    , m_ViewOrigins(nullptr)
{}

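// Allocates one origin (an array of numDimensions coordinates, zero-initialised) per view.
// The concatenation axis defaults to 1.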
OriginsDescriptor::OriginsDescriptor(uint32_t numViews, uint32_t numDimensions /*= 4*/)
    : m_ConcatAxis(1)
    , m_NumViews(numViews)
    , m_NumDimensions(numDimensions)
    , m_ViewOrigins(numViews && numDimensions > 0 ? new uint32_t *[numViews]() : nullptr)
{
    for (uint32_t i = 0; m_NumDimensions > 0 && i < m_NumViews; ++i)
    {
        m_ViewOrigins[i] = new uint32_t[m_NumDimensions]();
    }
}

OriginsDescriptor::OriginsDescriptor(const OriginsDescriptor& other)
    : m_ConcatAxis(other.m_ConcatAxis)
    , m_NumViews(other.m_NumViews)
    , m_NumDimensions(other.m_NumDimensions)
    , m_ViewOrigins(other.m_NumViews && other.m_NumDimensions > 0 ? new uint32_t *[other.m_NumViews]() : nullptr)
{
    for (uint32_t i = 0; m_NumDimensions > 0 && i < m_NumViews; ++i)
    {
        m_ViewOrigins[i] = new uint32_t[m_NumDimensions]();
        memcpy(m_ViewOrigins[i], other.m_ViewOrigins[i], m_NumDimensions * sizeof(uint32_t));
    }
}

OriginsDescriptor::OriginsDescriptor(OriginsDescriptor&& other)
    : OriginsDescriptor()
{
    swap(*this, other);
}

OriginsDescriptor::~OriginsDescriptor()
{
    for (uint32_t i = 0; m_NumDimensions > 0 && i < m_NumViews; ++i)
    {
        delete[] m_ViewOrigins[i];
    }
    delete[] m_ViewOrigins;
}

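// Copy-and-swap assignment: rhs is taken by value, so the copy (or move) has already
// happened by the time we get here; swapping leaves rhs to clean up the old contents.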
OriginsDescriptor& OriginsDescriptor::operator=(OriginsDescriptor rhs)
{
    swap(*this, rhs);
    return *this;
}

bool OriginsDescriptor::operator==(const OriginsDescriptor& rhs) const
{
    if (GetNumViews() != rhs.GetNumViews() ||
        GetNumDimensions() != rhs.GetNumDimensions() ||
        GetConcatAxis() != rhs.GetConcatAxis())
    {
        return false;
    }

    for (unsigned int i = 0u; i < GetNumViews(); ++i)
    {
        for (unsigned int j = 0u; j < GetNumDimensions(); ++j)
        {
            if (GetViewOrigin(i)[j] != rhs.GetViewOrigin(i)[j])
            {
                return false;
            }
        }
    }

    return true;
}

void OriginsDescriptor::SetConcatAxis(unsigned int concatAxis)
{
    m_ConcatAxis = concatAxis;
}

unsigned int OriginsDescriptor::GetConcatAxis() const
{
    return m_ConcatAxis;
}

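// Sets one coordinate of a view's origin. Returns Status::Failure (and logs an error)
// if either the view or the coordinate index is out of range.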
Status OriginsDescriptor::SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
{
    if (view >= m_NumViews)
    {
        ARMNN_LOG(error) << "OriginsDescriptor::SetViewOriginCoord: view argument: " << view <<
                            " is out of range";
        return Status::Failure;
    }
    if (coord >= m_NumDimensions)
    {
        ARMNN_LOG(error) << "OriginsDescriptor::SetViewOriginCoord: coord argument: " << coord <<
                            " is out of range";
        return Status::Failure;
    }

    m_ViewOrigins[view][coord] = value;
    return Status::Success;
}

uint32_t OriginsDescriptor::GetNumViews() const
{
    return m_NumViews;
}

uint32_t OriginsDescriptor::GetNumDimensions() const
{
    return m_NumDimensions;
}

const uint32_t* OriginsDescriptor::GetViewOrigin(uint32_t idx) const
{
    return m_ViewOrigins ? m_ViewOrigins[idx] : nullptr;
}

// Reorders the viewOrigins in accordance with the indices presented in the newOrdering array.
void OriginsDescriptor::ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering)
{
    ARMNN_ASSERT_MSG(m_NumViews == numNewOrdering, "number of views must match number of "
                                                   "elements in the new ordering array");
    std::vector<uint32_t*> viewOrigins(&m_ViewOrigins[0], &m_ViewOrigins[m_NumViews]);

    for (unsigned int i = 0; i < numNewOrdering; ++i)
    {
        m_ViewOrigins[i] = viewOrigins[newOrdering[i]];
    }
}

ViewsDescriptor::ViewsDescriptor()
    : m_Origins()
    , m_ViewSizes(nullptr)
{}

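// A ViewsDescriptor owns an OriginsDescriptor plus a per-view array of sizes; each size
// array has numDimensions elements and is zero-initialised.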
ViewsDescriptor::ViewsDescriptor(uint32_t numViews, uint32_t numDimensions /*= 4*/)
    : m_Origins(numViews, numDimensions)
    , m_ViewSizes(numViews > 0 && numDimensions > 0 ?
                  new uint32_t *[numViews]() : nullptr)
{
    if (m_ViewSizes)
    {
        for (uint32_t i = 0; GetNumDimensions() > 0 && i < GetNumViews(); ++i)
        {
            m_ViewSizes[i] = new uint32_t[GetNumDimensions()]();
        }
    }
}

ViewsDescriptor::ViewsDescriptor(const ViewsDescriptor& other)
    : m_Origins(other.m_Origins)
    , m_ViewSizes(other.GetNumViews() > 0 && other.GetNumDimensions() > 0 ?
                  new uint32_t *[other.GetNumViews()]() : nullptr)
{
    if (m_ViewSizes)
    {
        for (uint32_t i = 0; GetNumDimensions() > 0 && i < GetNumViews(); ++i)
        {
            m_ViewSizes[i] = new uint32_t[GetNumDimensions()]();
            memcpy(m_ViewSizes[i], other.m_ViewSizes[i], GetNumDimensions() * sizeof(uint32_t));
        }
    }
}

ViewsDescriptor::ViewsDescriptor(ViewsDescriptor&& other)
    : ViewsDescriptor()
{
    swap(*this, other);
}

ViewsDescriptor::~ViewsDescriptor()
{
    if (m_ViewSizes)
    {
        for (uint32_t i = 0; GetNumDimensions() > 0 && i < GetNumViews(); ++i)
        {
            delete[] m_ViewSizes[i];
        }
        delete[] m_ViewSizes;
    }
}

ViewsDescriptor& ViewsDescriptor::operator=(ViewsDescriptor rhs)
{
    swap(*this, rhs);
    return *this;
}

bool ViewsDescriptor::operator==(const ViewsDescriptor& rhs) const
{
    if (GetNumViews() != rhs.GetNumViews() || GetNumDimensions() != rhs.GetNumDimensions())
    {
        return false;
    }

    for (unsigned int i = 0u; i < GetNumViews(); ++i)
    {
        for (unsigned int j = 0u; j < GetNumDimensions(); ++j)
        {
            if (GetViewOrigin(i)[j] != rhs.GetViewOrigin(i)[j] || GetViewSizes(i)[j] != rhs.GetViewSizes(i)[j])
            {
                return false;
            }
        }
    }

    return true;
}

uint32_t ViewsDescriptor::GetNumViews() const
{
    return m_Origins.GetNumViews();
}

uint32_t ViewsDescriptor::GetNumDimensions() const
{
    return m_Origins.GetNumDimensions();
}

const uint32_t* ViewsDescriptor::GetViewOrigin(uint32_t idx) const
{
    return m_Origins.GetViewOrigin(idx);
}

Status ViewsDescriptor::SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
{
    return m_Origins.SetViewOriginCoord(view, coord, value);
}

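// Sets one coordinate of a view's size. Fails if the size arrays were never allocated
// or if the view/coordinate index is out of range.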
Status ViewsDescriptor::SetViewSize(uint32_t view, uint32_t coord, uint32_t value)
{
    if (!m_ViewSizes)
    {
        ARMNN_LOG(error) << "ViewsDescriptor::SetViewSize: invalid view sizes";
        return Status::Failure;
    }

    if (view >= GetNumViews())
    {
        ARMNN_LOG(error) << "ViewsDescriptor::SetViewSize: view argument: " << view <<
                            " is out of range";
        return Status::Failure;
    }
    if (coord >= GetNumDimensions())
    {
        ARMNN_LOG(error) << "ViewsDescriptor::SetViewSize: coord argument: " << coord <<
                            " is out of range";
        return Status::Failure;
    }

    m_ViewSizes[view][coord] = value;
    return Status::Success;
}

const uint32_t* ViewsDescriptor::GetViewSizes(uint32_t idx) const
{
    return m_ViewSizes ? m_ViewSizes[idx] : nullptr;
}

const OriginsDescriptor& ViewsDescriptor::GetOrigins() const
{
    return m_Origins;
}

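// Non-member swaps used by the copy-and-swap assignment operators above. Only raw
// pointers and integral counts are exchanged, so swapping cannot throw.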
void swap(OriginsDescriptor& first, OriginsDescriptor& second)
{
    using std::swap;
    swap(first.m_NumViews, second.m_NumViews);
    swap(first.m_NumDimensions, second.m_NumDimensions);
    swap(first.m_ViewOrigins, second.m_ViewOrigins);
    swap(first.m_ConcatAxis, second.m_ConcatAxis);
}

void swap(ViewsDescriptor& first, ViewsDescriptor& second)
{
    using std::swap;
    swap(first.m_Origins, second.m_Origins);
    swap(first.m_ViewSizes, second.m_ViewSizes);
}

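// Resolves the begin index for one axis of a strided slice (mirroring TensorFlow-style
// begin/end masks): a set bit in m_BeginMask means "start from the lowest index, or the
// highest for a negative stride", negative values count back from the end of the axis,
// and the result is clamped to [0, axisSize - 1].
// For example, with inputShape[axis] == 5 and m_Begin[axis] == -1, the start resolves
// to -1 + 5 = 4.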
int StridedSliceDescriptor::GetStartForAxis(const TensorShape& inputShape,
                                            unsigned int axis) const
{
    int start = m_Begin[axis];

    if (m_BeginMask & (1 << axis))
    {
        if (m_Stride[axis] > 0)
        {
            start = std::numeric_limits<int>::min();
        }
        else
        {
            start = std::numeric_limits<int>::max();
        }
    }

    const int axisSize = armnn::numeric_cast<int>(inputShape[axis]);
    if (start < 0)
    {
        start += (axisSize);
    }

    return std::max(0, std::min(start, axisSize - 1));
}

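// Resolves the end index for one axis of a strided slice. If the axis is being shrunk
// (m_ShrinkAxisMask), the slice covers exactly one element, so the stop is start + 1.
// A set bit in m_EndMask means "run to the end of the axis in the stride direction",
// negative values count back from the end, and the result is clamped to [0, axisSize]
// for positive strides or [-1, axisSize - 1] for negative strides (the -1 lets a
// backwards slice include element 0).
// For example, with inputShape[axis] == 5, m_End[axis] == -1 and a positive stride,
// the stop resolves to -1 + 5 = 4.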
int StridedSliceDescriptor::GetStopForAxis(const TensorShape& inputShape,
                                           unsigned int axis,
                                           int startForAxis) const
{
    if (m_ShrinkAxisMask & (1 << axis))
    {
        return startForAxis + 1;
    }

    int stop = m_End[axis];

    if (m_EndMask & (1 << axis))
    {
        if (m_Stride[axis] > 0)
        {
            stop = std::numeric_limits<int>::max();
        }
        else
        {
            stop = std::numeric_limits<int>::min();
        }
    }

    const int axisSize = armnn::numeric_cast<int>(inputShape[axis]);
    if (stop < 0)
    {
        stop += axisSize;
    }

    return m_Stride[axis] > 0 ? std::max(0, std::min(stop, axisSize)) :
                                std::max(-1, std::min(stop, axisSize - 1));
}

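// Layers that take their weights (and optional bias) as input tensors have two inputs
// (data + weights) or three when a bias is enabled.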
uint32_t GetNumInputs(bool biasEnabled)
{
    unsigned int numInputs = 2;
    if (biasEnabled)
    {
        numInputs = 3;
    }
    return numInputs;
}

uint32_t Convolution3dDescriptor::GetNumInputs() const
{
    return armnn::GetNumInputs(m_BiasEnabled);
}

uint32_t Convolution2dDescriptor::GetNumInputs() const
{
    return armnn::GetNumInputs(m_BiasEnabled);
}

uint32_t FullyConnectedDescriptor::GetNumInputs() const
{
    return armnn::GetNumInputs(m_BiasEnabled);
}

uint32_t DepthwiseConvolution2dDescriptor::GetNumInputs() const
{
    return armnn::GetNumInputs(m_BiasEnabled);
}

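// Convenience overload: returns, for each of the two inputs, the pair of axes that take
// part in the matrix multiplication, using that input's data layout from the descriptor.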
std::pair<std::pair<unsigned int, unsigned int>, std::pair<unsigned int, unsigned int>>
BatchMatMulDescriptor::GetAxesToMul(
    const BatchMatMulDescriptor& desc,
    const TensorShape& tensorXShape,
    const TensorShape& tensorYShape)
{
    return { GetAxesToMul(desc.m_DataLayoutX, tensorXShape),
             GetAxesToMul(desc.m_DataLayoutY, tensorYShape) };
}

std::pair<std::vector<unsigned int>, std::vector<unsigned int>> BatchMatMulDescriptor::GetAxesNotMul(
    const BatchMatMulDescriptor& desc,
    const TensorShape& inputXShape,
    const TensorShape& inputYShape)
{
    return { GetAxesNotMul(desc.m_DataLayoutX, inputXShape),
             GetAxesNotMul(desc.m_DataLayoutY, inputYShape) };
}

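// By default the two innermost dimensions are multiplied. For channels-last layouts
// (NHWC, NDHWC) the innermost dimension is the channel, so the multiplied axes shift
// one position towards the front; e.g. for an NHWC 4D tensor the axes are {1, 2}.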
std::pair<unsigned int, unsigned int> BatchMatMulDescriptor::GetAxesToMul(
    DataLayout dataLayout,
    const TensorShape& tensorShape)
{
    auto numDims = tensorShape.GetNumDimensions();
    std::pair<unsigned int, unsigned int> axes = { numDims-2, numDims-1 };
    switch(dataLayout)
    {
        case DataLayout::NDHWC:
        case DataLayout::NHWC:
            axes.first -= 1;
            axes.second -= 1;
            break;
        case DataLayout::NCDHW:
        case DataLayout::NCHW:
        default:
            break;
    }
    return axes;
}

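// Returns the remaining (batch/broadcast) axes, i.e. every dimension that is not one of
// the two axes being multiplied.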
std::vector<unsigned int> BatchMatMulDescriptor::GetAxesNotMul(
    DataLayout dataLayout,
    const TensorShape& tensorShape)
{
    auto axesToMul = BatchMatMulDescriptor::GetAxesToMul(dataLayout, tensorShape);
    std::vector<unsigned int> axesNotMul;
    for(unsigned int i = 0; i < tensorShape.GetNumDimensions(); i++)
    {
        if(i == axesToMul.first || i == axesToMul.second)
        {
            continue;
        }
        axesNotMul.push_back(i);
    }
    return axesNotMul;
}

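// Builds the permutation that swaps the two multiplied axes (axesToMul.second is always
// axesToMul.first + 1) while leaving every other dimension in place; applying it
// transposes the matrix held in those two axes. For example, for an NHWC 4D tensor the
// multiplied axes are {1, 2}, giving the permutation vector {0, 2, 1, 3}.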
PermutationVector BatchMatMulDescriptor::GetPermuteVec(
    DataLayout dataLayout,
    const TensorShape& tensorShape)
{
    std::vector<unsigned int> vec;
    auto axesToMul = BatchMatMulDescriptor::GetAxesToMul(dataLayout, tensorShape);
    for(unsigned int i = 0; i < tensorShape.GetNumDimensions(); i++)
    {
        if(i == axesToMul.first)
        {
            vec.push_back(i+1);
        }
        else if(i == axesToMul.second)
        {
            vec.push_back(i-1);
        }
        else
        {
            vec.push_back(i);
        }
    }
    return PermutationVector(vec.data(),
                             static_cast<unsigned int>(vec.size()));
}

} // namespace armnn