//
// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ReduceLayer.hpp"
#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <algorithm>

namespace armnn
{

ReduceLayer::ReduceLayer(const ReduceDescriptor& param, const char* name)
    : LayerWithParameters(1, 1, LayerType::Reduce, param, name)
{
}

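// Creates the Reduce workload for this layer: the layer's parameters (axes, keep-dims
// flag and reduce operation) are copied into a ReduceQueueDescriptor, which is then
// handed to the backend's workload factory.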
std::unique_ptr<IWorkload> ReduceLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    ReduceQueueDescriptor descriptor;
    descriptor.m_Parameters.m_vAxis = m_Param.m_vAxis;
    descriptor.m_Parameters.m_KeepDims = m_Param.m_KeepDims;
    descriptor.m_Parameters.m_ReduceOperation = m_Param.m_ReduceOperation;
    SetAdditionalInfo(descriptor);

    return factory.CreateWorkload(LayerType::Reduce, descriptor, PrepInfoAndDesc(descriptor));
}

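// Clones this layer into the given graph: CloneBase constructs the new layer from
// m_Param, and the individual parameters are copied across explicitly as well.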
ReduceLayer* ReduceLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<ReduceLayer>(graph, m_Param, GetName());
    layer->m_Param.m_vAxis = m_Param.m_vAxis;
    layer->m_Param.m_KeepDims = m_Param.m_KeepDims;
    layer->m_Param.m_ReduceOperation = m_Param.m_ReduceOperation;

    return std::move(layer);
}

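// Checks that the connected input tensor is at most 4D and validates the shape on the
// output slot against the shape inferred from the input (or copies it across,
// depending on the shape inference method).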
void ReduceLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();

    ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
                     "ReduceLayer: Reduce supports up to 4D input.");

    std::vector<TensorShape> inferredShapes = InferOutputShapes({ input.GetShape() });

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReduceLayer");
}

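// Computes the output shape produced by reducing the single input shape over m_vAxis.
// For example, reducing a {2, 3, 4, 5} input over axis 1 yields {2, 4, 5} when
// m_KeepDims is false and {2, 1, 4, 5} when it is true; an empty axis list reduces
// over every dimension.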
std::vector<TensorShape> ReduceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 1);
    const TensorShape& input = inputShapes[0];

    ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
                     "ReduceLayer: Reduce supports up to 4D input.");

    unsigned int rank = input.GetNumDimensions();
    unsigned int outputRank = 0;

    // Calculate the output rank.
    if (m_Param.m_KeepDims)
    {
        // Reduced dimensions are kept with size 1, so the rank is unchanged.
        outputRank = rank;
    }
    else if (m_Param.m_vAxis.empty())
    {
        // An empty axis list reduces over every dimension, leaving a single element.
        outputRank = 1;
    }
    else if (m_Param.m_vAxis.size() > input.GetNumDimensions())
    {
        throw LayerValidationException("ReduceLayer: Dimensions to reduce can not be bigger than input dimensions");
    }
    else
    {
        // Each reduced axis is dropped from the output, but never go below rank 1.
        outputRank = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(m_Param.m_vAxis.size());
        if (outputRank == 0)
        {
            outputRank = 1;
        }
    }

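    // Output dimensions default to 1: if m_vAxis is empty, every dimension is reduced
    // and the all-ones shape of rank outputRank is returned as-is.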
    std::vector<unsigned int> dimSizes(outputRank, 1);
    if (!m_Param.m_vAxis.empty())
    {
        // Skip the dimension that has been reduced unless keepDims is true.
        unsigned int outputIndex = 0;
        for (unsigned int i = 0; i < input.GetNumDimensions(); ++i)
        {
            if (std::find(m_Param.m_vAxis.begin(), m_Param.m_vAxis.end(), i) == m_Param.m_vAxis.end())
            {
                dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(input[i]);
                ++outputIndex;
            }
            else if (m_Param.m_KeepDims)
            {
                dimSizes[outputIndex] = 1;
                ++outputIndex;
            }
        }
    }
    return std::vector<TensorShape>({ TensorShape(outputRank, dimSizes.data()) });
}

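// Passes this layer, its parameters and its name to the visiting IStrategy
// (used, for example, when serializing or inspecting the network).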
void ReduceLayer::ExecuteStrategy(IStrategy& strategy) const
{
    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}

} // namespace armnn