//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
#include "Pooling2dLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <cmath>
16
17 using namespace armnnUtils;
18
19 namespace armnn
20 {
21
Pooling2dLayer(const Pooling2dDescriptor & param,const char * name)22 Pooling2dLayer::Pooling2dLayer(const Pooling2dDescriptor& param, const char* name)
23 : LayerWithParameters(1, 1, LayerType::Pooling2d, param, name)
24 {
25 }
26
CreateWorkload(const IWorkloadFactory & factory) const27 std::unique_ptr<IWorkload> Pooling2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
28 {
29 Pooling2dQueueDescriptor descriptor;
30 SetAdditionalInfo(descriptor);
31
32 return factory.CreateWorkload(LayerType::Pooling2d, descriptor, PrepInfoAndDesc(descriptor));
33 }
34
Clone(Graph & graph) const35 Pooling2dLayer* Pooling2dLayer::Clone(Graph& graph) const
36 {
37 return CloneBase<Pooling2dLayer>(graph, m_Param, GetName());
38 }
39
InferOutputShapes(const std::vector<TensorShape> & inputShapes) const40 std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
41 {
42 ARMNN_ASSERT(inputShapes.size() == 1);
43 const TensorShape& inputShape = inputShapes[0];
44 const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
45
46 // If we support multiple batch dimensions in the future, then this assert will need to change.
47 ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");
48
49 unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
50 unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
51 unsigned int inChannels = inputShape[dimensionIndices.GetChannelsIndex()];
52 unsigned int inBatchSize = inputShape[0];
53
54 bool isGlobalPooling = (m_Param.m_StrideX==0 && m_Param.m_StrideY==0);
55 unsigned int outWidth = 1;
56 unsigned int outHeight = 1;
57 if (!isGlobalPooling)
58 {
59 ARMNN_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0,
60 "Stride can only be zero when performing global pooling");
61
62 auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto outputShapeRounding)
63 {
64 unsigned int readSize = inSize + lowPad + highPad - poolSize;
65 float div = static_cast<float>(readSize) / static_cast<float>(stride);
66
67 unsigned int size = 0;
68 switch (outputShapeRounding)
69 {
70 case OutputShapeRounding::Ceiling:
71 size = static_cast<unsigned int>(ceil(div)) + 1;
72 break;
73 case OutputShapeRounding ::Floor:
74 size = static_cast<unsigned int>(floor(div)) + 1;
75 break;
76 default:
77 ARMNN_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
78 }
79
80 // MakeS sure that border operations will start from inside the input and not the padded area.
81 // This is what CL does...
82 if ((size - 1)*stride >= inSize + lowPad)
83 {
84 --size;
85 }
86
87 return size;
88 };
89
90 outWidth = CalcSize(inWidth, m_Param.m_PadLeft, m_Param.m_PadRight, m_Param.m_PoolWidth, m_Param.m_StrideX,
91 m_Param.m_OutputShapeRounding);
92 outHeight = CalcSize(inHeight, m_Param.m_PadTop, m_Param.m_PadBottom, m_Param.m_PoolHeight, m_Param.m_StrideY,
93 m_Param.m_OutputShapeRounding);
94 }
95 unsigned int outChannels = inChannels;
96 unsigned int outBatchSize = inBatchSize;
97
98 TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
99 TensorShape( { outBatchSize, outHeight, outWidth, outChannels } ) :
100 TensorShape( { outBatchSize, outChannels, outHeight, outWidth });
101
102 return std::vector<TensorShape>({ tensorShape });
103 }
104
ValidateTensorShapesFromInputs()105 void Pooling2dLayer::ValidateTensorShapesFromInputs()
106 {
107 VerifyLayerConnections(1, CHECK_LOCATION());
108
109 const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
110
111 VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
112
113 auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
114
115 ARMNN_ASSERT(inferredShapes.size() == 1);
116
117 ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling2dLayer");
118 }
119
ExecuteStrategy(IStrategy & strategy) const120 void Pooling2dLayer::ExecuteStrategy(IStrategy& strategy) const
121 {
122 strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
123 }
124
125 } // namespace armnn
126