//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/INetwork.hpp>
#include <armnn/Types.hpp>

#include <CommonTestUtils.hpp>
#include <ResolveType.hpp>

#include <doctest/doctest.h>

namespace
{

using namespace armnn;

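// Builds a single-layer Pooling2d network: 3x3 pool window, stride 1, padding of 1 on
// all sides, NHWC data layout. The pooling algorithm, padding method and quantisation
// parameters are configurable; the defaults give an unquantised max pool.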
template<typename armnn::DataType DataType>
armnn::INetworkPtr CreatePooling2dNetwork(const armnn::TensorShape& inputShape,
                                          const armnn::TensorShape& outputShape,
                                          PaddingMethod padMethod = PaddingMethod::Exclude,
                                          PoolingAlgorithm poolAlg = PoolingAlgorithm::Max,
                                          const float qScale = 1.0f,
                                          const int32_t qOffset = 0)
{
    INetworkPtr network(INetwork::Create());

    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset, true);

    Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = poolAlg;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = padMethod;
    descriptor.m_DataLayout = DataLayout::NHWC;

    IConnectableLayer* pool = network->AddPooling2dLayer(descriptor, "pool");
    IConnectableLayer* input = network->AddInputLayer(0, "input");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    Connect(input, pool, inputTensorInfo, 0, 0);
    Connect(pool, output, outputTensorInfo, 0, 0);

    return network;
}

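// End-to-end max pooling test: runs a 3x3, stride-1, pad-1 max pool over a 1x3x3x1 input
// holding the values 1..9 and checks the output against precomputed reference values.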
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void MaxPool2dEndToEnd(const std::vector<armnn::BackendId>& backends,
                       PaddingMethod padMethod = PaddingMethod::Exclude)
{
    const TensorShape& inputShape = { 1, 3, 3, 1 };
    const TensorShape& outputShape = { 1, 3, 3, 1 };

    INetworkPtr network = CreatePooling2dNetwork<ArmnnType>(inputShape, outputShape, padMethod);

    CHECK(network);

    std::vector<T> inputData{ 1, 2, 3,
                              4, 5, 6,
                              7, 8, 9 };
    std::vector<T> expectedOutput{ 5, 6, 6,
                                   8, 9, 9,
                                   8, 9, 9 };

    std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
    std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
}

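// Float16 variant of the max pooling end-to-end test, using half_float literals for the
// input and reference data.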
template<armnn::DataType ArmnnType>
void MaxPool2dEndToEndFloat16(const std::vector<armnn::BackendId>& backends)
{
    using namespace half_float::literal;
    using Half = half_float::half;

    const TensorShape& inputShape = { 1, 3, 3, 1 };
    const TensorShape& outputShape = { 1, 3, 3, 1 };

    INetworkPtr network = CreatePooling2dNetwork<ArmnnType>(inputShape, outputShape);
    CHECK(network);

    std::vector<Half> inputData{ 1._h, 2._h, 3._h,
                                 4._h, 5._h, 6._h,
                                 7._h, 8._h, 9._h };
    std::vector<Half> expectedOutput{ 5._h, 6._h, 6._h,
                                      8._h, 9._h, 9._h,
                                      8._h, 9._h, 9._h };

    std::map<int, std::vector<Half>> inputTensorData = { { 0, inputData } };
    std::map<int, std::vector<Half>> expectedOutputData = { { 0, expectedOutput } };

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
}

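// End-to-end average pooling test over the same 1..9 input. The reference output depends
// on the padding method: Exclude averages only the values inside the tensor, while
// IgnoreValue divides by the full 3x3 window size, padding included.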
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void AvgPool2dEndToEnd(const std::vector<armnn::BackendId>& backends,
                       PaddingMethod padMethod = PaddingMethod::Exclude)
{
    const TensorShape& inputShape = { 1, 3, 3, 1 };
    const TensorShape& outputShape = { 1, 3, 3, 1 };

    INetworkPtr network = CreatePooling2dNetwork<ArmnnType>(
        inputShape, outputShape, padMethod, PoolingAlgorithm::Average);
    CHECK(network);

    std::vector<T> inputData{ 1, 2, 3,
                              4, 5, 6,
                              7, 8, 9 };
    std::vector<T> expectedOutput;
    if (padMethod == PaddingMethod::Exclude)
    {
        expectedOutput = { 3.f , 3.5f, 4.f ,
                           4.5f, 5.f , 5.5f,
                           6.f , 6.5f, 7.f  };
    }
    else
    {
        expectedOutput = { 1.33333f, 2.33333f, 1.77778f,
                           3.f     , 5.f     , 3.66667f,
                           2.66667f, 4.33333f, 3.11111f };
    }

    std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
    std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                inputTensorData,
                                                expectedOutputData,
                                                backends,
                                                0.00001f);
}

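// Float16 variant of the average pooling end-to-end test. It defaults to
// PaddingMethod::IgnoreValue, so the reference values include the zero padding in the
// window average.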
template<armnn::DataType ArmnnType>
void AvgPool2dEndToEndFloat16(const std::vector<armnn::BackendId>& backends,
                              PaddingMethod padMethod = PaddingMethod::IgnoreValue)
{
    using namespace half_float::literal;
    using Half = half_float::half;

    const TensorShape& inputShape = { 1, 3, 3, 1 };
    const TensorShape& outputShape = { 1, 3, 3, 1 };

    INetworkPtr network = CreatePooling2dNetwork<ArmnnType>(
        inputShape, outputShape, padMethod, PoolingAlgorithm::Average);
    CHECK(network);

    std::vector<Half> inputData{ 1._h, 2._h, 3._h,
                                 4._h, 5._h, 6._h,
                                 7._h, 8._h, 9._h };
    std::vector<Half> expectedOutput{ 1.33333_h, 2.33333_h, 1.77778_h,
                                      3._h     , 5._h     , 3.66667_h,
                                      2.66667_h, 4.33333_h, 3.11111_h };

    std::map<int, std::vector<Half>> inputTensorData = { { 0, inputData } };
    std::map<int, std::vector<Half>> expectedOutputData = { { 0, expectedOutput } };

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                inputTensorData,
                                                expectedOutputData,
                                                backends,
                                                0.00001f);
}

} // anonymous namespace
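
// Example usage (sketch, not part of this header): a backend's end-to-end test file is
// expected to include this header and register doctest cases that call the helpers above.
// The backend id (armnn::Compute::CpuRef) and the test names below are illustrative
// assumptions, not something this file defines.
//
//     TEST_CASE("MaxPool2dEndToEndFloat32Test")
//     {
//         MaxPool2dEndToEnd<armnn::DataType::Float32>({ armnn::Compute::CpuRef });
//     }
//
//     TEST_CASE("AvgPool2dEndToEndFloat32IgnoreValueTest")
//     {
//         AvgPool2dEndToEnd<armnn::DataType::Float32>({ armnn::Compute::CpuRef },
//                                                     armnn::PaddingMethod::IgnoreValue);
//     }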