/*
 * Copyright (c) 2017-2021, 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "PoolingLayer.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/validation/Helpers.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace reference
{
using namespace arm_compute::misc::shape_calculator;

template <typename T, typename ACC_T, typename std::enable_if<is_floating_point<T>::value, int>::type>
SimpleTensor<T> pooling_layer_internal(const SimpleTensor<T> &src, const PoolingLayerInfo &info, SimpleTensor<uint32_t> *indices, DataLayout data_layout)
{
    // Create reference
    SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type()), info), src.data_type(), 1 };
    auto pooled_shape = compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type()), info);
    if(indices)
    {
        *indices = SimpleTensor<uint32_t> { pooled_shape, DataType::U32, 1 };
    }
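    // Pooling window size, strides and padding come from the layer info; global
    // pooling uses the whole input plane as the window.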
    const int   pool_size_x     = info.is_global_pooling ? src.shape().x() : info.pool_size.width;
    const int   pool_size_y     = info.is_global_pooling ? src.shape().y() : info.pool_size.height;
    PoolingType type            = info.pool_type;
    int         pool_stride_x   = info.pad_stride_info.stride().first;
    int         pool_stride_y   = info.pad_stride_info.stride().second;
    int         pad_left        = info.pad_stride_info.pad_left();
    int         pad_top         = info.pad_stride_info.pad_top();
    int         pad_right       = info.pad_stride_info.pad_right();
    int         pad_bottom      = info.pad_stride_info.pad_bottom();
    bool        exclude_padding = info.exclude_padding;

    const auto w_src = static_cast<int>(src.shape()[0]);
    const auto h_src = static_cast<int>(src.shape()[1]);
    const auto z_src = static_cast<int>(src.shape()[2]);
    const auto b_src = static_cast<int>(src.shape()[3]);

    const int upper_dims = src.shape().total_size() / (w_src * h_src);

    const auto w_dst = static_cast<int>(dst.shape()[0]);
    const auto h_dst = static_cast<int>(dst.shape()[1]);
    const auto z_dst = static_cast<int>(dst.shape()[2]);

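    // NHWC-permuted shape of the input, used so that max-pooling indices can be
    // expressed in the requested data layout.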
    TensorShape shape_nhwc(src.shape());
    permute(shape_nhwc, PermutationVector(2U, 0U, 1U));
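    // Max pooling: record the maximum of each window and, if requested, the flat
    // index of the element that produced it.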
    if(type == PoolingType::MAX)
    {
        for(int b = 0; b < b_src; ++b)
        {
            for(int r = 0; r < z_src; ++r)
            {
                for(int h = 0; h < h_dst; ++h)
                {
                    for(int w = 0; w < w_dst; ++w)
                    {
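                        // Pooling window for this output element, clamped to the input borders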
                        int wstart = w * pool_stride_x - pad_left;
                        int hstart = h * pool_stride_y - pad_top;
                        int wend   = std::min(wstart + pool_size_x, w_src);
                        int hend   = std::min(hstart + pool_size_y, h_src);
                        wstart     = std::max(wstart, 0);
                        hstart     = std::max(hstart, 0);
                        auto max_val = -std::numeric_limits<ACC_T>::infinity();
                        int  max_index{ 0 };
                        for(int y = hstart; y < hend; ++y)
                        {
                            for(int x = wstart; x < wend; ++x)
                            {
                                const auto val = static_cast<ACC_T>(src[b * z_src * h_src * w_src + r * h_src * w_src + y * w_src + x]);
                                if(val > max_val)
                                {
                                    max_val = val;
                                    if(data_layout == DataLayout::NCHW)
                                    {
                                        max_index = coord2index(src.shape(), Coordinates(x, y, r, 0));
                                    }
                                    else
                                    {
                                        max_index = coord2index(shape_nhwc, Coordinates(r, x, y, 0));
                                    }
                                }
                            }
                        }

                        dst[b * z_dst * h_dst * w_dst + r * h_dst * w_dst + h * w_dst + w] = static_cast<T>(max_val);
                        if(indices)
                        {
                            (*indices)[b * z_dst * h_dst * w_dst + r * h_dst * w_dst + h * w_dst + w] = max_index;
                        }
                    }
                }
            }
        }
    }
    else // Average or l2 pooling
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int h = 0; h < h_dst; ++h)
            {
                for(int w = 0; w < w_dst; ++w)
                {
                    ACC_T avg_val(0);
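                    // Pooling window for this output element; it may extend into the padded
                    // border, and 'pool' initially counts every element of that padded window.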
                    int wstart = w * pool_stride_x - pad_left;
                    int hstart = h * pool_stride_y - pad_top;
                    int wend   = std::min(wstart + pool_size_x, w_src + pad_right);
                    int hend   = std::min(hstart + pool_size_y, h_src + pad_bottom);
                    int pool   = (hend - hstart) * (wend - wstart);
                    wstart     = std::max(wstart, 0);
                    hstart     = std::max(hstart, 0);
                    wend       = std::min(wend, w_src);
                    hend       = std::min(hend, h_src);
                    // Exclude padding pixels from the average
                    if(exclude_padding)
                    {
                        pool = (hend - hstart) * (wend - wstart);
                    }

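                    // AVG accumulates the values; L2 accumulates the squares and takes the
                    // square root of the mean.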
                    if(type == PoolingType::AVG)
                    {
                        for(int y = hstart; y < hend; ++y)
                        {
                            for(int x = wstart; x < wend; ++x)
                            {
                                avg_val += static_cast<ACC_T>(src[r * h_src * w_src + y * w_src + x]);
                            }
                        }
                        dst[r * h_dst * w_dst + h * w_dst + w] = avg_val / pool;
                    }
                    else
                    {
                        for(int y = hstart; y < hend; ++y)
                        {
                            for(int x = wstart; x < wend; ++x)
                            {
                                const auto val = static_cast<ACC_T>(src[r * h_src * w_src + y * w_src + x]);
                                avg_val += val * val;
                            }
                        }
                        dst[r * h_dst * w_dst + h * w_dst + w] = static_cast<T>(std::sqrt(avg_val / pool));
                    }
                }
            }
        }
    }
    return dst;
}

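// Explicit instantiations of the internal reference: F32, F16, and F16 with F32
// accumulation (mixed precision).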
template SimpleTensor<float> pooling_layer_internal<float>(const SimpleTensor<float> &src, const PoolingLayerInfo &info, SimpleTensor<uint32_t> *indices, DataLayout data_layout);

template SimpleTensor<half> pooling_layer_internal<half>(const SimpleTensor<half> &src, const PoolingLayerInfo &info, SimpleTensor<uint32_t> *indices, DataLayout data_layout);

template SimpleTensor<half> pooling_layer_internal<half, float>(const SimpleTensor<half> &src, const PoolingLayerInfo &info, SimpleTensor<uint32_t> *indices, DataLayout data_layout);

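// Illustrative usage sketch (not part of this file's API): a validation test would
// typically build a PoolingLayerInfo and call the reference roughly like the lines
// below, assuming the PoolingLayerInfo overload that takes pool type, pool size,
// data layout and pad/stride info:
//
//   SimpleTensor<float>    src{ TensorShape(8U, 8U, 3U, 1U), DataType::F32 };
//   SimpleTensor<uint32_t> indices{};
//   PoolingLayerInfo       pool_info(PoolingType::MAX, Size2D(2, 2), DataLayout::NCHW, PadStrideInfo(2, 2, 0, 0));
//   SimpleTensor<float>    dst = reference::pooling_layer(src, pool_info, QuantizationInfo(), &indices, DataLayout::NCHW);
//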
template <typename T>
SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info, const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> *indices, DataLayout data_layout)
{
    ARM_COMPUTE_UNUSED(output_qinfo);
    return pooling_layer_internal<T, T>(src, info, indices, data_layout);
}

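// Quantized overloads: dequantize to F32, pool in floating point, then requantize the
// result with the requested output quantization info.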
template <>
SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, const PoolingLayerInfo &info, const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> *indices, DataLayout data_layout)
{
    SimpleTensor<float>   src_tmp = convert_from_asymmetric(src);
    SimpleTensor<float>   dst_tmp = pooling_layer_internal<float>(src_tmp, info, indices, data_layout);
    SimpleTensor<uint8_t> dst     = convert_to_asymmetric<uint8_t>(dst_tmp, output_qinfo);
    return dst;
}

template <>
SimpleTensor<int8_t> pooling_layer<int8_t>(const SimpleTensor<int8_t> &src, const PoolingLayerInfo &info, const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> *indices, DataLayout data_layout)
{
    SimpleTensor<float>  src_tmp = convert_from_asymmetric(src);
    SimpleTensor<float>  dst_tmp = pooling_layer_internal<float>(src_tmp, info, indices, data_layout);
    SimpleTensor<int8_t> dst     = convert_to_asymmetric<int8_t>(dst_tmp, output_qinfo);
    return dst;
}

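// F16 overload: when fp_mixed_precision is requested, accumulate in F32 to reduce
// rounding error; otherwise accumulate in F16.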
template <>
SimpleTensor<half> pooling_layer(const SimpleTensor<half> &src, const PoolingLayerInfo &info, const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> *indices, DataLayout data_layout)
{
    ARM_COMPUTE_UNUSED(output_qinfo);
    if(src.data_type() == DataType::F16 && info.fp_mixed_precision)
    {
        return pooling_layer_internal<half, float>(src, info, indices, data_layout);
    }

    return pooling_layer_internal<half>(src, info, indices, data_layout);
}

template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, const PoolingLayerInfo &info, const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> *indices, DataLayout data_layout);

} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute