/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ConvolutionLayer.h"

#include "tests/validation/Helpers.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace reference
{
template <typename T, typename TW, typename TB>
SimpleTensor<T> deconvolution_layer(const SimpleTensor<T> &src, const SimpleTensor<TW> &weights, const SimpleTensor<TB> &bias, const TensorShape &output_shape,
                                    const PadStrideInfo &info, QuantizationInfo out_qinfo)
{
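    // Reference implementation: the deconvolution is computed by upsampling the input
    // (inserting stride - 1 zeros between neighbouring samples), padding the result,
    // flipping the weights by 180 degrees and then running a stride-1 convolution.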
    // Extract padding, stride and kernel geometry
    const unsigned int pad_left           = info.pad_left();
    const unsigned int pad_right          = info.pad_right();
    const unsigned int pad_top            = info.pad_top();
    const unsigned int pad_bottom         = info.pad_bottom();
    const int          stride_x           = info.stride().first;
    const int          stride_y           = info.stride().second;
    const int          weights_width      = weights.shape().x();
    const int          weights_height     = weights.shape().y();
    const int          weights_upper_dims = weights.shape().total_size() / (weights_width * weights_height);

    // The padding of the forward convolution cannot exceed the kernel size minus one
    ARM_COMPUTE_ERROR_ON(pad_left > (weights.shape().x() - 1));
    ARM_COMPUTE_ERROR_ON(pad_right > (weights.shape().x() - 1));
    ARM_COMPUTE_ERROR_ON(pad_top > (weights.shape().y() - 1));
    ARM_COMPUTE_ERROR_ON(pad_bottom > (weights.shape().y() - 1));

    // Find the upsampled dimensions: (in - 1) * stride + 1, i.e. stride - 1 zeros are inserted between neighbouring input samples
    unsigned int out_x = (src.shape().x() - 1) * stride_x + 1;
    unsigned int out_y = (src.shape().y() - 1) * stride_y + 1;

    // Find the padding needed for the convolution with stride 1 in order to match output shape
    unsigned int deconv_pad_x = output_shape.x() - (out_x - weights_width + 1);
    unsigned int deconv_pad_y = output_shape.y() - (out_y - weights_height + 1);
    out_x += deconv_pad_x;
    out_y += deconv_pad_y;

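    // Distribute that padding over the two sides: any asymmetry between the original left/right
    // (top/bottom) padding is mirrored onto the opposite side of the upsampled tensor, and the
    // remaining amount is split evenly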
    unsigned int deconv_pad_left  = pad_right > pad_left ? pad_right - pad_left : 0;
    unsigned int deconv_pad_right = pad_left > pad_right ? pad_left - pad_right : 0;
    deconv_pad_x -= deconv_pad_left + deconv_pad_right;
    ARM_COMPUTE_ERROR_ON((deconv_pad_x % 2) != 0);
    deconv_pad_left += deconv_pad_x / 2;
    deconv_pad_right += deconv_pad_x / 2;

    unsigned int deconv_pad_top    = pad_bottom > pad_top ? pad_bottom - pad_top : 0;
    unsigned int deconv_pad_bottom = pad_top > pad_bottom ? pad_top - pad_bottom : 0;
    deconv_pad_y -= deconv_pad_top + deconv_pad_bottom;
    ARM_COMPUTE_ERROR_ON((deconv_pad_y % 2) != 0);
    deconv_pad_top += deconv_pad_y / 2;
    deconv_pad_bottom += deconv_pad_y / 2;

    TensorShape scaled_shape = src.shape();
    scaled_shape.set(0, out_x);
    scaled_shape.set(1, out_y);
    SimpleTensor<T> scaled{ scaled_shape, src.data_type(), 1, src.quantization_info() };

    const int width_in      = src.shape().x();
    const int height_in     = src.shape().y();
    const int width_scaled  = scaled.shape().x();
    const int height_scaled = scaled.shape().y();
    const int num_2d_slices = src.shape().total_size() / (width_in * height_in);

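    // Initialise the upsampled tensor with "zero": for quantized types zero is represented by the
    // zero-point offset of the input, for floating-point types a literal zero is used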
    if(src.data_type() == DataType::QASYMM8 || src.data_type() == DataType::QASYMM8_SIGNED)
    {
        const auto quantized_zero = static_cast<T>(src.quantization_info().uniform().offset);
        std::fill_n(scaled.data(), scaled.num_elements(), quantized_zero);
    }
    else
    {
        std::fill_n(scaled.data(), scaled.num_elements(), T(0));
    }

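    // Convolving the zero-stuffed input with the spatially reversed kernel reproduces the
    // transposed convolution, hence the 180-degree rotation below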
    // Flip weights by 180 degrees
    SimpleTensor<TW> weights_flipped{ weights.shape(), weights.data_type(), 1, weights.quantization_info(), weights.data_layout() };
#if defined(_OPENMP)
    #pragma omp parallel for
#endif /* _OPENMP */
    for(int ud = 0; ud < weights_upper_dims; ++ud)
    {
        const int offset = ud * weights_width * weights_height;
        for(int y = 0; y < weights_height; ++y)
        {
            for(int x = 0; x < weights_width; ++x)
            {
                weights_flipped[offset + (weights_height - 1 - y) * weights_width + (weights_width - 1 - x)] = weights[offset + y * weights_width + x];
            }
        }
    }
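    // Scatter the input samples into the upsampled tensor: each element is written every
    // stride_x / stride_y positions within the region delimited by the deconvolution padding,
    // while the remaining positions keep the zero fill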
#if defined(_OPENMP)
    #pragma omp parallel for
#endif /* _OPENMP */
    for(int slice = 0; slice < num_2d_slices; ++slice)
    {
        const int offset_slice_in  = slice * width_in * height_in;
        const int offset_slice_out = slice * width_scaled * height_scaled;
        const int start_x          = deconv_pad_left;
        const int start_y          = deconv_pad_top;
        const int end_x            = width_scaled - deconv_pad_right;
        const int end_y            = height_scaled - deconv_pad_bottom;

        for(int yi = start_y, in_y = 0; yi < end_y; yi += stride_y, in_y++)
        {
            for(int xi = start_x, in_x = 0; xi < end_x; xi += stride_x, in_x++)
            {
                const T *in  = src.data() + offset_slice_in + in_y * width_in + in_x;
                T       *out = scaled.data() + offset_slice_out + xi + yi * width_scaled;
                *out         = *in;
            }
        }
    }

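    // Finally run a stride-1 convolution without additional padding over the upsampled tensor to
    // produce the requested output shape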
    const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL);
    return convolution_layer(scaled, weights_flipped, bias, output_shape, conv_info, Size2D(1U, 1U), 1, out_qinfo);
}

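// Explicit instantiations for the data-type combinations exercised by the validation suite:
// quantized uint8_t / int8_t inputs (with uint8_t or int8_t weights and int32_t bias), float and half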
template SimpleTensor<uint8_t> deconvolution_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
                                                   const PadStrideInfo &info, QuantizationInfo out_quant_info);
template SimpleTensor<uint8_t> deconvolution_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<int8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
                                                   const PadStrideInfo &info, QuantizationInfo out_quant_info);
template SimpleTensor<int8_t> deconvolution_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
                                                  const PadStrideInfo &info, QuantizationInfo out_quant_info);
template SimpleTensor<float> deconvolution_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &weights, const SimpleTensor<float> &bias, const TensorShape &output_shape,
                                                 const PadStrideInfo &info, QuantizationInfo out_quant_info);
template SimpleTensor<half> deconvolution_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &output_shape,
                                                const PadStrideInfo &info, QuantizationInfo out_quant_info);
} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute