/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
23 */ 24 #include "arm_compute/core/TensorShape.h" 25 #include "arm_compute/core/Types.h" 26 #include "arm_compute/core/utils/misc/ShapeCalculator.h" 27 #include "tests/AssetsLibrary.h" 28 #include "tests/Globals.h" 29 #include "tests/IAccessor.h" 30 #include "tests/framework/Asserts.h" 31 #include "tests/framework/Fixture.h" 32 #include "tests/validation/Helpers.h" 33 #include "tests/validation/reference/DeconvolutionLayer.h" 34 35 #include <random> 36 37 namespace arm_compute 38 { 39 namespace test 40 { 41 namespace validation 42 { 43 using namespace arm_compute::misc::shape_calculator; 44 45 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW> 46 class DeconvolutionLayerFixtureBase : public framework::Fixture 47 { 48 public: 49 using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value || std::is_same<typename std::decay<T>::type, int8_t>::value, int32_t, T >::type; 50 51 public: 52 template <typename...> setup(TensorShape input_shape,TensorShape weights_shape,TensorShape bias_shape,TensorShape output_shape,PadStrideInfo info,DataType data_type,DataType weights_data_type,DataLayout data_layout,QuantizationInfo input_quantization_info,QuantizationInfo output_quantization_info,QuantizationInfo weights_quantization_info,bool add_bias)53 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, 54 DataType data_type, DataType weights_data_type, DataLayout data_layout, 55 QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, QuantizationInfo weights_quantization_info, bool add_bias) 56 { 57 _data_type = data_type; 58 _weights_data_type = weights_data_type; 59 _bias_data_type = is_data_type_quantized_asymmetric(data_type) ? 
DataType::S32 : data_type; 60 _data_layout = data_layout; 61 _input_quantization_info = input_quantization_info; 62 _output_quantization_info = output_quantization_info; 63 _weights_quantization_info = weights_quantization_info; 64 65 _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, add_bias); 66 _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, add_bias); 67 } 68 69 protected: 70 template <typename U> fill(U && tensor,int i)71 void fill(U &&tensor, int i) 72 { 73 switch(tensor.data_type()) 74 { 75 case DataType::QASYMM8: 76 { 77 std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f); 78 std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second); 79 library->fill(tensor, distribution, i); 80 break; 81 } 82 case DataType::QASYMM8_SIGNED: 83 { 84 std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f); 85 std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second); 86 library->fill(tensor, distribution, i); 87 break; 88 } 89 case DataType::QSYMM8_PER_CHANNEL: 90 { 91 int min_bound = 128; 92 int max_bound = -127; 93 for(size_t i = 0; i < _input_quantization_info.scale().size(); i++) 94 { 95 std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f); 96 if(bounds.first < min_bound) 97 { 98 min_bound = bounds.first; 99 } 100 if(bounds.second > max_bound) 101 { 102 max_bound = bounds.second; 103 } 104 } 105 std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound); 106 library->fill(tensor, distribution, i); 107 break; 108 } 109 case DataType::S32: 110 { 111 std::uniform_int_distribution<int32_t> distribution(-100, 100); 112 library->fill(tensor, distribution, i); 113 break; 114 } 115 case DataType::F16: 116 { 117 arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f }; 118 
library->fill(tensor, distribution, i); 119 break; 120 } 121 case DataType::F32: 122 { 123 std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); 124 library->fill(tensor, distribution, i); 125 break; 126 } 127 default: 128 library->fill_tensor_uniform(tensor, i); 129 } 130 } 131 132 template <typename U> fill_zeros(U && tensor)133 void fill_zeros(U &&tensor) 134 { 135 switch(tensor.data_type()) 136 { 137 case DataType::S32: 138 { 139 library->fill_tensor_value(tensor, 0); 140 break; 141 } 142 case DataType::F16: 143 library->fill_tensor_value(tensor, static_cast<half>(0.0f)); 144 break; 145 case DataType::F32: 146 library->fill_tensor_value(tensor, static_cast<float>(0.0f)); 147 break; 148 default: 149 ARM_COMPUTE_ERROR("Not supported"); 150 } 151 } 152 compute_target(TensorShape input_shape,TensorShape weights_shape,const TensorShape bias_shape,TensorShape output_shape,const PadStrideInfo & info,bool add_bias)153 TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape bias_shape, TensorShape output_shape, 154 const PadStrideInfo &info, bool add_bias) 155 { 156 if(_data_layout == DataLayout::NHWC) 157 { 158 permute(input_shape, PermutationVector(2U, 0U, 1U)); 159 permute(weights_shape, PermutationVector(2U, 0U, 1U)); 160 permute(output_shape, PermutationVector(2U, 0U, 1U)); 161 } 162 163 // Create tensors 164 TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _input_quantization_info, _data_layout); 165 TensorType weights = create_tensor<TensorType>(weights_shape, _weights_data_type, 1, _weights_quantization_info, _data_layout); 166 TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _input_quantization_info, _data_layout); 167 TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _output_quantization_info, _data_layout); 168 169 // Create and configure function 170 FunctionType conv; 171 conv.configure(&src, &weights, add_bias ? 
&bias : nullptr, &dst, info); 172 173 ARM_COMPUTE_ASSERT(src.info()->is_resizable()); 174 ARM_COMPUTE_ASSERT(weights.info()->is_resizable()); 175 if(add_bias) 176 { 177 ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); 178 } 179 ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); 180 181 // Allocate tensors 182 src.allocator()->allocate(); 183 weights.allocator()->allocate(); 184 if(add_bias) 185 { 186 bias.allocator()->allocate(); 187 } 188 dst.allocator()->allocate(); 189 190 ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); 191 ARM_COMPUTE_ASSERT(!weights.info()->is_resizable()); 192 if(add_bias) 193 { 194 ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); 195 } 196 ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); 197 198 // Fill tensors 199 fill(AccessorType(src), 0); 200 fill(AccessorType(weights), 1); 201 if(add_bias) 202 { 203 fill(AccessorType(bias), 2); 204 } 205 206 // Compute DeconvolutionLayer function 207 conv.run(); 208 return dst; 209 } 210 compute_reference(const TensorShape & input_shape,const TensorShape & weights_shape,const TensorShape & bias_shape,const TensorShape & output_shape,const PadStrideInfo & info,bool add_bias)211 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, 212 const PadStrideInfo &info, bool add_bias) 213 { 214 // Create reference 215 SimpleTensor<T> src{ input_shape, _data_type, 1, _input_quantization_info }; 216 SimpleTensor<TW> weights{ weights_shape, _weights_data_type, 1, _weights_quantization_info }; 217 SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _input_quantization_info }; 218 219 // Fill reference 220 fill(src, 0); 221 fill(weights, 1); 222 223 if(add_bias) 224 { 225 fill(bias, 2); 226 } 227 else 228 { 229 fill_zeros(bias); 230 } 231 return reference::deconvolution_layer<T, TW>(src, weights, bias, output_shape, info, _output_quantization_info); 232 } 233 234 TensorType _target{}; 235 SimpleTensor<T> 
_reference{}; 236 DataType _data_type{}; 237 DataType _weights_data_type{}; 238 DataType _bias_data_type{}; 239 DataLayout _data_layout{}; 240 QuantizationInfo _input_quantization_info{}; 241 QuantizationInfo _output_quantization_info{}; 242 QuantizationInfo _weights_quantization_info{}; 243 }; 244 245 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y> 246 class DeconvolutionValidationFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T> 247 { 248 public: 249 template <typename...> setup(TensorShape input_shape,unsigned int sx,unsigned int sy,unsigned int padx,unsigned int pady,unsigned int num_kernels,DataType data_type,DataLayout data_layout,bool add_bias)250 void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady, 251 unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias) 252 { 253 ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported"); 254 const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels); 255 const TensorShape bias_shape(num_kernels); 256 const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL); 257 auto out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info); 258 TensorInfo input_info(input_shape, 1, data_type); 259 TensorInfo weights_info(weights_shape, 1, data_type); 260 TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info); 261 DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_type, data_layout, QuantizationInfo(), 262 QuantizationInfo(), QuantizationInfo(), add_bias); 263 } 264 }; 265 266 template <typename TensorType, typename AccessorType, 
typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y> 267 class DeconvolutionValidationAsymmFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T> 268 { 269 public: 270 template <typename...> setup(TensorShape input_shape,unsigned int sx,unsigned int sy,unsigned int pad_left,unsigned int pad_right,unsigned int pad_top,unsigned int pad_bottom,unsigned int num_kernels,DataType data_type,DataLayout data_layout,bool add_bias)271 void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int pad_left, unsigned int pad_right, unsigned int pad_top, 272 unsigned int pad_bottom, unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias) 273 { 274 ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported"); 275 const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels); 276 const TensorShape bias_shape(num_kernels); 277 const PadStrideInfo info(sx, sy, pad_left, pad_right, pad_top, pad_bottom, DimensionRoundingType::CEIL); 278 auto out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info); 279 TensorInfo input_info(input_shape, 1, data_type); 280 TensorInfo weights_info(weights_shape, 1, data_type); 281 TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info); 282 DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_type, data_layout, QuantizationInfo(), 283 QuantizationInfo(), QuantizationInfo(), add_bias); 284 } 285 }; 286 287 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y> 288 class DeconvolutionValidationQuantizedFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, 
T, T> 289 { 290 public: 291 template <typename...> setup(TensorShape input_shape,unsigned int sx,unsigned int sy,unsigned int padx,unsigned int pady,unsigned int num_kernels,DataType data_type,DataLayout data_layout,QuantizationInfo input_quantization_info,QuantizationInfo output_quantization_info,bool add_bias)292 void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady, 293 unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias) 294 { 295 ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported"); 296 const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels); 297 const TensorShape bias_shape(num_kernels); 298 const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL); 299 auto out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info); 300 TensorInfo input_info(input_shape, 1, data_type, input_quantization_info); 301 TensorInfo weights_info(weights_shape, 1, data_type, input_quantization_info); 302 TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info); 303 DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_type, data_layout, 304 input_quantization_info, 305 output_quantization_info, input_quantization_info, add_bias); 306 } 307 }; 308 309 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW, unsigned int kernel_size_x, unsigned int kernel_size_y> 310 class DeconvolutionValidationQuantizedPerChannelFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, TW> 311 { 312 public: 313 template <typename...> setup(TensorShape input_shape,unsigned int 
sx,unsigned int sy,unsigned int padx,unsigned int pady,unsigned int num_kernels,DataType data_type,DataLayout data_layout,QuantizationInfo input_quantization_info,QuantizationInfo output_quantization_info,bool add_bias,DataType weights_data_type)314 void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady, 315 unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias, 316 DataType weights_data_type) 317 { 318 ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported"); 319 const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels); 320 const TensorShape bias_shape(num_kernels); 321 const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL); 322 auto out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info); 323 TensorInfo input_info(input_shape, 1, data_type, input_quantization_info); 324 TensorInfo weights_info(weights_shape, 1, weights_data_type, input_quantization_info); 325 TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info); 326 327 std::vector<float> weights_scales{}; 328 std::mt19937 gen(library->seed()); 329 std::uniform_real_distribution<float> dis(0.01f, 1.f); 330 for(size_t i = 0; i < output_shape[2]; ++i) 331 { 332 weights_scales.push_back(dis(gen)); 333 } 334 DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, TW>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, weights_data_type, data_layout, 335 input_quantization_info, 336 output_quantization_info, QuantizationInfo(weights_scales), add_bias); 337 } 338 }; 339 340 } // namespace validation 341 } // namespace test 342 } // namespace arm_compute 343