/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_DEPTHWISE_CONVOLUTION_FIXTURE
#define ARM_COMPUTE_TEST_DEPTHWISE_CONVOLUTION_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/DepthwiseConvolutionLayer.h"

#include "utils/Utils.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

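/** Generic validation fixture for depthwise convolution layers.
 *
 * setup() stores the test parameters and derives the weights, bias and output shapes;
 * configure_target() creates the tensors and configures the function under test;
 * allocate_and_run_target() allocates, fills and runs it; compute_reference() produces
 * the expected output with the reference implementation. T is the input/output element
 * type and TW the weights element type (they differ only for per-channel quantization).
 */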
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
class DepthwiseConvolutionLayerValidationGenericFixture : public framework::Fixture
{
public:
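    // Quantized 8-bit inputs accumulate into 32-bit signed biases; every other data type keeps the input type for the bias.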
    using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type;

public:
    template <typename...>
    void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation,
               unsigned int depth_multiplier, DataType input_data_type, DataType weights_data_type,
               QuantizationInfo input_quantization_info, QuantizationInfo weights_quantization_info, QuantizationInfo output_quantization_info,
               DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false, bool in_place = false, bool run_twice = false)
    {
        ARM_COMPUTE_ERROR_ON(mixed_layout && in_place);
        _mixed_layout              = mixed_layout;
        _input_shape               = in_shape;
        _input_data_type           = input_data_type;
        _weights_data_type         = weights_data_type;
        _input_quantization_info   = input_quantization_info;
        _weights_quantization_info = weights_quantization_info;
        _output_quantization_info  = output_quantization_info;
        _data_layout               = data_layout;
        _pad_stride_info           = pad_stride_info;
        _act_info                  = act_info;
        _depth_multiplier          = depth_multiplier;
        _dilation                  = dilation;
        _in_place                  = in_place;
        _run_twice                 = run_twice;

        _bias_data_type = is_data_type_quantized(_input_data_type) ? DataType::S32 : _input_data_type;

        _weights_shape = TensorShape(kernel_size.width, kernel_size.height);

        const TensorInfo      in_info(_input_shape, 1, _input_data_type);
        const TensorInfo      we_info(_weights_shape, 1, _weights_data_type);
        const ConvolutionInfo info{ _pad_stride_info, _depth_multiplier, _act_info, _dilation };
        _output_shape = compute_depthwise_convolution_shape(in_info, we_info, info);

        _weights_shape.set(2, _output_shape.z());
        _biases_shape = TensorShape(_weights_shape[2]);
    }

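    // Create the target tensors (permuted when the data layout is NHWC), add padding and configure the function under test.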
    void configure_target()
    {
        TensorShape input_shape   = _input_shape;
        TensorShape weights_shape = _weights_shape;
        TensorShape output_shape  = _output_shape;

        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        _src                      = create_tensor<TensorType>(input_shape, _input_data_type, 1, _input_quantization_info, _data_layout);
        _weights                  = create_tensor<TensorType>(weights_shape, _weights_data_type, 1, _weights_quantization_info, _data_layout);
        if(_run_twice) {
            _weights.info()->set_are_values_constant(false);
        }
        _biases                   = create_tensor<TensorType>(_biases_shape, _bias_data_type, 1, _input_quantization_info, _data_layout);
        TensorType *target_to_use = nullptr;
        if(!_in_place)
        {
            _target       = create_tensor<TensorType>(output_shape, _input_data_type, 1, _output_quantization_info, _data_layout);
            target_to_use = &_target;
        }

        add_padding_x({ &_src, &_biases }, _data_layout);
        add_padding_x({ &_weights }, _data_layout, true);
        if(!_in_place)
        {
            add_padding_x({ &_target }, _data_layout);
        }

        // Configure the depthwise convolution function
        _dwc.configure(&_src, &_weights, &_biases, target_to_use, _pad_stride_info, _depth_multiplier, _act_info, _dilation);

        ARM_COMPUTE_ASSERT(_src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(_weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(_biases.info()->is_resizable());
        ARM_COMPUTE_ASSERT(_target.info()->is_resizable());
    }

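    // Allocate and fill the target tensors, then run the configured function (twice, with fresh data, when _run_twice is set).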
    void allocate_and_run_target()
    {
        // Allocate tensors
        _src.allocator()->allocate();
        _weights.allocator()->allocate();
        _biases.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!_src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!_weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!_biases.info()->is_resizable());

        if(!_in_place)
        {
            _target.allocator()->allocate();
            ARM_COMPUTE_ASSERT(!_target.info()->is_resizable());
        }

        // Fill tensors
        fill(AccessorType(_src), 0);
        fill(AccessorType(_weights), 1);
        fill(AccessorType(_biases), 2);

        // Run with variable input
        if(_run_twice) {
            _dwc.run();

            // Fill tensors with a new seed
            fill(AccessorType(_src), 3);
            fill(AccessorType(_weights), 4);
            fill(AccessorType(_biases), 5);
        }

        if(_mixed_layout)
        {
            mix_layout(_dwc, _src, _target);
        }
        else
        {
            // Compute function
            _dwc.run();
        }
    }

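    // Compute the expected output with the reference implementation, using the same fill seeds as the target path.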
    void compute_reference()
    {
        SimpleTensor<T>     src{ _input_shape, _input_data_type, 1, _input_quantization_info };
        SimpleTensor<TW>    weights{ _weights_shape, _weights_data_type, 1, _weights_quantization_info };
        SimpleTensor<TBias> biases{ _biases_shape, _bias_data_type, 1, _input_quantization_info };

        fill(src, 0);
        fill(weights, 1);
        fill(biases, 2);
        if(_run_twice) {
            SimpleTensor<T> depth_out = reference::depthwise_convolution(src, weights, biases, _output_shape, _pad_stride_info, _depth_multiplier, _dilation, _output_quantization_info);
            if(_act_info.enabled()) {
                reference::activation_layer<T>(depth_out, _act_info);
            }

            fill(src, 3);
            fill(weights, 4);
            fill(biases, 5);
        }

        SimpleTensor<T> depth_out = reference::depthwise_convolution(src, weights, biases, _output_shape, _pad_stride_info, _depth_multiplier, _dilation, _output_quantization_info);
        _reference                = (_act_info.enabled()) ? reference::activation_layer<T>(depth_out, _act_info) : depth_out;
    }

protected:
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        ARM_COMPUTE_ERROR_ON(_in_place);
        // Test multi data-layout graph cases, where the data layout changes after configure()
        src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Run the convolution function
        layer.run();

        // Reinstate the original data layout so the test suite checks the values correctly
        src.info()->set_data_layout(_data_layout);
        dst.info()->set_data_layout(_data_layout);
    }

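    // Fill a tensor with pseudo-random values drawn from a range suited to its data type; i selects the random sequence so each tensor gets distinct data.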
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                std::uniform_int_distribution<uint32_t> distribution(0, 15);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QASYMM8_SIGNED:
            case DataType::QSYMM8_PER_CHANNEL:
            {
                std::uniform_int_distribution<int32_t> distribution(-10, 10);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                std::uniform_int_distribution<int32_t> distribution(-100, 100);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};

    TensorType   _src{};
    TensorType   _weights{};
    TensorType   _biases{};
    FunctionType _dwc{};

    TensorShape         _input_shape{};
    TensorShape         _weights_shape{};
    TensorShape         _biases_shape{};
    TensorShape         _output_shape{};
    DataType            _input_data_type{};
    DataType            _weights_data_type{};
    DataType            _bias_data_type{};
    QuantizationInfo    _input_quantization_info{};
    QuantizationInfo    _weights_quantization_info{};
    QuantizationInfo    _output_quantization_info{};
    DataLayout          _data_layout{};
    PadStrideInfo       _pad_stride_info{};
    ActivationLayerInfo _act_info{};
    unsigned int        _depth_multiplier{};
    Size2D              _dilation{};
    bool                _mixed_layout{ false };
    bool                _in_place{ false };
    bool                _run_twice{ false };
};

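/** Fixture for non-quantized depthwise convolution: input, weights and output share a single data type and no quantization information. */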
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false, bool in_place = false, bool run_twice = false>
class DepthwiseConvolutionLayerValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
    template <typename...>
    void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type, DataLayout data_layout,
               ActivationLayerInfo act_info)
    {
        DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier,
                                                                                                               data_type, data_type, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(),
                                                                                                               data_layout, act_info, mixed_layout, in_place, run_twice);
    }
};

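/** Fixture for the native depthwise convolution operator: the function is configured from ITensorInfo objects and executed through an ITensorPack, with no activation or quantization. */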
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DepthwiseConvolutionLayerNativeValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
    template <typename...>
    void setup(size_t width, size_t height, size_t channel, size_t batch, Size2D kernel_size, size_t depth_multiplier, Size2D dilation, Size2D stride, bool padding_valid, DataType data_type,
               DataLayout data_layout)
    {
        _dilation         = dilation;
        _depth_multiplier = depth_multiplier;
        _data_type        = data_type;
        _data_layout      = data_layout;

        _input_shape   = TensorShape(width, height, channel, batch);
        _weights_shape = TensorShape(kernel_size.width, kernel_size.height, channel * _depth_multiplier);
        _biases_shape  = TensorShape(_weights_shape.z());

        if(padding_valid)
        {
            _conv_info = PadStrideInfo(stride.width, stride.height);
        }
        else
        {
            _conv_info = calculate_same_pad(_input_shape, _weights_shape, PadStrideInfo(stride.width, stride.height), DataLayout::NCHW, _dilation);
        }
    }

    void configure_target()
    {
        TensorShape input_shape   = _input_shape;
        TensorShape weights_shape = _weights_shape;

        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        _src     = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        _weights = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        _biases  = create_tensor<TensorType>(_biases_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        _target  = create_tensor<TensorType>(TensorShape(), _data_type, 1, QuantizationInfo(), _data_layout);

        add_padding_x({ &_src, &_biases, &_target }, _data_layout);
        add_padding_x({ &_weights }, _data_layout, true);
        add_padding_y({ &_src, &_target }, _data_layout);

        // Configure the depthwise convolution function
        const ConvolutionInfo info
        {
            _conv_info, _depth_multiplier, ActivationLayerInfo(), _dilation
        };
        _dwc.configure(_src.info(), _weights.info(), _biases.info(), _target.info(), info);

        ARM_COMPUTE_ASSERT(_src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(_weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(_biases.info()->is_resizable());
        ARM_COMPUTE_ASSERT(_target.info()->is_resizable());
    }

    void allocate_and_run_target()
    {
        // Allocate tensors
        _src.allocator()->allocate();
        _weights.allocator()->allocate();
        _biases.allocator()->allocate();
        _target.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!_src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!_weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!_biases.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!_target.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(_src), 0);
        fill(AccessorType(_weights), 1);
        fill(AccessorType(_biases), 2);

        arm_compute::ITensorPack pack;
        pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_0, &_src);
        pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_1, &_weights);
        pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_2, &_biases);
        pack.add_tensor(arm_compute::TensorType::ACL_DST, &_target);

        // Compute function
        _dwc.run(pack);
    }

    void compute_reference()
    {
        SimpleTensor<T> src{ _input_shape, _data_type };
        SimpleTensor<T> weights{ _weights_shape, _data_type };
        SimpleTensor<T> biases{ _biases_shape, _data_type };

        fill(src, 0);
        fill(weights, 1);
        fill(biases, 2);

        const ConvolutionInfo info{ _conv_info, _depth_multiplier, ActivationLayerInfo(), _dilation };
        const TensorShape     dst_shape = compute_depthwise_convolution_shape(TensorInfo(_input_shape, 1, _data_type), TensorInfo(_weights_shape, 1, _data_type), info);
        _reference                      = reference::depthwise_convolution(src, weights, biases, dst_shape, _conv_info, _depth_multiplier, _dilation);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};

    TensorType   _src{};
    TensorType   _weights{};
    TensorType   _biases{};
    FunctionType _dwc{};

    TensorShape   _input_shape{};
    TensorShape   _weights_shape{};
    TensorShape   _biases_shape{};
    DataType      _data_type{};
    DataLayout    _data_layout{};
    PadStrideInfo _conv_info{};
    Size2D        _dilation{};
    unsigned int  _depth_multiplier{};
};

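/** Fixture for the native depthwise convolution with a configurable N0 block size and optional export of the weights to an OpenCL image; the OpenCL capability checks skip the run when image2d-from-buffer is unsupported. */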
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool in_place = false>
class DepthwiseConvolutionLayerNativeConfigurableValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
    template <typename...>
    void setup(size_t width, size_t height, size_t channel, size_t batch, Size2D kernel_size, size_t depth_multiplier, Size2D dilation, Size2D stride, bool padding_valid, DataType data_type,
               DataLayout data_layout, const ActivationLayerInfo &act_info, unsigned int n0, bool export_to_cl_image)
    {
        _dilation           = dilation;
        _depth_multiplier   = depth_multiplier;
        _data_type          = data_type;
        _data_layout        = data_layout;
        _act_info           = act_info;
        _n0                 = n0;
        _export_to_cl_image = export_to_cl_image;
        _in_place           = in_place;

        _input_shape   = TensorShape(width, height, channel, batch);
        _weights_shape = TensorShape(kernel_size.width, kernel_size.height, channel * _depth_multiplier);
        _biases_shape  = TensorShape(_weights_shape.z());

        if(padding_valid)
        {
            _conv_info = calculate_same_pad(_input_shape, _weights_shape, PadStrideInfo(stride.width, stride.height), DataLayout::NCHW, _dilation);
        }
        else
        {
            _conv_info = PadStrideInfo(stride.width, stride.height);
        }
    }

    void configure_target()
    {
#if defined(ARM_COMPUTE_OPENCL_ENABLED)
        if(_export_to_cl_image)
        {
            _validate_output &= image2d_from_buffer_supported(CLKernelLibrary::get().get_device());
            _validate_output &= (get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) != 0);
        }
#endif // ARM_COMPUTE_OPENCL_ENABLED

        if(!_validate_output)
        {
            return;
        }

        TensorShape input_shape   = _input_shape;
        TensorShape weights_shape = _weights_shape;

        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        _src                      = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        _weights                  = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        _biases                   = create_tensor<TensorType>(_biases_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType *target_to_use = nullptr;
        if(!_in_place)
        {
            _target       = create_tensor<TensorType>(TensorShape(), _data_type, 1, QuantizationInfo(), _data_layout);
            target_to_use = &_target;
        }

        DWCComputeKernelInfo dwc_info;
        dwc_info.n0                         = _n0;
        dwc_info.m0                         = _conv_info.stride().first == 1 && _dilation.x() == 1 ? 8 : 1;
        dwc_info.export_input_to_cl_image   = false;
        dwc_info.export_weights_to_cl_image = _export_to_cl_image;

        const ConvolutionInfo conv_kernel_info
        {
            _conv_info, _depth_multiplier, _act_info, _dilation
        };

        add_padding_x({ &_src, &_biases, &_target }, _data_layout);
        add_padding_x({ &_weights }, _data_layout, _export_to_cl_image); // Don't add left padding if the CL image will be used

        // Configure the depthwise convolution function
        _dwc.configure(&_src, &_weights, &_biases, target_to_use, dwc_info, conv_kernel_info);

        ARM_COMPUTE_ASSERT(_src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(_weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(_biases.info()->is_resizable());
        ARM_COMPUTE_ASSERT(_target.info()->is_resizable());
    }

    void allocate_and_run_target()
    {
        if(!_validate_output)
        {
            return;
        }

        // Allocate tensors
        _src.allocator()->allocate();
        _weights.allocator()->allocate();
        _biases.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!_src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!_weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!_biases.info()->is_resizable());
        if(!_in_place)
        {
            _target.allocator()->allocate();
            ARM_COMPUTE_ASSERT(!_target.info()->is_resizable());
        }

        // Fill tensors
        fill(AccessorType(_src), 0);
        fill(AccessorType(_weights), 1);
        fill(AccessorType(_biases), 2);

        // Test multi data-layout graph cases, where the data layout changes after configure()
        _src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        if(!_in_place)
        {
            _target.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        }

        // Compute function
        _dwc.run();

        // Reinstate the original data layout so the test suite checks the values correctly
        if(!_in_place)
        {
            _target.info()->set_data_layout(_data_layout);
        }
    }

    void compute_reference()
    {
        if(!_validate_output)
        {
            return;
        }

        SimpleTensor<T> src{ _input_shape, _data_type };
        SimpleTensor<T> weights{ _weights_shape, _data_type };
        SimpleTensor<T> biases{ _biases_shape, _data_type };

        fill(src, 0);
        fill(weights, 1);
        fill(biases, 2);

        const ConvolutionInfo info{ _conv_info, _depth_multiplier, _act_info, _dilation };
        const TensorShape     dst_shape = compute_depthwise_convolution_shape(TensorInfo(_input_shape, 1, _data_type), TensorInfo(_weights_shape, 1, _data_type), info);
        _reference                      = reference::activation_layer(reference::depthwise_convolution(src, weights, biases, dst_shape, _conv_info, _depth_multiplier, _dilation), _act_info);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};

    TensorType   _src{};
    TensorType   _weights{};
    TensorType   _biases{};
    FunctionType _dwc{};

    TensorShape         _input_shape{};
    TensorShape         _weights_shape{};
    TensorShape         _biases_shape{};
    DataType            _data_type{};
    DataLayout          _data_layout{};
    PadStrideInfo       _conv_info{};
    ActivationLayerInfo _act_info{};
    Size2D              _dilation{};
    unsigned int        _depth_multiplier{};
    unsigned int        _n0{};
    bool                _export_to_cl_image{};
    bool                _validate_output{ true };
    bool                _in_place{ false };
};

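/** Fixture for uniformly quantized depthwise convolution: the input quantization info is reused for the weights. */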
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false, bool in_place = false>
class DepthwiseConvolutionLayerValidationQuantizedFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
    template <typename...>
    void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type,
               QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, DataLayout data_layout, ActivationLayerInfo act_info)
    {
        DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier, data_type,
                                                                                                               data_type, input_quantization_info, input_quantization_info, output_quantization_info,
                                                                                                               data_layout, act_info, mixed_layout, in_place);
    }
};

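/** Fixture for per-channel quantized weights: a random scale in (0.01, out_scale / in_scale) is drawn for every output channel. */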
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW, bool in_place = false>
class DepthwiseConvolutionLayerValidationQuantizedPerChannelFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
{
public:
    template <typename...>
    void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType input_data_type, DataType weights_data_type,
               QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, DataLayout data_layout, ActivationLayerInfo act_info)
    {
        const float out_scale = output_quantization_info.uniform().scale;
        const float in_scale  = input_quantization_info.uniform().scale;

        std::vector<float>                    weights_scales{};
        std::mt19937                          gen(library->seed());
        std::uniform_real_distribution<float> dis(0.01f, out_scale / in_scale);
        for(size_t i = 0; i < in_shape.z() * depth_multiplier; ++i)
        {
            weights_scales.push_back(dis(gen));
        }

        DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier,
                                                                                                                input_data_type, weights_data_type,
                                                                                                                input_quantization_info, QuantizationInfo(weights_scales), output_quantization_info,
                                                                                                                data_layout, act_info, false, in_place);
    }
};
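
// Illustrative usage sketch: backend test suites typically alias one of these fixtures and drive it
// through FIXTURE_DATA_TEST_CASE. The concrete tensor/accessor/function types, dataset and tolerance
// names below are examples for an OpenCL backend test, not definitions from this file:
//
//   template <typename T>
//   using CLDepthwiseConvolutionLayerFixture =
//       DepthwiseConvolutionLayerValidationFixture<CLTensor, CLAccessor, CLDepthwiseConvolutionLayer, T>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerFixture<float>, framework::DatasetMode::ALL,
//                          combine(/* shape / kernel / stride / dilation / depth-multiplier / data-type / layout / activation datasets */))
//   {
//       validate(CLAccessor(_target), _reference, tolerance_f32);
//   }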
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_DEPTHWISE_CONVOLUTION_FIXTURE */