/*
 * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/graph/Utils.h"
#ifdef ARM_COMPUTE_OPENCL_ENABLED
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
#endif // ARM_COMPUTE_OPENCL_ENABLED
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
#include "src/graph/mutators/MutatorUtils.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/PadLayer.h"
#include "tests/validation/reference/Permute.h"
#include "tests/validation/reference/Utils.h"

#include <random>
#include <type_traits>
namespace arm_compute
{
namespace test
{
namespace validation
{
namespace detail
{
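// Dispatch helper: the two overloads below differ only in the configure() call they make. The generic
// overload passes enable_fast_math explicitly (disabled), while the CLGEMMConvolutionLayer overload calls
// a configure() signature without that argument. SFINAE on the function type selects the right one, and
// the OpenCL-specific overload is only compiled when ARM_COMPUTE_OPENCL_ENABLED is defined.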
template <typename ConvolutionFunction, typename TensorType>
#ifdef ARM_COMPUTE_OPENCL_ENABLED
std::enable_if_t<!std::is_same<ConvolutionFunction, CLGEMMConvolutionLayer>::value, void>
#else // ARM_COMPUTE_OPENCL_ENABLED
void
#endif // ARM_COMPUTE_OPENCL_ENABLED
configure_conv_function(ConvolutionFunction &func,
                        TensorType *src, const TensorType *weights, const TensorType *bias, TensorType *dst,
                        const PadStrideInfo &info, const WeightsInfo &weights_info,
                        const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    func.configure(src, weights, bias, dst, info, weights_info, dilation, act_info, false /* enable_fast_math */, num_groups);
}

#ifdef ARM_COMPUTE_OPENCL_ENABLED
template <typename ConvolutionFunction, typename TensorType>
std::enable_if_t<std::is_same<ConvolutionFunction, CLGEMMConvolutionLayer>::value, void>
configure_conv_function(ConvolutionFunction &func,
                        TensorType *src, const TensorType *weights, const TensorType *bias, TensorType *dst,
                        const PadStrideInfo &info, const WeightsInfo &weights_info,
                        const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    func.configure(src, weights, bias, dst, info, weights_info, dilation, act_info, num_groups);
}
#endif // ARM_COMPUTE_OPENCL_ENABLED
} // namespace detail

template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
class ConvolutionValidationGenericFixture : public framework::Fixture
{
public:
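    // Bias data type: quantized asymmetric inputs (uint8_t / int8_t) accumulate into 32-bit biases, any
    // other input type keeps the same type for the bias (e.g. T = float -> TBias = float).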
    using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value
                  || std::is_same<typename std::decay<T>::type, int8_t>::value,
                  int32_t, T >::type;

public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
               DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info,
               bool mixed_layout = false, PaddingList pre_pad_layer = PaddingList({}))
    {
        _mixed_layout             = mixed_layout;
        _data_type                = data_type;
        _weights_data_type        = weights_data_type;
        _is_quantized             = is_data_type_quantized_asymmetric(data_type);
        _is_bfloat16              = data_type == DataType::BFLOAT16;
        _bias_data_type           = _is_quantized ? DataType::S32 : (_is_bfloat16 ? DataType::F32 : data_type);
        _output_data_type         = _is_bfloat16 ? DataType::F32 : data_type;
        _quantization_info        = quantization_info;
        _weight_quantization_info = weight_quantization_info;
        _data_layout              = data_layout;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info, pre_pad_layer);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info, pre_pad_layer);
    }

protected:
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        // Test multi data-layout graph cases, where the data layout changes after configure
        src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Convolution function
        layer.run();

        // Reinstate the original data layout so the test suite checks the values against the right layout
        src.info()->set_data_layout(_data_layout);
        dst.info()->set_data_layout(_data_layout);
    }

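    // Rounds every float value in the buffer through bfloat16 so the reference inputs carry the same
    // precision loss as the bfloat16 target path, keeping the output comparison meaningful.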
    void regularize_values(void *values, size_t size)
    {
        float *fvalues = static_cast<float *>(values);
        for(size_t i = 0; i < size; ++i)
        {
            fvalues[i] = float(bfloat16(fvalues[i]));
        }
    }

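    // Fills a tensor with values drawn from a bounded distribution appropriate for its data type:
    // quantized types use bounds derived from the quantization info, floating-point types use [-1, 1],
    // S32 uses [-100, 100], and anything else falls back to the library's default uniform fill.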
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                std::pair<int, int>                     bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QASYMM8_SIGNED:
            {
                std::pair<int, int>                    bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QSYMM8_PER_CHANNEL:
            {
                int min_bound = 128;
                int max_bound = -127;
                for(size_t i = 0; i < _weight_quantization_info.scale().size(); i++)
                {
                    std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
                    if(bounds.first < min_bound)
                    {
                        min_bound = bounds.first;
                    }
                    if(bounds.second > max_bound)
                    {
                        max_bound = bounds.second;
                    }
                }
                std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                std::uniform_int_distribution<int32_t> distribution(-100, 100);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::BFLOAT16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    // Note: the shapes provided by the dataset are in NCHW format
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
    {
        ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

        const unsigned int num_groups = input_shape[2] / weights_shape[2];

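        // Shapes (and the optional pre-padding list) come from the dataset in NCHW order; permute them to
        // NHWC when that is the layout under test, so the tensors below are created with matching dimensions.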
        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));

            if(pre_pad_layer.size() > 0)
            {
                // Make sure paddings exist for each of the c, h, w dimensions
                for(unsigned int i = 0; i < 3 - pre_pad_layer.size(); ++i)
                {
                    pre_pad_layer.push_back({ 0, 0 });
                }

                // Rotate padding info from NCHW to NHWC
                std::rotate(pre_pad_layer.begin(), pre_pad_layer.begin() + 2, pre_pad_layer.begin() + 3);
            }
        }

        const int idx_width  = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
        const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);

        WeightsInfo weights_info(!reshape_weights, weights_shape[idx_width], weights_shape[idx_height], weights_shape[3]);
        TensorShape reshaped_weights_shape(weights_shape);

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, _weight_quantization_info, _data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, _output_data_type, 1, _quantization_info, _data_layout);

        // Create and configure function
        FunctionType conv;

        const unsigned int height_index = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::HEIGHT);
        const unsigned int width_index  = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::WIDTH);

        const PaddingInfo pad_w = width_index < pre_pad_layer.size() ? pre_pad_layer[width_index] : PaddingInfo(0, 0);
        const PaddingInfo pad_h = height_index < pre_pad_layer.size() ? pre_pad_layer[height_index] : PaddingInfo(0, 0);

        if(pre_pad_layer.size() > 0 && arm_compute::graph::is_padding_in_height_or_width(_data_layout, pre_pad_layer))
        {
            // This mirrors the logic implemented in NodeFusionMutator -> fuse_pad_with_convolution
            const PadStrideInfo new_conv_info(
                info.stride().first,
                info.stride().second,
                info.pad_left() + pad_w.first,
                info.pad_right() + pad_w.second,
                info.pad_top() + pad_h.first,
                info.pad_bottom() + pad_h.second,
                info.round());
            detail::configure_conv_function(conv, &src, &weights, &bias, &dst, new_conv_info, weights_info, dilation, act_info, num_groups);
        }
        else
        {
            detail::configure_conv_function(conv, &src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups);
        }

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        add_padding_x({ &src, &weights, &bias, &dst }, _data_layout);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0);
        fill(AccessorType(weights), 1);
        fill(AccessorType(bias), 2);

        if(_mixed_layout)
        {
            mix_layout(conv, src, dst);
        }
        else
        {
            // Compute Convolution function
            conv.run();
        }

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
    {
        ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

        const unsigned int num_groups = input_shape[2] / weights_shape[2];

        // Setup reference data types
        const DataType src_dt     = _is_bfloat16 ? DataType::F32 : _data_type;
        const DataType weights_dt = _is_bfloat16 ? DataType::F32 : _weights_data_type;
        const DataType bias_dt    = _is_bfloat16 ? DataType::F32 : _bias_data_type;

        // Create reference
        SimpleTensor<T>     src{ input_shape, src_dt, 1, _quantization_info };
        SimpleTensor<TW>    weights{ weights_shape, weights_dt, 1, _weight_quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, bias_dt, 1, _quantization_info };

        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        // Round the inputs through bfloat16 to mimic the conversion and reduce mismatches in the output
        if(_is_bfloat16)
        {
            regularize_values(static_cast<void *>(src.data()), src.num_elements());
            regularize_values(static_cast<void *>(weights.data()), weights.num_elements());
        }

        if(pre_pad_layer.size() > 0)
        {
            src = reference::pad_layer<T>(src, pre_pad_layer, PixelValue(0), PaddingMode::CONSTANT);
        }

        return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups), act_info)
                                    : reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups);
    }

    TensorType       _target{};
    SimpleTensor<T>  _reference{};
    DataType         _data_type{};
    DataType         _weights_data_type{};
    DataType         _bias_data_type{};
    DataType         _output_data_type{};
    DataLayout       _data_layout{};
    QuantizationInfo _quantization_info{};
    QuantizationInfo _weight_quantization_info{};
    bool             _is_quantized = false;
    bool             _is_bfloat16  = false;
    bool             _mixed_layout = false;
};

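// The fixtures below are thin wrappers that forward to ConvolutionValidationGenericFixture with a reduced
// parameter list. Illustrative binding from a backend test file (a sketch; the alias name is a placeholder,
// not defined in this header):
//   template <typename T>
//   using NEConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T>;
// which is then instantiated through the framework's FIXTURE_DATA_TEST_CASE macros.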
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
               DataLayout data_layout, ActivationLayerInfo act_info)
    {
        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
                                                                                                 data_type, data_type, data_layout,
                                                                                                 QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ConvolutionValidationWithPaddingFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
               DataLayout data_layout, ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
    {
        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
                                                                                                 data_type, data_type, data_layout,
                                                                                                 QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout, pre_pad_layer);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
               DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
    {
        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
                                                                                                 data_type, data_type, data_layout, quantization_info, quantization_info, act_info, mixed_layout);
    }
};

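// Per-channel quantized weights: a random scale in [0.01, 1.0) is generated for every output channel
// (output_shape[2]) and passed as the weights' QuantizationInfo.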
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
class ConvolutionValidationQuantizedPerChannelFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
               DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataType weights_data_type)
    {
        std::vector<float>                    weights_scales{};
        std::mt19937                          gen(library->seed());
        std::uniform_real_distribution<float> dis(0.01f, 1.f);
        for(size_t i = 0; i < output_shape[2]; ++i)
        {
            weights_scales.push_back(dis(gen));
        }
        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
                                                                                                  reshape_weights, data_type, weights_data_type, data_layout,
                                                                                                  quantization_info, QuantizationInfo(weights_scales), act_info);
    }
};

#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
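// Builds a TensorInfo describing the weights once rearranged into the requested fixed weight format
// (OHWIo<interleave_by>i<block_by>): the input-channel and output-channel dimensions are padded up to
// multiples of block_by and interleave_by respectively, and strides and total size are recomputed to match.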
inline TensorInfo prepare_weights(const TensorInfo tensor_info, const arm_compute::WeightFormat weight_format)
{
    const DataLayout data_layout = tensor_info.data_layout();
    ARM_COMPUTE_EXPECT(data_layout == DataLayout::NHWC, framework::LogLevel::ERRORS);
    const DataType    data_type    = tensor_info.data_type();
    const TensorShape tensor_shape = tensor_info.tensor_shape();
    const int         N            = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O
    const int         H            = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)];
    const int         W            = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)];
    const int         C            = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I

    const int interleave_by = arm_compute::interleave_by(weight_format);
    const int block_by      = arm_compute::block_by(weight_format);
    const int Ip            = arm_gemm::roundup<unsigned int>(C, block_by);      // C'=I'
    const int Op            = arm_gemm::roundup<unsigned int>(N, interleave_by); // O'=N'

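    // Recompute the strides and the total allocation size so that they account for the padded I' and O'
    // dimensions of the blocked layout (the shape below is expressed as I', W, H, O').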
    arm_compute::Strides strides_in_bytes = tensor_info.strides_in_bytes();
    strides_in_bytes.set(1, Ip * interleave_by * H * W * tensor_info.element_size());
    strides_in_bytes.set(2, Ip * Op * tensor_info.element_size());

    const size_t offset_first_element_in_bytes = tensor_info.offset_first_element_in_bytes();

    // Total size needs to include padded dimensions
    const size_t total_size_in_bytes = Op * H * W * Ip * tensor_info.element_size();

    const TensorShape TS(Ip, W, H, Op);

    TensorInfo new_tensor_info = tensor_info;
    new_tensor_info.init(TS, 1 /*num_channels, deprecated*/, data_type, strides_in_bytes,
                         offset_first_element_in_bytes, total_size_in_bytes);
    return new_tensor_info;
}

template <typename ScalarType, typename AccessorType>
inline void rearrange_data(const AccessorType src, AccessorType dst, const arm_compute::WeightFormat weight_format)
{
    ARM_COMPUTE_EXPECT(arm_compute::is_fixed_format(weight_format), framework::LogLevel::ERRORS);
    // Data Layout: OHWIo<interleave_by>i<block_by>
    const int         interleave_by    = arm_compute::interleave_by(weight_format);
    const int         block_by         = arm_compute::block_by(weight_format);
    const TensorShape src_tensor_shape = src.shape();
    const DataLayout  data_layout      = src.data_layout();
    ARM_COMPUTE_EXPECT(data_layout == DataLayout::NHWC, framework::LogLevel::ERRORS);
    const unsigned int O  = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O
    const unsigned int H  = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)];
    const unsigned int W  = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)];
    const unsigned int I  = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I
    const unsigned int Ip = arm_gemm::roundup<unsigned int>(I, block_by);      // C'=I'
    const unsigned int Op = arm_gemm::roundup<unsigned int>(O, interleave_by); // N'=O'

    ARM_COMPUTE_EXPECT_EQUAL(Op * H * W * Ip, (unsigned)dst.num_elements(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(src.num_elements() <= dst.num_elements(), framework::LogLevel::ERRORS);

    const ScalarType *src_ptr = reinterpret_cast<const ScalarType *>(src.data());
    ScalarType       *dst_ptr = reinterpret_cast<ScalarType *>(dst.data());
    for(unsigned i = 0; i < I; ++i)
        for(unsigned w = 0; w < W; ++w)
            for(unsigned h = 0; h < H; ++h)
                for(unsigned o = 0; o < O; ++o)
                {
                    ScalarType src_element;
                    switch(data_layout)
                    {
                        case DataLayout::NHWC:
                        {
                            src_element = src_ptr[o * H * W * I + h * W * I + w * I + i];
                        }
                        break;
                        default:
                        {
                            ARM_COMPUTE_ERROR("Unsupported memory layout.");
                        }
                    }
                    const int x5 = std::floor(((float)o) / interleave_by);
                    const int x4 = h;
                    const int x3 = w;
                    const int x2 = std::floor((float)i / block_by);
                    const int x1 = o % interleave_by;
                    const int x0 = i % block_by;
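                    // Destination index in the OHWIo<interleave_by>i<block_by> layout: x5/x2 select the
                    // interleave and block tiles, x1/x0 are the offsets inside them.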
                    unsigned dst_idx = x5 * H * W * Ip * interleave_by
                                       + x4 * W * Ip * interleave_by
                                       + x3 * Ip * interleave_by
                                       + x2 * interleave_by * block_by
                                       + x1 * block_by
                                       + x0;
                    dst_ptr[dst_idx] = src_element;
                }
}

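// Base fixture for the fixed-format "variable weights" tests: it queries has_opt_impl() for a fixed-format
// kernel, rearranges the weights on the host into the computed weight format and runs the convolution twice
// with different weight values, so the kernel is exercised with weights that change between runs.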
template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math>
class VariableWeightsFixtureBaseClass : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataLayout data_layout,
               const DataType data_type)
    {
        conv = std::make_unique<ConvolutionFunction>();
        // Prepare data
        _data_layout = data_layout;
        // Fixed-format kernels for variable weights only work with the NHWC layout.
        ARM_COMPUTE_EXPECT_EQUAL(_data_layout, DataLayout::NHWC, framework::LogLevel::ERRORS);
        _data_type = data_type;
        // Run the code
        compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation);
        compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation);
    }
    void teardown()
    {
        _target.allocator()->free();
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

private:
    virtual void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info,
                                              const PadStrideInfo &conv_info,
                                              const Size2D &dilation) = 0;

    void compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &conv_info,
                        const Size2D &dilation)
    {
        // The dataset is always in NCHW format - we need to make C the
        // innermost dimension because the fixed-format kernels work only
        // with the NHWC layout.
        permute(input_shape, PermutationVector(2U, 0U, 1U));
        permute(weights_shape, PermutationVector(2U, 0U, 1U));
        permute(output_shape, PermutationVector(2U, 0U, 1U));
        const auto src_tensor_info    = TensorInfo(input_shape, 1, _data_type, _data_layout);
        const auto weight_tensor_info = TensorInfo(weights_shape, 1, _data_type, _data_layout);
        const auto bias_tensor_info   = TensorInfo(bias_shape, 1, _data_type, _data_layout);
        auto       dst_tensor_info    = TensorInfo(output_shape, 1, _data_type, _data_layout);

        const int kernel_height = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT)];
        const int kernel_width  = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH)];
        const int num_kernels   = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::BATCHES)];

        const WeightsInfo query_weights_info(/*reshape_weights*/ false, kernel_width, kernel_height, num_kernels, false, arm_compute::WeightFormat::ANY);
        const bool        kernel_found = bool(ConvolutionFunction::has_opt_impl(_computed_weight_format, &src_tensor_info, &weight_tensor_info,
                                                                                &bias_tensor_info, &dst_tensor_info, conv_info, query_weights_info));
        // Make sure that the setup found a fixed-format kernel as requested by the test case.
        ARM_COMPUTE_EXPECT(kernel_found, framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(arm_compute::is_fixed_format(_computed_weight_format), framework::LogLevel::ERRORS);

        const WeightsInfo weights_info(/*reshape_weights*/ false, kernel_width, kernel_height, num_kernels, false, _computed_weight_format);
        configure_and_execute_kernel(src_tensor_info, weight_tensor_info, bias_tensor_info, dst_tensor_info, weights_info, conv_info,
                                     dilation);
    }
    void compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                           const Size2D &dilation)
    {
        ARM_COMPUTE_UNUSED(input_shape, weights_shape, bias_shape, output_shape, info,
                           dilation);

        // Create reference
        SimpleTensor<ScalarType> src{ input_shape, _data_type };
        SimpleTensor<ScalarType> weights{ weights_shape, _data_type };
        SimpleTensor<ScalarType> bias{ bias_shape, _data_type };
        fill(src, 0);
        fill(bias, 1);
        fill(weights, 3);
        _reference = reference::convolution_layer<ScalarType>(src, weights, bias, output_shape, info, dilation, 1 /*num_groups*/);
    }
    DataLayout _data_layout{};
    DataType   _data_type{};

protected:
    std::unique_ptr<ConvolutionFunction> conv{};
    arm_compute::WeightFormat            _computed_weight_format{ arm_compute::WeightFormat::UNSPECIFIED };
    TensorClass                          _target{};
    SimpleTensor<ScalarType>             _reference{};
};

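// This flavour configures the convolution on TensorInfo objects and executes it through an ITensorPack,
// so the (rearranged) weights tensor can be swapped between run() calls without reconfiguring.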
template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math>
class VariableWeightsFixture : public VariableWeightsFixtureBaseClass<ConvolutionFunction, TensorClass, AccessorType, ScalarType, enable_fast_math>
{
    void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info,
                                      const PadStrideInfo &conv_info,
                                      const Size2D &dilation)
    {
        this->conv->configure(&src_tensor_info, &weight_tensor_info, &bias_tensor_info, &dst_tensor_info, conv_info, weights_info, dilation, ActivationLayerInfo(), enable_fast_math);

        // Allocate input tensors
        auto             src                 = create_tensor<TensorClass>(src_tensor_info);
        auto             weights_original    = create_tensor<TensorClass>(weight_tensor_info);
        const TensorInfo new_tensor_info     = prepare_weights(weight_tensor_info, this->_computed_weight_format);
        auto             weights_transformed = create_tensor<TensorClass>(new_tensor_info);
        auto             bias                = create_tensor<TensorClass>(bias_tensor_info);
        src.allocator()->allocate();
        weights_original.allocator()->allocate();
        weights_transformed.allocator()->allocate();
        bias.allocator()->allocate();
        // Allocate destination tensor
        this->_target = create_tensor<TensorClass>(dst_tensor_info);
        this->_target.allocator()->allocate();

        // Prepare source and biases that are left unchanged.
        this->fill(AccessorType(src), 0);
        this->fill(AccessorType(bias), 1);

        // First run
        this->fill(AccessorType(weights_original), 2);
        rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
        ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weights_transformed }, { TensorType::ACL_SRC_2, &bias }, { TensorType::ACL_DST, &(this->_target) } };
        this->conv->run(run_pack);
        // Second run, with new weights
        this->fill(AccessorType(weights_original), 3);
        rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
        this->conv->run(run_pack);
        src.allocator()->free();
        weights_original.allocator()->free();
        weights_transformed.allocator()->free();
        bias.allocator()->free();
    }
};

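// Run-time function flavour: the convolution is configured directly on the allocated tensors and executed
// with run(); the transformed weights tensor is re-filled in place between the two runs.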
template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math>
class VariableWeightsFixtureNEInterface : public VariableWeightsFixtureBaseClass<ConvolutionFunction, TensorClass, AccessorType, ScalarType, enable_fast_math>
{
    void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info,
                                      const PadStrideInfo &conv_info,
                                      const Size2D &dilation)
    {
        // Allocate input tensors
        auto             src                 = create_tensor<TensorClass>(src_tensor_info);
        auto             weights_original    = create_tensor<TensorClass>(weight_tensor_info);
        const TensorInfo new_tensor_info     = prepare_weights(weight_tensor_info, this->_computed_weight_format);
        auto             weights_transformed = create_tensor<TensorClass>(new_tensor_info);
        auto             bias                = create_tensor<TensorClass>(bias_tensor_info);
        src.allocator()->allocate();
        weights_original.allocator()->allocate();
        weights_transformed.allocator()->allocate();
        bias.allocator()->allocate();
        // Allocate destination tensor
        this->_target = create_tensor<TensorClass>(dst_tensor_info);
        this->_target.allocator()->allocate();
        this->conv->configure(&src, &weights_transformed, &bias, &(this->_target), conv_info, weights_info, dilation, ActivationLayerInfo(), enable_fast_math);
        // Prepare source and biases that are left unchanged.
        this->fill(AccessorType(src), 0);
        this->fill(AccessorType(bias), 1);

        // First run
        this->fill(AccessorType(weights_original), 2);
        rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
        this->conv->run();
        // Second run, with new weights
        this->fill(AccessorType(weights_original), 3);
        rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
        this->conv->run();
        src.allocator()->free();
        weights_original.allocator()->free();
        weights_transformed.allocator()->free();
        bias.allocator()->free();
    }
};

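// Checks whether an optimized convolution implementation exists for the requested weight format on a fixed
// problem size, without running it; the weight format actually selected is stored in _computed_weight_format.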
template <typename ConvolutionClass, bool enable_fast_math>
class HasOptImplFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(DataType data_type, arm_compute::WeightFormat query_weight_format)
    {
        auto              conv        = std::make_unique<ConvolutionClass>();
        const auto        src_info    = TensorInfo(TensorShape(56U, 56U, 64U), 1, data_type, DataLayout::NHWC);
        const auto        weight_info = TensorInfo(TensorShape(64, 3U, 3U, 64U), 1, enable_fast_math ? DataType::BFLOAT16 : data_type, DataLayout::NHWC);
        const auto        bias_info   = TensorInfo(TensorShape(64U), 1, data_type, DataLayout::NHWC);
        auto              dst_info    = TensorInfo(TensorShape(56U, 56U, 64U), 1, data_type, DataLayout::NHWC);
        const auto        conv_info   = PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR);
        const WeightsInfo weights_info(false, 3U, 3U, 64U, false, query_weight_format);
        _kernel_found = bool(ConvolutionClass::has_opt_impl(_computed_weight_format, &src_info, &weight_info,
                                                            &bias_info, &dst_info, conv_info, weights_info,
                                                            /*dilation*/ Size2D(1U, 1U), /*act_info*/ ActivationLayerInfo(), enable_fast_math));
    }

protected:
    bool                      _kernel_found{ false };
    arm_compute::WeightFormat _computed_weight_format{ arm_compute::WeightFormat::UNSPECIFIED };
};
#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS

} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE */