/*
 * Copyright (c) 2019-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_FFT_FIXTURE
#define ARM_COMPUTE_TEST_FFT_FIXTURE

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/FunctionDescriptors.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/DFT.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
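/** Fixture that runs a forward FFT (1D or 2D, selected by InfoType) on the target backend and validates it against the reference DFT implementation. */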
template <typename TensorType, typename AccessorType, typename FunctionType, typename InfoType, typename T>
class FFTValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, DataType data_type)
    {
        _target    = compute_target(shape, data_type);
        _reference = compute_reference(shape, data_type);
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(_target.info()->tensor_shape(), _reference.shape());
    }

protected:
    template <typename U>
    void fill(U &&tensor)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -5.0f, 5.0f };
                library->fill(tensor, distribution, 0);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-5.0f, 5.0f);
                library->fill(tensor, distribution, 0);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, 0);
        }
    }

    TensorType compute_target(const TensorShape &shape, DataType data_type)
    {
        // Create tensors
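        // Two channels are used so that each element holds a complex value (real and imaginary parts)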
        TensorType src = create_tensor<TensorType>(shape, data_type, 2);
        TensorType dst = create_tensor<TensorType>(shape, data_type, 2);

        // Create and configure function
        FunctionType fft;
        fft.configure(&src, &dst, InfoType());

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        add_padding_x({ &src, &dst });

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src));

        // Compute function
        fft.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src{ shape, data_type, 2 };

        // Fill reference
        fill(src);
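        // Select the reference transform from the descriptor type: FFT1DInfo maps to the 1D DFT, any other InfoType to the 2D DFT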
        if(std::is_same<InfoType, FFT1DInfo>::value)
        {
            return reference::dft_1d(src, reference::FFTDirection::Forward);
        }
        else
        {
            return reference::dft_2d(src, reference::FFTDirection::Forward);
        }
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

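/** Generic fixture that validates an FFT-based convolution (optionally followed by an activation) against the reference convolution layer, with optional mixed data-layout testing. */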
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class FFTConvolutionValidationGenericFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false)
    {
        _mixed_layout = mixed_layout;
        _data_type    = data_type;
        _data_layout  = data_layout;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
    }

protected:
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        // Test multi data-layout graph cases, where the data layout changes after configure
        src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Convolution function
        layer.run();

        // Reinstate the original data layout so that the test suite can properly check the values
        src.info()->set_data_layout(_data_layout);
        dst.info()->set_data_layout(_data_layout);
    }

    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              const Size2D &dilation, const ActivationLayerInfo act_info)
    {
        ARM_COMPUTE_UNUSED(dilation);
        ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, QuantizationInfo(), _data_layout);

        add_padding_x({ &src, &weights, &bias, &dst }, _data_layout);

        // Create and configure function
        FunctionType conv;
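        // The trailing flag is expected to select the layer's fast-math path when running in F16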
        conv.configure(&src, &weights, &bias, &dst, info, act_info, _data_type == DataType::F16);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0);
        fill(AccessorType(weights), 1);
        fill(AccessorType(bias), 2);

        if(_mixed_layout)
        {
            mix_layout(conv, src, dst);
        }
        else
        {
            // Compute Convolution function
            conv.run();
        }
        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      const Size2D &dilation, const ActivationLayerInfo act_info)
    {
        ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

        // Create reference
        SimpleTensor<T> src{ input_shape, _data_type, 1 };
        SimpleTensor<T> weights{ weights_shape, _data_type, 1 };
        SimpleTensor<T> bias{ bias_shape, _data_type, 1 };

        // Fill reference
        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation), act_info) : reference::convolution_layer<T>(src,
                weights, bias, output_shape, info, dilation);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
    DataType        _data_type{};
    DataLayout      _data_layout{};
    bool            _mixed_layout{ false };
};

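/** Convenience fixture that forwards to FFTConvolutionValidationGenericFixture, passing the mixed_layout flag as a template parameter. */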
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class FFTConvolutionValidationFixture : public FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info)
    {
        FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
                                                                                                 data_type, data_layout, act_info, mixed_layout);
    }
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_FFT_FIXTURE */