/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CPP/CPPScheduler.h"
#include "src/core/CL/kernels/CLFFTDigitReverseKernel.h"
#include "src/core/CL/kernels/CLFFTRadixStageKernel.h"
#include "src/core/CL/kernels/CLFFTScaleKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLPadLayerKernel.h"
#include "src/core/CL/kernels/CLReductionOperationKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/utils/helpers/fft.h"

#include "src/common/utils/Log.h"

namespace arm_compute
{
namespace
{
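// Returns the number of extra elements that must be appended to a length N so
// that it can be decomposed into the radix stages supported by
// CLFFTRadixStageKernel; the FFT kernels can only operate on lengths that
// factor into those radices.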
int pad_decomposable(int N)
{
    const auto supported_radix = CLFFTRadixStageKernel::supported_radix();

    int  pad           = 0;
    bool is_decomposed = false;
    while(!is_decomposed)
    {
        const auto decomposed_vector = arm_compute::helpers::fft::decompose_stages(N++, supported_radix);
        is_decomposed                = !decomposed_vector.empty();
        if(!is_decomposed)
        {
            ++pad;
        }
    }
    return pad;
}
} // namespace
CLFFTConvolutionLayer::CLFFTConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager),
      _flip_weights_func(),
      _permute_input_func(),
      _permute_output_func(),
      _permute_weights_func(),
      _permute_bias_func(),
      _pad_input_func(),
      _pad_weights_func(),
      _transform_input_func(memory_manager),
      _transform_weights_func(),
      _itransform_output_func(memory_manager),
      _prod_func(),
      _reduce_func(),
      _extract_output_func(),
      _bias_add_func(),
      _activation_layer_func(),
      _permuted_input(),
      _permuted_weights(),
      _permuted_bias(),
      _permuted_output(),
      _padded_input(),
      _padded_weights(),
      _flip_axis(),
      _flipped_weights(),
      _transformed_input(),
      _transformed_weights(),
      _input_weights_product(),
      _output_product(),
      _output_reduced(),
      _itransformed_output(),
      _reshaped_output(),
      _bias_output(),
      _original_weights(nullptr),
      _original_bias(nullptr),
      _is_activationlayer_enabled(false),
      _needs_permute(false),
      _has_bias(false),
      _is_prepared(false)
{
}

void CLFFTConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                                      const ActivationLayerInfo &act_info, bool enable_fast_math)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, act_info, enable_fast_math);
}

void CLFFTConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                                      const ActivationLayerInfo &act_info, bool enable_fast_math)
{
    ARM_COMPUTE_UNUSED(enable_fast_math);
    ARM_COMPUTE_ERROR_THROW_ON(CLFFTConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info, act_info, enable_fast_math));
    ARM_COMPUTE_LOG_PARAMS(input, weights, biases, output, conv_info, act_info, enable_fast_math);

    _original_weights = weights;
    _original_bias    = biases;

    // Flag if bias addition is required
    _has_bias = biases != nullptr;

    // Get indices for the width and height
    const size_t idx_width  = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);

    // Input shape, kernel size and output tile
    const Size2D input_dims  = Size2D(input->info()->tensor_shape()[idx_width], input->info()->tensor_shape()[idx_height]);
    const Size2D kernel_size = Size2D(weights->info()->tensor_shape()[idx_width], weights->info()->tensor_shape()[idx_height]);
    const Size2D pad_valid   = Size2D(pad_decomposable(input_dims.x() + kernel_size.x() - 1),
                                      pad_decomposable(input_dims.y() + kernel_size.y() - 1));
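    // The linear convolution of an input of size I with a kernel of size K spans I + K - 1
    // samples per dimension; pad_valid is the extra padding needed to round that size up to
    // a length the FFT kernels can decompose into their supported radices.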
    // Tensors to use
    ICLTensor       *input_to_use   = input;
    const ICLTensor *weights_to_use = weights;
    ICLTensor       *output_to_use  = _has_bias ? &_bias_output : output;

    // Permute bias
    if(biases != nullptr)
    {
        _permute_bias_func.configure(compile_context, biases, &_permuted_bias, PermutationVector(1U, 2U, 0U));
        _permuted_bias.info()->set_data_layout(DataLayout::NCHW);
    }

    // Permute input if needed
    _needs_permute = input->info()->data_layout() == DataLayout::NHWC;
    if(_needs_permute)
    {
        _memory_group.manage(&_permuted_input);
        // Configure the function to transform the input tensor from NHWC -> NCHW
        _permute_input_func.configure(compile_context, input, &_permuted_input, PermutationVector(1U, 2U, 0U));
        _permuted_input.info()->set_data_layout(DataLayout::NCHW);

        // Configure the function to transform the weights tensor from HWI -> IHW
        _permute_weights_func.configure(compile_context, weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
        _permuted_weights.info()->set_data_layout(DataLayout::NCHW);

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
    }

    // Flip weights
    _flipped_weights.allocator()->init(weights_to_use->info()->clone()->set_is_resizable(true).reset_padding());
    _flip_axis.allocator()->init(TensorInfo(TensorShape(2U), 1, DataType::U32));
    _flip_weights_func.configure(compile_context, weights_to_use, &_flipped_weights, &_flip_axis);
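    // Reversing the kernel along width and height (see the axis data set up at the end of
    // configure()) turns the cross-correlation computed by a convolution layer into a true
    // convolution, which is what the element-wise product in the frequency domain implements.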

    // Pad weights
    const PaddingList padding_w = { { 0, input_dims.x() + pad_valid.x() - 1 }, { 0, input_dims.y() + pad_valid.y() - 1 } };
    _pad_weights_func.configure(compile_context, &_flipped_weights, &_padded_weights, padding_w);
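    // The zero-padding extends the flipped kernel to the common FFT size
    // (kernel + input - 1 + pad_valid) in each spatial dimension.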

    // Transform weights
    _transform_weights_func = std::make_unique<CLFFT2D>();
    _transform_weights_func->configure(compile_context, &_padded_weights, &_transformed_weights, FFT2DInfo());

    // Pad input
    const PaddingList padding_in = { { 0, kernel_size.x() + pad_valid.x() - 1 }, { 0, kernel_size.y() + pad_valid.y() - 1 } };
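    // The input is zero-padded to the same FFT size as the weights so that the two
    // transformed tensors can be multiplied element-wise.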
    _memory_group.manage(&_padded_input);
    _pad_input_func.configure(compile_context, input_to_use, &_padded_input, padding_in);
    if(_needs_permute)
    {
        _permuted_input.allocator()->allocate();
    }

    // Transform input
    _memory_group.manage(&_transformed_input);
    _transform_input_func.configure(compile_context, &_padded_input, &_transformed_input, FFT2DInfo());
    _padded_input.allocator()->allocate();

    // Perform product
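    // The product is a complex, element-wise multiplication of the transformed input and
    // transformed weights; multiplication in the frequency domain corresponds to
    // convolution in the spatial domain.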
    _memory_group.manage(&_output_product);
    _prod_func.configure(compile_context, &_transformed_input, &_transformed_weights, &_output_product);
    _transformed_input.allocator()->allocate();

    // Perform reduction
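    // Summing over axis 2 of the product accumulates the per-input-channel
    // contributions into each output feature map.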
    _memory_group.manage(&_output_reduced);
    _reduce_func.configure(compile_context, &_output_product, &_output_reduced, 2, ReductionOperation::SUM);
    _output_product.allocator()->allocate();

    // Transform output
    _memory_group.manage(&_itransformed_output);
    FFT2DInfo itransform_info;
    itransform_info.direction = FFTDirection::Inverse;
    _itransformed_output.allocator()->init(_output_reduced.info()->clone()->set_is_resizable(true).set_num_channels(1).reset_padding());
    _itransform_output_func.configure(compile_context, &_output_reduced, &_itransformed_output, itransform_info);
    _output_reduced.allocator()->allocate();

    // Reshape output
    TensorShape reshaped_shape = _itransformed_output.info()->tensor_shape();
    reshaped_shape.remove_dimension(2);
    _reshaped_output.allocator()->init(_itransformed_output.info()->clone()->set_tensor_shape(reshaped_shape));

    // Extract correct region
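    // The inverse transform yields the full linear convolution, which is larger than the
    // requested output; the slice below drops the kernel border and the decomposability
    // padding according to the padding requested in conv_info.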
    const int start_left = kernel_size.x() - conv_info.pad_left() - 1;
    const int start_top  = kernel_size.y() - conv_info.pad_top() - 1;
    const int end_right  = _reshaped_output.info()->tensor_shape().x() - (kernel_size.x() - conv_info.pad_right() - 1) - pad_valid.x();
    const int end_bottom = _reshaped_output.info()->tensor_shape().y() - (kernel_size.y() - conv_info.pad_bottom() - 1) - pad_valid.y();
    if(_has_bias)
    {
        _memory_group.manage(&_bias_output);
    }
    else if(_needs_permute)
    {
        output_to_use = &_permuted_output;
        _memory_group.manage(&_permuted_output);
    }
    _extract_output_func.configure(compile_context, &_reshaped_output, output_to_use, Coordinates(start_left, start_top), Coordinates(end_right, end_bottom));
    _itransformed_output.allocator()->allocate();

    // Add bias
    if(biases != nullptr)
    {
        output_to_use = output;
        if(_needs_permute)
        {
            output_to_use = &_permuted_output;
            _memory_group.manage(&_permuted_output);
        }
        auto_init_if_empty(*output_to_use->info(), *_bias_output.info());
        _bias_add_func.configure(compile_context, &_bias_output, &_permuted_bias, output_to_use, ConvertPolicy::WRAP);
        _bias_output.allocator()->allocate();
    }

    // Permute output
    if(_needs_permute)
    {
        // Configure the function to transform the convolved output from NCHW back to the original NHWC layout
        _permuted_output.info()->set_data_layout(DataLayout::NCHW);
        _permute_output_func.configure(compile_context, &_permuted_output, output, PermutationVector(2U, 0U, 1U));

        // Allocate tensors
        _permuted_output.allocator()->allocate();
    }

    // Configure Activation Layer
    _is_activationlayer_enabled = act_info.enabled();
    if(_is_activationlayer_enabled)
    {
        _activation_layer_func.configure(compile_context, output, nullptr, act_info);
    }

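    // Axes 0 and 1 (width and height) are the dimensions along which the weights are
    // reversed by the flip function configured above.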
    // Setup flip axis data
    _flip_axis.allocator()->allocate();
    _flip_axis.map(true);
    auto axis_data = reinterpret_cast<uint32_t *>(_flip_axis.buffer());
    axis_data[0]   = 0;
    axis_data[1]   = 1;
    _flip_axis.unmap();
}

Status CLFFTConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                       const ActivationLayerInfo &act_info, bool enable_fast_math)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON((input->data_type() == DataType::F16) && !enable_fast_math);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);

    // Get indices for the width and height
    const size_t idx_width  = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);

    // Input shape, kernel size and output tile
    const Size2D kernel_size = Size2D(weights->tensor_shape()[idx_width], weights->tensor_shape()[idx_height]);

    // Strides
    const auto strides = conv_info.stride();
    ARM_COMPUTE_RETURN_ERROR_ON(strides.first != strides.second && strides.first != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(kernel_size.x() != kernel_size.y());
    ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_left() != (kernel_size.x() / 2) || conv_info.pad_right() != (kernel_size.x() / 2));
    ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_top() != (kernel_size.y() / 2) || conv_info.pad_bottom() != (kernel_size.y() / 2));
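    // Note: the checks above restrict the FFT path to square kernels with symmetric
    // 'same' padding (kernel_size / 2 on every side), and F16 inputs are only accepted
    // when enable_fast_math is set.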

    // Validate biases
    if(biases != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(weights->tensor_shape()[3] != biases->tensor_shape().x());
    }

    // Checks performed when output is configured
    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON((input->tensor_shape()[idx_height] != output->tensor_shape()[idx_height]) || (input->tensor_shape()[idx_width] != output->tensor_shape()[idx_width]));

        // Validate Activation Layer
        if(act_info.enabled())
        {
            ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
        }
    }

    return Status{};
}

void CLFFTConvolutionLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Transform input
    if(_needs_permute)
    {
        _permute_input_func.run();
    }
    _pad_input_func.run();
    _transform_input_func.run();

    // Perform operations in the frequency domain
    _prod_func.run();
    _reduce_func.run();

    // Transform output
    _itransform_output_func.run();
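    // _reshaped_output aliases the inverse-transformed buffer (only the shape metadata
    // differs), so importing the memory avoids a copy.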
    _reshaped_output.allocator()->import_memory(_itransformed_output.cl_buffer());
    _extract_output_func.run();
    // Add bias
    if(_has_bias)
    {
        _bias_add_func.run();
    }
    if(_needs_permute)
    {
        _permute_output_func.run();
    }

    // Run activation layer
    if(_is_activationlayer_enabled)
    {
        _activation_layer_func.run();
    }
}

void CLFFTConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
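        // One-off weight preparation: permute (if needed), flip, pad and transform the
        // weights to the frequency domain, releasing every intermediate tensor so that
        // only _transformed_weights stays resident for run().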
        // Permute bias to NCHW
        if(_original_bias != nullptr)
        {
            _permuted_bias.allocator()->allocate();
            _permute_bias_func.run();
            _original_bias->mark_as_unused();
        }

        const ICLTensor *cur_weights = _original_weights;
        // Permute weights
        if(_needs_permute)
        {
            ARM_COMPUTE_ERROR_ON(!cur_weights->is_used());

            _permuted_weights.allocator()->allocate();
            _permute_weights_func.run();
            cur_weights->mark_as_unused();
            cur_weights = &_permuted_weights;
        }

        // Flip weights
        _flipped_weights.allocator()->allocate();
        _flip_weights_func.run();
        cur_weights->mark_as_unused();

        // Pad weights
        _padded_weights.allocator()->allocate();
        _pad_weights_func.run();
        _flipped_weights.mark_as_unused();
        CLScheduler::get().queue().finish();
        _flipped_weights.allocator()->free();

        // Transform weights to frequency domain
        _transformed_weights.allocator()->allocate();
        _transform_weights_func->run();
        _padded_weights.mark_as_unused();
        CLScheduler::get().queue().finish();
        // Delete object and release internal memory
        _transform_weights_func.reset();
        _padded_weights.allocator()->free();

        _is_prepared = true;
    }
}
} // namespace arm_compute