/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
#include "src/common/utils/Log.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/operators/CpuConv2d.h"
#include "src/cpu/operators/CpuDirectConv2d.h"
#include "src/cpu/operators/CpuGemmConv2d.h"
#include "src/cpu/operators/CpuGemmDirectConv2d.h"
#include "src/cpu/operators/CpuWinogradConv2d.h"

namespace arm_compute
{
using namespace arm_compute::experimental;

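// Internal state: either a CPU operator (CpuConv2d) driven through tensor packs,
// or a fallback IFunction (the FFT-based path), plus the auxiliary workspace and
// memory bookkeeping needed to run it.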
struct NEConvolutionLayer::Impl
{
    MemoryGroup                        memory_group{};
    std::shared_ptr<IMemoryManager>    memory_manager{};
    std::unique_ptr<cpu::ICpuOperator> op{ nullptr };
    ITensorPack                        run_pack{};
    ITensorPack                        prep_pack{};
    WorkspaceData<Tensor>              workspace{};
    experimental::MemoryRequirements   aux_mem_req{};
    std::unique_ptr<IFunction>         func{ nullptr };
};

NEConvolutionLayer::NEConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _impl(std::make_unique<Impl>())
{
    _impl->memory_manager = std::move(memory_manager);
}

NEConvolutionLayer::~NEConvolutionLayer() = default;

void NEConvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                   const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_UNUSED(num_groups);
    ARM_COMPUTE_ERROR_THROW_ON(NEConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info,
                                                            enable_fast_math, num_groups));
    ARM_COMPUTE_LOG_PARAMS(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);

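    // Dispatch on the convolution method that CpuConv2d selects for these shapes and settings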
    const Conv2dInfo info(conv_info, dilation, act_info, enable_fast_math, num_groups);
    switch(cpu::CpuConv2d::get_convolution_method(input->info(), weights->info(), output->info(), conv_info, weights_info, dilation, act_info, enable_fast_math))
    {
        case ConvolutionMethod::WINOGRAD:
        case ConvolutionMethod::GEMM:
        case ConvolutionMethod::GEMM_CONV2D:
        case ConvolutionMethod::DIRECT:
        {
            auto f = std::make_unique<cpu::CpuConv2d>();
            f->configure(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
            _impl->op = std::move(f);
            break;
        }
        case ConvolutionMethod::FFT:
        {
            auto f = std::make_unique<NEFFTConvolutionLayer>(_impl->memory_manager);
            f->configure(input, weights, biases, output, conv_info, act_info);
            _impl->func = std::move(f);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Not supported.");
            break;
    }

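    // For operator-based paths, create the memory group, build the run/prepare tensor packs and allocate the auxiliary workspace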
    if(_impl->op)
    {
        _impl->memory_group = MemoryGroup(std::move(_impl->memory_manager));
        _impl->aux_mem_req  = _impl->op->workspace();
        _impl->run_pack     = { { ACL_SRC_0, input }, { ACL_SRC_1, weights }, { ACL_SRC_2, biases }, { ACL_DST, output } };
        _impl->prep_pack    = { { ACL_SRC_1, weights }, { ACL_SRC_2, biases } };
        _impl->workspace    = manage_workspace<Tensor>(_impl->aux_mem_req, _impl->memory_group, _impl->run_pack, _impl->prep_pack);
    }
}

Status NEConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                    const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    const Conv2dInfo info(conv_info, dilation, act_info, enable_fast_math, num_groups);
    switch(cpu::CpuConv2d::get_convolution_method(input, weights, output, conv_info, weights_info, dilation, act_info, enable_fast_math))
    {
        case ConvolutionMethod::WINOGRAD:
        case ConvolutionMethod::GEMM:
        case ConvolutionMethod::GEMM_CONV2D:
        case ConvolutionMethod::DIRECT:
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuConv2d::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups));
            break;
        case ConvolutionMethod::FFT:
            ARM_COMPUTE_RETURN_ON_ERROR(NEFFTConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info));
            break;
        default:
            ARM_COMPUTE_ERROR("Not supported.");
            break;
    }
    return Status{};
}

ConvolutionMethod NEConvolutionLayer::get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights,
                                                             const ITensorInfo *output, const PadStrideInfo &conv_info,
                                                             const WeightsInfo &weights_info, const Size2D &dilation,
                                                             const ActivationLayerInfo &act_info, bool enable_fast_math)
{
    return cpu::CpuConv2d::get_convolution_method(input, weights, output, conv_info, weights_info, dilation, act_info, enable_fast_math);
}

void NEConvolutionLayer::run()
{
    prepare();

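    // Acquire the memory-group resources (auxiliary tensors) for the duration of this run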
    MemoryGroupResourceScope scope_mg(_impl->memory_group);

    if(_impl->func)
    {
        _impl->func->run();
    }
    else
    {
        _impl->op->run(_impl->run_pack);
    }
}

void NEConvolutionLayer::prepare()
{
    if(_impl->func)
    {
        _impl->func->prepare();
    }
    else
    {
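        // Prepare the operator (e.g. weight transformations) using the constant inputs in the prepare pack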
        _impl->op->prepare(_impl->prep_pack);

        // Release temporary tensors that are only used in prepare stage
        release_temporaries<Tensor>(_impl->aux_mem_req, _impl->workspace);
    }
}
} // namespace arm_compute