/*
 * Copyright (c) 2021-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/operators/CpuGemmDirectConv2d.h"

#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/FunctionDescriptors.h"
#include "src/common/utils/Log.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/utils/CpuAuxTensorHandler.h"

#include "support/Cast.h"

#include <set>

namespace arm_compute
{
namespace cpu
{
using namespace arm_compute::experimental;
using namespace arm_compute::utils::cast;

namespace
{
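// Build the GEMMLowp output stage (requantization) descriptor for a quantized convolution,
// folding the activation's clamping range into the gemmlowp min/max bounds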
GEMMLowpOutputStageInfo calculate_output_stage_metadata(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const ActivationLayerInfo &act)
{
    // Extract the input, weights and output quantization info; if dst is not yet initialized, fall back to the input quantization info
    const QuantizationInfo        iqinfo    = src->quantization_info();
    const QuantizationInfo        wqinfo    = weights->quantization_info();
    const QuantizationInfo        oqinfo    = (dst->total_size() == 0) ? iqinfo : dst->quantization_info();
    const UniformQuantizationInfo uoqinfo   = oqinfo.uniform();
    const DataType                data_type = src->data_type();
    // Merge activation with output stage
    const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                               ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                               ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                             };
    PixelValue                                              type_min{};
    PixelValue                                              type_max{};
    std::tie(type_min, type_max) = get_min_max(data_type);
    int32_t min_activation       = type_min.get<int32_t>();
    int32_t max_activation       = type_max.get<int32_t>();
    if(supported_acts.count(act.activation()) != 0)
    {
        std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act, data_type, uoqinfo);
    }
    GEMMLowpOutputStageInfo os_info;
    os_info.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    os_info.gemmlowp_offset          = uoqinfo.offset;
    os_info.gemmlowp_min_bound       = min_activation;
    os_info.gemmlowp_max_bound       = max_activation;
    os_info.is_quantized_per_channel = (weights->data_type() == DataType::QSYMM8_PER_CHANNEL);
    quantization::calculate_quantized_multipliers(iqinfo, wqinfo, oqinfo, os_info);
    return os_info;
}
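// Translate the generic Conv2dInfo into the descriptor consumed by the assembly GEMM dispatch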
cpu::AsmGemmInfo init_assembly_metadata(const Conv2dInfo &info, bool is_indirect)
{
    cpu::AsmGemmInfo asm_info;
    asm_info.method                  = is_indirect ? cpu::AsmConvMethod::Indirect : cpu::AsmConvMethod::Conv;
    asm_info.ps_info                 = info.conv_info;
    asm_info.activation_info         = info.act_info;
    asm_info.depth_output_gemm3d     = true;
    asm_info.reinterpret_input_as_3d = true;
    asm_info.padding_top             = info.conv_info.pad_top();
    asm_info.padding_left            = info.conv_info.pad_left();
    asm_info.padding_value           = 0.f;
    asm_info.negated_offsets         = false;
    asm_info.fast_mode               = info.enable_fast_math;
    asm_info.fixed_format            = info.weights_info.weight_format() != WeightFormat::UNSPECIFIED;
    asm_info.weight_format           = info.weights_info.weight_format();
    return asm_info;
}
} // namespace

CpuGemmDirectConv2d::CpuGemmDirectConv2d()
    : _gemm_asm_func(std::make_unique<CpuGemmAssemblyDispatch>()),
      _activation_func(std::make_unique<CpuActivation>()),
      _weights_permute_func(std::make_unique<CpuPermute>()),
      _aux_mem(AuxTensorIdx::Count),
      _perm_weights(),
      _run_activation(false),
      _is_prepared(false)
{
}

CpuGemmDirectConv2d::~CpuGemmDirectConv2d() = default;

void CpuGemmDirectConv2d::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const Conv2dInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_ERROR_THROW_ON(CpuGemmDirectConv2d::validate(src, weights, biases, dst, info));
    ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, info);

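    // A separate activation pass is needed only when the assembly dispatch cannot fuse the requested activation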
    _run_activation = info.act_info.enabled() && !_gemm_asm_func->is_activation_supported(info.act_info);
    _is_prepared    = false;

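    // Permute the weights from the NHWC [IFM, W, H, OFM] order to [OFM, IFM, W, H] for the assembly
    // dispatch (ACL permute semantics: dst dimension i takes src dimension perm[i])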
    _weights_permute_func->configure(weights, &_perm_weights, PermutationVector{ 3, 0, 1, 2 });

    // Configure assembly dispatch
    cpu::AsmGemmInfo asm_info = init_assembly_metadata(info, false);
    if(is_data_type_quantized(src->data_type()))
    {
        asm_info.output_stage = calculate_output_stage_metadata(src, weights, dst, info.act_info);
    }
    _gemm_asm_func->configure(src, &_perm_weights, biases, dst, asm_info);

    // Configure activation
    if(_run_activation)
    {
        _activation_func->configure(dst, nullptr, info.act_info);
    }

    // Add auxiliary memory requirements of the assembly dispatch
    auto asm_mem_req           = _gemm_asm_func->workspace();
    _aux_mem[AsmGemmWorkspace] = asm_mem_req[AsmGemmWorkspace];
    _aux_mem[Pretranspose]     = asm_mem_req[Pretranspose];

    if(_aux_mem[Pretranspose].size > 0)
    {
        // Release the permuted weights at the end of prepare() as they are further transposed by the assembly dispatch
        _aux_mem[PermutedWeights] = MemoryInfo(offset_int_vec(PermutedWeights), MemoryLifetime::Prepare, weights->total_size());
    }
    else
    {
        // We must permute weights if they are WeightFormat::UNSPECIFIED
        if(info.weights_info.weight_format() == WeightFormat::UNSPECIFIED)
        {
            _aux_mem[PermutedWeights] = MemoryInfo(offset_int_vec(PermutedWeights), MemoryLifetime::Persistent, weights->total_size());
        }
    }
}
Status CpuGemmDirectConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv2dInfo &info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::BFLOAT16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::BFLOAT16, DataType::F16, DataType::F32);
    if(!is_fixed_format(info.weights_info.weight_format()))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, weights);
    }
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.num_groups > 1, "Grouping (num_groups != 1) is not supported on Neon");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_layout() != DataLayout::NHWC, "Only the NHWC data layout is supported");
    const DataType    data_type = src->data_type();
    const TensorShape i_shape   = src->tensor_shape();
    const TensorShape w_shape   = weights->tensor_shape();
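    // In NHWC, dimension 0 holds the channels: the weights' input channels must match the source channels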
    ARM_COMPUTE_RETURN_ERROR_ON(w_shape[0] != i_shape[0]);
    ARM_COMPUTE_RETURN_ERROR_ON(info.dilation != Size2D(1U, 1U));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
    // Validate biases
    if(biases != nullptr)
    {
        if(is_data_type_quantized_asymmetric(data_type))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else if(data_type == DataType::BFLOAT16)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::F32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(3));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    cpu::AsmGemmInfo asm_info = init_assembly_metadata(info, false);
    ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuGemmAssemblyDispatch::validate(src, weights, biases, dst, asm_info));
    return Status{};
}
void CpuGemmDirectConv2d::run(ITensorPack &tensors)
{
    prepare(tensors);

    _gemm_asm_func->run(tensors);
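    // Run the unfused activation in-place on the destination tensor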
    if(_run_activation)
    {
        ITensor    *io = tensors.get_tensor(ACL_DST);
        ITensorPack pack{ { ACL_SRC, io }, { ACL_DST, io } };
        _activation_func->run(pack);
    }
}

void CpuGemmDirectConv2d::prepare(ITensorPack &tensors)
{
    if(!_is_prepared)
    {
        // If we are using a fixed-format kernel the weights are already reshaped
        if(_gemm_asm_func && _gemm_asm_func->isVarWeightsKernel())
        {
            _gemm_asm_func->prepare(tensors);
            _is_prepared = true;
            return;
        }
        const ITensor *weights     = tensors.get_const_tensor(ACL_SRC_1);
        ITensor       *weights_aux = utils::cast::polymorphic_cast<ITensor *>(tensors.get_tensor(offset_int_vec(PermutedWeights)));
        ARM_COMPUTE_ERROR_ON_NULLPTR(weights, weights_aux);

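        // Bind the permuted-weights tensor info to the auxiliary buffer and run the permutation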
        CpuAuxTensorHandler permuted_weights(_perm_weights, *weights_aux);
        ITensorPack         permute_tensors{ { ACL_SRC, weights }, { ACL_DST, permuted_weights.get() } };
        _weights_permute_func->run(permute_tensors);

        tensors.add_const_tensor(ACL_SRC_1, permuted_weights.get());
        // Call prepare of assembly dispatch
        _gemm_asm_func->prepare(tensors);

        _is_prepared = true;
    }
}

experimental::MemoryRequirements CpuGemmDirectConv2d::workspace() const
{
    return _aux_mem;
}
} // namespace cpu
} // namespace arm_compute