/*
 * Copyright (c) 2021-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/operators/CpuGemmDirectConv2d.h"

#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/FunctionDescriptors.h"
#include "src/common/utils/Log.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/utils/CpuAuxTensorHandler.h"

#include "support/Cast.h"

#include <set>

namespace arm_compute
{
namespace cpu
{
using namespace arm_compute::experimental;
using namespace arm_compute::utils::cast;

namespace
{
GEMMLowpOutputStageInfo calculate_output_stage_metadata(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const ActivationLayerInfo &act)
{
    // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
    // Extract and negate input and weights offset
    const QuantizationInfo        iqinfo    = src->quantization_info();
    const QuantizationInfo        wqinfo    = weights->quantization_info();
    const QuantizationInfo        oqinfo    = (dst->total_size() == 0) ? iqinfo : dst->quantization_info();
    const UniformQuantizationInfo uoqinfo   = oqinfo.uniform();
    const DataType                data_type = src->data_type();
    // Merge activation with output stage
    const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                               ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                               ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                             };
    PixelValue                                              type_min{};
    PixelValue                                              type_max{};
    std::tie(type_min, type_max) = get_min_max(data_type);
    int32_t min_activation       = type_min.get<int32_t>();
    int32_t max_activation       = type_max.get<int32_t>();
    if(supported_acts.count(act.activation()) != 0)
    {
        std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act, data_type, uoqinfo);
    }
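    // Describe how the int32 GEMM accumulators are requantized back to the output quantized type,
    // clamping to the (possibly activation-narrowed) bounds computed above.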
    GEMMLowpOutputStageInfo os_info;
    os_info.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    os_info.gemmlowp_offset          = uoqinfo.offset;
    os_info.gemmlowp_min_bound       = min_activation;
    os_info.gemmlowp_max_bound       = max_activation;
    os_info.is_quantized_per_channel = (weights->data_type() == DataType::QSYMM8_PER_CHANNEL);
    quantization::calculate_quantized_multipliers(iqinfo, wqinfo, oqinfo, os_info);
    return os_info;
}
init_assembly_metadata(const Conv2dInfo & info,bool is_indirect)78*c217d954SCole Faust cpu::AsmGemmInfo init_assembly_metadata(const Conv2dInfo &info, bool is_indirect)
79*c217d954SCole Faust {
80*c217d954SCole Faust     cpu::AsmGemmInfo asm_info;
81*c217d954SCole Faust     asm_info.method                  = is_indirect ? cpu::AsmConvMethod::Indirect : cpu::AsmConvMethod::Conv;
82*c217d954SCole Faust     asm_info.ps_info                 = info.conv_info;
83*c217d954SCole Faust     asm_info.activation_info         = info.act_info;
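    // Treat the input and output as 3D tensors so the assembly backend can work on the NHWC
    // [W, H] plane directly; the convolution runs inside the assembly kernel, without an im2col pass.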
    asm_info.depth_output_gemm3d     = true;
    asm_info.reinterpret_input_as_3d = true;
    asm_info.padding_top             = info.conv_info.pad_top();
    asm_info.padding_left            = info.conv_info.pad_left();
    asm_info.padding_value           = 0.f;
    asm_info.negated_offsets         = false;
    asm_info.fast_mode               = info.enable_fast_math;
    asm_info.fixed_format            = info.weights_info.weight_format() != WeightFormat::UNSPECIFIED;
    asm_info.weight_format           = info.weights_info.weight_format();
    return asm_info;
}
} // namespace

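// Minimal usage sketch (illustrative only; tensor creation, allocation and the exact Conv2dInfo
// construction are assumptions and not part of this file):
//
//   CpuGemmDirectConv2d conv;
//   ARM_COMPUTE_ERROR_THROW_ON(CpuGemmDirectConv2d::validate(&src_info, &weights_info, &bias_info, &dst_info, conv_info));
//   conv.configure(&src_info, &weights_info, &bias_info, &dst_info, conv_info);
//   // Allocate the auxiliary buffers described by conv.workspace(), then pack the tensors:
//   ITensorPack pack{ { ACL_SRC_0, &src }, { ACL_SRC_1, &weights }, { ACL_SRC_2, &bias }, { ACL_DST, &dst } };
//   conv.prepare(pack); // one-off weight permutation / pretranspose
//   conv.run(pack);
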
CpuGemmDirectConv2d::CpuGemmDirectConv2d()
    : _gemm_asm_func(std::make_unique<CpuGemmAssemblyDispatch>()),
      _activation_func(std::make_unique<CpuActivation>()),
      _weights_permute_func(std::make_unique<CpuPermute>()),
      _aux_mem(AuxTensorIdx::Count),
      _perm_weights(),
      _run_activation(false),
      _is_prepared(false)
{
}

CpuGemmDirectConv2d::~CpuGemmDirectConv2d() = default;

void CpuGemmDirectConv2d::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const Conv2dInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_ERROR_THROW_ON(CpuGemmDirectConv2d::validate(src, weights, biases, dst, info));
    ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, info);

    _run_activation = info.act_info.enabled() && !_gemm_asm_func->is_activation_supported(info.act_info);
    _is_prepared    = false;

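    // Set up the one-off permutation of the NHWC [C_in, W, H, C_out] weights into the order the
    // assembly kernels consume; it is executed in prepare() and the result may be pretransposed further.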
    _weights_permute_func->configure(weights, &_perm_weights, PermutationVector{ 3, 0, 1, 2 });

    // Configure assembly dispatch
    cpu::AsmGemmInfo asm_info = init_assembly_metadata(info, false);
    if(is_data_type_quantized(src->data_type()))
    {
        asm_info.output_stage = calculate_output_stage_metadata(src, weights, dst, info.act_info);
    }
    _gemm_asm_func->configure(src, &_perm_weights, biases, dst, asm_info);

    // Configure activation
    if(_run_activation)
    {
        _activation_func->configure(dst, nullptr, info.act_info);
    }

    // Add auxiliary memory requirements of the assembly dispatch
    auto asm_mem_req           = _gemm_asm_func->workspace();
    _aux_mem[AsmGemmWorkspace] = asm_mem_req[AsmGemmWorkspace];
    _aux_mem[Pretranspose]     = asm_mem_req[Pretranspose];

    if(_aux_mem[Pretranspose].size > 0)
    {
        // Release permuted weights at the end of prepare as they are further transposed by the assembly dispatch
        _aux_mem[PermutedWeights] = MemoryInfo(offset_int_vec(PermutedWeights), MemoryLifetime::Prepare, weights->total_size());
    }
    else
    {
        // We must permute weights if they are WeightFormat::UNSPECIFIED
        if(info.weights_info.weight_format() == WeightFormat::UNSPECIFIED)
            _aux_mem[PermutedWeights] = MemoryInfo(offset_int_vec(PermutedWeights), MemoryLifetime::Persistent, weights->total_size());
    }
}
Status CpuGemmDirectConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv2dInfo &info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::BFLOAT16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::BFLOAT16, DataType::F16, DataType::F32);
    if(!is_fixed_format(info.weights_info.weight_format()))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, weights);
    }
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.num_groups > 1, "Grouping (num_groups != 1) is not supported on Neon");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_layout() != DataLayout::NHWC, "Data layout supported is NHWC");
    const DataType    data_type = src->data_type();
    const TensorShape i_shape   = src->tensor_shape();
    const TensorShape w_shape   = weights->tensor_shape();
    ARM_COMPUTE_RETURN_ERROR_ON(w_shape[0] != i_shape[0]);
    ARM_COMPUTE_RETURN_ERROR_ON(info.dilation != Size2D(1U, 1U));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
    // Validate biases
    if(biases != nullptr)
    {
        if(is_data_type_quantized_asymmetric(data_type))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else if(data_type == DataType::BFLOAT16)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::F32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(3));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    cpu::AsmGemmInfo asm_info = init_assembly_metadata(info, false);
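    // Delegate the remaining checks (whether an assembly kernel exists for this configuration) to the assembly dispatch.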
    ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuGemmAssemblyDispatch::validate(src, weights, biases, dst, asm_info));
    return Status{};
}
void CpuGemmDirectConv2d::run(ITensorPack &tensors)
{
    prepare(tensors);

    _gemm_asm_func->run(tensors);
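    // If the activation could not be fused into the assembly kernel, run it in-place on the output.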
    if(_run_activation)
    {
        ITensor    *io = tensors.get_tensor(ACL_DST);
        ITensorPack pack{ { ACL_SRC, io }, { ACL_DST, io } };
        _activation_func->run(pack);
    }
}

void CpuGemmDirectConv2d::prepare(ITensorPack &tensors)
{
    if(!_is_prepared)
    {
        // If we are using a fixed-format kernel the weights are already reshaped
        if(_gemm_asm_func && _gemm_asm_func->isVarWeightsKernel())
        {
            _gemm_asm_func->prepare(tensors);
            _is_prepared = true;
            return;
        }
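        // Otherwise permute the original weights once into the auxiliary buffer and hand the
        // permuted tensor to the assembly dispatch, which may pretranspose it further.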
        const ITensor *weights     = tensors.get_const_tensor(ACL_SRC_1);
        ITensor       *weights_aux = utils::cast::polymorphic_cast<ITensor *>(tensors.get_tensor(offset_int_vec(PermutedWeights)));
        ARM_COMPUTE_ERROR_ON_NULLPTR(weights, weights_aux);

        CpuAuxTensorHandler permuted_weights(_perm_weights, *weights_aux);
        ITensorPack         permute_tensors{ { ACL_SRC, weights }, { ACL_DST, permuted_weights.get() } };
        _weights_permute_func->run(permute_tensors);

        tensors.add_const_tensor(ACL_SRC_1, permuted_weights.get());
        // Call prepare of assembly dispatch
        _gemm_asm_func->prepare(tensors);

        _is_prepared = true;
    }
}

experimental::MemoryRequirements CpuGemmDirectConv2d::workspace() const
{
    return _aux_mem;
}
} // namespace cpu
} // namespace arm_compute