/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h"
#include "src/runtime/heuristics/dwc_native/ClDWCNativeKernelConfig.h"
#include "src/runtime/heuristics/dwc_native/IClDWCNativeKernelConfig.h"

#include "src/common/utils/Log.h"

namespace arm_compute
{
using namespace arm_compute::misc;
using namespace arm_compute::misc::shape_calculator;
using namespace arm_compute::cl_dwc;

CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _dwc_native_kernel(std::make_unique<CLDepthwiseConvolutionLayerNativeKernel>()),
      _permute_input_to_nhwc(),
      _permute_weights_to_nhwc(),
      _permute_output_to_nchw(),
      _permuted_input(),
      _permuted_weights(),
      _permuted_output(),
      _output_multipliers(),
      _output_shifts(),
      _original_weights(),
      _input(),
      _output(),
      _needs_permute(false),
      _is_prepared(false),
      _is_quantized(false)
{
}

CLDepthwiseConvolutionLayer::~CLDepthwiseConvolutionLayer() = default;

void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                                            unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
}

void CLDepthwiseConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases,
                                            ICLTensor *output, const PadStrideInfo &conv_info,
                                            unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(CLDepthwiseConvolutionLayer::validate(input->info(),
                                                                     weights->info(),
                                                                     biases != nullptr ? biases->info() : nullptr,
                                                                     output != nullptr ? output->info() : input->info(),
                                                                     conv_info,
                                                                     depth_multiplier,
                                                                     act_info,
                                                                     dilation));
    ARM_COMPUTE_LOG_PARAMS(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);

    _is_quantized     = is_data_type_quantized(input->info()->data_type());
    _is_prepared      = false;
    _original_weights = weights;
    _input            = input;
    _output           = output;
    _needs_permute    = input->info()->data_layout() == DataLayout::NCHW;

    const GPUTarget gpu_target = CLScheduler::get().target();

    ICLTensor       *input_to_use   = input;
    const ICLTensor *weights_to_use = weights;
    ICLTensor       *output_to_use  = output;
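    // The native depthwise kernel operates on NHWC data. When the incoming tensors are NCHW,
    // the function wraps the kernel in permute stages: input and weights are rotated to NHWC
    // before the convolution and the result is rotated back to NCHW afterwards.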
    if(_needs_permute)
    {
        _memory_group.manage(&_permuted_input);
        _memory_group.manage(&_permuted_output);

        // Configure the function to transform the input tensor from NCHW -> NHWC
        _permute_input_to_nhwc.configure(compile_context, input, &_permuted_input, PermutationVector(2U, 0U, 1U));
        _permuted_input.info()->set_data_layout(DataLayout::NHWC);

        // Configure the function to transform the weights tensor from IHW -> HWI
        _permute_weights_to_nhwc.configure(compile_context, weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
        _permuted_weights.info()->set_data_layout(DataLayout::NHWC);

        // Set the output quantization info before configuring the dwc kernel
        _permuted_output.info()->set_quantization_info(output->info()->quantization_info());

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
        output_to_use  = &_permuted_output;
    }

    CLTensor *output_multipliers_to_use = nullptr;
    CLTensor *output_shifts_to_use      = nullptr;
    if(_is_quantized)
    {
        const size_t idx_c       = get_data_layout_dimension_index(weights->info()->data_layout(), DataLayoutDimension::CHANNEL);
        const size_t num_filters = (is_data_type_quantized_per_channel(weights->info()->data_type())) ? weights->info()->dimension(idx_c) : 1;

        _output_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
        _output_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

        output_multipliers_to_use = &_output_multipliers;
        output_shifts_to_use      = &_output_shifts;
    }
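    // A sketch of the quantized-path algebra (standard asymmetric quantization, not specific
    // to this file): every accumulator must be rescaled by
    //     real_multiplier = (input_scale * weights_scale) / output_scale
    // which is encoded in fixed point as an int32 multiplier and a right shift,
    //     real_multiplier ~= output_multiplier * 2^(-output_shift)
    // With per-channel quantized weights (QSYMM8_PER_CHANNEL) there is one multiplier/shift
    // pair per filter, hence the num_filters-sized S32 tensors initialised above. The actual
    // values are computed on first run, in prepare().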

    // Get the depthwise convolution compute parameters
    auto t = ClDWCNativeKernelConfigurationFactory::create(gpu_target);
    const DWCComputeKernelInfo dwc_native_compute_info = t->configure(input_to_use->info(), weights_to_use->info(), conv_info, dilation, depth_multiplier);
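    // Note: dwc_native_compute_info carries the heuristic's launch parameters for the native
    // kernel; on current Compute Library versions this typically covers the processing block
    // sizes and whether input/weights may be exported to OpenCL images.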

    const ConvolutionInfo conv_kernel_info{ conv_info, depth_multiplier, act_info, dilation };

    _dwc_native_kernel->set_target(gpu_target);
    _dwc_native_kernel->configure(compile_context, input_to_use, weights_to_use, biases, output_to_use,
                                  dwc_native_compute_info, conv_kernel_info, output_multipliers_to_use, output_shifts_to_use);

    if(_needs_permute)
    {
        _permuted_input.allocator()->allocate();

        // Configure the function to transform the convolved output to NCHW format
        _permuted_output.info()->set_data_layout(DataLayout::NCHW);
        _permute_output_to_nchw.configure(compile_context, &_permuted_output, output, PermutationVector(1U, 2U, 0U));
        _permuted_output.allocator()->allocate();
    }

    if(_is_quantized)
    {
        _output_multipliers.allocator()->allocate();
        _output_shifts.allocator()->allocate();
    }
}

Status CLDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                             const PadStrideInfo &conv_info,
                                             unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    const bool in_place = input == output || output == nullptr;
    if(in_place)
    {
        output = input;
    }
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);

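    // Reject configurations where the dilated kernel extent, k + (k - 1) * (dilation - 1),
    // exceeds the padded input size in either spatial dimension.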
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) + (weights->dimension(idx_w) - 1) * (dilation.x() - 1) > input->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right());
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) + (weights->dimension(idx_h) - 1) * (dilation.y() - 1) > input->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom());

    const GPUTarget gpu_target = CLScheduler::get().target();

    const ConvolutionInfo conv_kernel_info{ conv_info, depth_multiplier, act_info, dilation };

    const bool needs_permute = input->data_layout() == DataLayout::NCHW;

    const bool is_quantized = is_data_type_quantized(input->data_type());

    TensorInfo output_multipliers_shifts_info(TensorInfo(TensorShape(1U), 1, DataType::S32));
    if(is_quantized)
    {
        if(is_data_type_quantized_per_channel(weights->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL);

            const size_t idx_c = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::CHANNEL);
            output_multipliers_shifts_info.set_tensor_shape(TensorShape(weights->dimension(idx_c)));
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
        }
    }

    if(needs_permute)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(in_place, "In-place is supported only with NHWC data layout");
        TensorShape permuted_input_shape   = input->tensor_shape();
        TensorShape permuted_weights_shape = weights->tensor_shape();
        const ConvolutionInfo info{ conv_info, depth_multiplier, ActivationLayerInfo(), dilation };
        TensorShape permuted_output_shape = shape_calculator::compute_depthwise_convolution_shape(*input, *weights, info);

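        // In Compute Library's permute convention, output dimension i takes input dimension
        // perm[i]; PermutationVector(2U, 0U, 1U) therefore turns the NCHW dimension order
        // [W, H, C] into [C, W, H] (NHWC), and PermutationVector(1U, 2U, 0U) is its inverse.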
        permute(permuted_input_shape, PermutationVector(2U, 0U, 1U));
        permute(permuted_weights_shape, PermutationVector(2U, 0U, 1U));
        permute(permuted_output_shape, PermutationVector(2U, 0U, 1U));

        const TensorInfo permuted_input   = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_input_shape).set_data_layout(DataLayout::NHWC);
        const TensorInfo permuted_weights = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_weights_shape).set_data_layout(DataLayout::NHWC);
        const TensorInfo permuted_output  = output->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_output_shape).set_data_layout(DataLayout::NHWC);

        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(input, &permuted_input, PermutationVector(2U, 0U, 1U)));
        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(weights, &permuted_weights, PermutationVector(2U, 0U, 1U)));

        // Get the depthwise convolution compute parameters
        auto t = ClDWCNativeKernelConfigurationFactory::create(gpu_target);
        const DWCComputeKernelInfo dwc_native_compute_info = t->configure(&permuted_input, &permuted_weights, conv_info, dilation, depth_multiplier);

        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(&permuted_input, &permuted_weights, biases, &permuted_output,
                                                                                      dwc_native_compute_info, conv_kernel_info, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(&permuted_output, output, PermutationVector(1U, 2U, 0U)));
    }
    else
    {
        // Get the depthwise convolution compute parameters
        auto t = ClDWCNativeKernelConfigurationFactory::create(gpu_target);
        const DWCComputeKernelInfo dwc_native_compute_info = t->configure(input, weights, conv_info, dilation, depth_multiplier);
        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(input, weights, biases, output, dwc_native_compute_info, conv_kernel_info, &output_multipliers_shifts_info,
                                                                                      &output_multipliers_shifts_info));
    }
    return Status{};
}

void CLDepthwiseConvolutionLayer::run()
{
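    // prepare() performs the one-shot setup (quantized multipliers/shifts, NHWC weight
    // permutation) on the first call; afterwards it is a no-op guarded by _is_prepared.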
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_needs_permute)
    {
        _permute_input_to_nhwc.run();
    }
    CLScheduler::get().enqueue(*_dwc_native_kernel);
    if(_needs_permute)
    {
        _permute_output_to_nchw.run();
    }
}

void CLDepthwiseConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        if(_is_quantized)
        {
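            // The multiplier/shift tables live in OpenCL buffers: map them into host address
            // space, fill them on the CPU, then unmap before the kernel consumes them.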
            _output_multipliers.map();
            _output_shifts.map();
            quantization::compute_quantized_multipliers_and_shifts(_input->info(),
                                                                   _original_weights->info(),
                                                                   _output != nullptr ? _output->info() : _input->info(),
                                                                   reinterpret_cast<int32_t *>(_output_multipliers.ptr_to_element(Coordinates(0))),
                                                                   reinterpret_cast<int32_t *>(_output_shifts.ptr_to_element(Coordinates(0))));
            _output_multipliers.unmap();
            _output_shifts.unmap();
        }

        if(_needs_permute)
        {
            ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

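            // Materialise the NHWC copy of the weights once, then flag the original weights
            // as unused so their backing memory can be reclaimed.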
            _permuted_weights.allocator()->allocate();
            _permute_weights_to_nhwc.run();
            _original_weights->mark_as_unused();
        }
        _is_prepared = true;
    }
}
} // namespace arm_compute