/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/CL/kernels/CLSpaceToDepthLayerKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"

using namespace arm_compute::misc::shape_calculator;
namespace arm_compute
{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
    ARM_COMPUTE_RETURN_ERROR_ON(block_shape < 1);

    // Validate output if initialized
    if(output->total_size() != 0)
    {
        const DataLayout data_layout = input->data_layout();
        const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
        const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
        const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
        const int        idx_batch   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
        ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_width] % block_shape != 0);
        ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_height] % block_shape != 0);
        ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_batch] != output->tensor_shape()[idx_batch]);
        ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_channel] % (block_shape * block_shape) != 0);
        ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().total_size() != output->tensor_shape().total_size());
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    }

    return Status{};
}
} // namespace

CLSpaceToDepthLayerKernel::CLSpaceToDepthLayerKernel()
    : _input(nullptr), _output(nullptr), _block_shape()
{
    _type = CLKernelType::ELEMENTWISE;
}

void CLSpaceToDepthLayerKernel::configure(const ICLTensor *input, ICLTensor *output, int32_t block_shape)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, output, block_shape);
}

void CLSpaceToDepthLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    auto padding_info = get_padding_info({ input, output });

    TensorShape output_shape = compute_space_to_depth_shape(input->info(), block_shape);
    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type());

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), block_shape));

    _input       = input;
    _output      = output;
    _block_shape = block_shape;

    const int idx_width   = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
    const int idx_channel = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::CHANNEL);

    // Create kernel
    CLBuildOptions build_opts;
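    // The kernel only reorders elements, so the data type is passed as an unsigned integer type of the same element size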
    build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(data_size_from_type(output->info()->data_type())));
    build_opts.add_option("-DCHANNEL_SIZE=" + support::cpp11::to_string(output->info()->dimension(idx_channel)));
    build_opts.add_option("-DBLOCK_SHAPE=" + support::cpp11::to_string(block_shape));
    build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(output->info()->dimension(idx_width)));
    _kernel = create_kernel(compile_context, "space_to_depth_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());

    // Configure kernel window
    Window win = calculate_max_window(*output->info(), Steps());
    ICLKernel::configure_internal(win);
    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

Status CLSpaceToDepthLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, block_shape));
    return Status{};
}

void CLSpaceToDepthLayerKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    Window slice_out = window.first_slice_window_3D();
    Window slice_in  = window.first_slice_window_4D();

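    // Collapse the input window so the whole 4D input tensor is visible to the kernel; the batch being read is selected through the batch_id argument instead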
    slice_in.set(Window::DimX, Window::Dimension(0, 0, 0));
    slice_in.set(Window::DimY, Window::Dimension(0, 0, 0));
    slice_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
    slice_in.set(3, Window::Dimension(0, 0, 0));

    int batch_id = 0;
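    // Enqueue one 3D output slice per batch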
    do
    {
        unsigned int idx = 0;
        add_4D_tensor_argument(idx, _input, slice_in);
        add_argument(idx, batch_id);
        add_3D_tensor_argument(idx, _output, slice_out);
        enqueue(queue, *this, slice_out, lws_hint());

        ++batch_id;
    }
    while(window.slide_window_slice_3D(slice_out));
}
} // namespace arm_compute