xref: /aosp_15_r20/external/ComputeLibrary/src/gpu/cl/kernels/ClGemmLowpOffsetContributionKernel.cpp (revision c217d954acce2dbc11938adb493fc0abd69584f3)
1 /*
2  * Copyright (c) 2017-2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #include "src/gpu/cl/kernels/ClGemmLowpOffsetContributionKernel.h"
25 
26 #include "arm_compute/core/CL/ICLTensor.h"
27 #include "arm_compute/core/Helpers.h"
28 #include "arm_compute/core/TensorInfo.h"
29 #include "arm_compute/core/Utils.h"
30 #include "arm_compute/core/Validate.h"
31 
32 #include "src/core/helpers/WindowHelpers.h"
33 
34 #include "support/Cast.h"
35 #include "support/StringSupport.h"
36 
37 namespace arm_compute
38 {
39 namespace opencl
40 {
41 namespace kernels
42 {
43 namespace
44 {
/** Validate the static arguments of the offset-contribution kernel.
 *
 * @param[in] mm_result      S32 tensor holding the raw GEMMLowp matrix-multiply result (modified in place by the kernel).
 * @param[in] vector_sum_col Row-vector with the sums of each column of matrix A; may be nullptr when @p a_offset == 0.
 * @param[in] vector_sum_row Column-vector with the sums of each row of matrix B; may be nullptr when @p b_offset == 0.
 * @param[in] bias           Optional 1D S32 bias added per output column; may be nullptr.
 * @param[in] a_offset       Quantization offset of matrix A.
 * @param[in] b_offset       Quantization offset of matrix B.
 *
 * @return a Status describing the first failed check, or an empty (OK) Status.
 */
Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
                          int32_t a_offset, int32_t b_offset)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);

    // Bias is optional; when present it must be a 1D S32 vector matching the output width.
    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
    }

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
    }

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);

        // Check if input is a 3D reinterpretation: mm_result height differing from the
        // row-sum length means the 2D output is a flattened (height x depth) 3D tensor.
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        // Validate input: the row-sum length must cover the full (possibly flattened) M dimension.
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));

        TensorShape output_shape = mm_result->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            // Batch dimension index of the output: shifted by one when the 3D reinterpretation is active.
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            // Collapse trailing dimensions so batch counts can be compared directly.
            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches of output tensor");

            if(a_offset != 0)
            {
                // Column sums may either be shared across batches (batch dim == 1) or match the row-sum batches.
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1");
            }
        }
    }

    return Status{};
}
101 } // namespace
102 
// Default constructor: tags the kernel as element-wise so the CL scheduler/tuner
// applies the element-wise tuning heuristics to it.
ClGemmLowpOffsetContributionKernel::ClGemmLowpOffsetContributionKernel()
{
    _type = CLKernelType::ELEMENTWISE;
}
107 
/** Configure the OpenCL kernel that adds the a_offset/b_offset contributions
 *  (and optional bias) in place onto the raw S32 GEMMLowp result.
 *
 * @param[in] compile_context CL compile context used to build the program.
 * @param[in] mm_result       S32 matrix-multiply result updated in place at run time.
 * @param[in] vector_sum_col  Column sums of matrix A; may be nullptr when @p a_offset == 0.
 * @param[in] vector_sum_row  Row sums of matrix B; may be nullptr when @p b_offset == 0.
 * @param[in] bias            Optional 1D S32 bias; may be nullptr.
 * @param[in] k               Depth (K) of the GEMM, folded into the constant K_OFFSET term.
 * @param[in] a_offset        Quantization offset of matrix A.
 * @param[in] b_offset        Quantization offset of matrix B.
 */
void ClGemmLowpOffsetContributionKernel::configure(const CLCompileContext &compile_context,
                                                   const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
                                                   int32_t k, int32_t a_offset, int32_t b_offset)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, a_offset, b_offset));

    // Snapshot padding so we can assert at the end that configuration did not change it.
    auto padding_info = get_padding_info({ mm_result, vector_sum_col, vector_sum_row, bias });

    // Check if input is a 3D reinterpretation (2D mm_result is really a flattened height x depth tensor).
    const bool reinterpret_as_3d = vector_sum_row != nullptr
                                   && mm_result->num_dimensions() > 1
                                   && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

    // Vector width per work-item, shrunk if the output width is smaller than 4.
    const unsigned int num_elems_processed_per_iteration = adjust_vec_size(4, mm_result->dimension(0));

    // Set the arguments to pass at compile time
    CLBuildOptions build_opts;
    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
    build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(mm_result->dimension(0) % num_elems_processed_per_iteration));

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        build_opts.add_option("-DA_OFFSET=" + support::cpp11::to_string(a_offset));
        build_opts.add_option_if(vector_sum_col->tensor_shape().num_dimensions() > 1, "-DSUM_COL_HAS_BATCHES");
    }
    // If b_offset == 0, vector_sum_row can be a nullptr
    build_opts.add_option_if(b_offset != 0, "-DB_OFFSET=" + support::cpp11::to_string(b_offset));
    // Constant term a_offset * b_offset * k added once per output element.
    build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(a_offset * b_offset * k));
    build_opts.add_option_if(reinterpret_as_3d, "-DHEIGHT_INPUT3D=" + support::cpp11::to_string(mm_result->dimension(1)));
    build_opts.add_option_if(reinterpret_as_3d, "-DDEPTH_INPUT3D=" + support::cpp11::to_string(mm_result->dimension(2)));
    build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");

    std::string kernel_name("gemmlowp_offset_contribution");

    // A macro guard to compile ONLY the kernel of interest
    build_opts.add_option("-D" + upper_string(kernel_name));

    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Configure kernel window
    Window win = calculate_max_window(*mm_result, Steps(num_elems_processed_per_iteration));
    IClKernel::configure_internal(win);

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name + "_";
    _config_id += support::cpp11::to_string(mm_result->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(mm_result->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(mm_result->dimension(2));

    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
165 
validate(const ITensorInfo * mm_result,const ITensorInfo * vector_sum_col,const ITensorInfo * vector_sum_row,const ITensorInfo * bias,int32_t a_offset,int32_t b_offset)166 Status ClGemmLowpOffsetContributionKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
167                                                     int32_t a_offset, int32_t b_offset)
168 {
169     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, a_offset, b_offset));
170     return Status{};
171 }
172 
run_op(ITensorPack & tensors,const Window & window,cl::CommandQueue & queue)173 void ClGemmLowpOffsetContributionKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
174 {
175     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
176     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IClKernel::window(), window);
177 
178     const auto vector_sum_col = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_VEC_COL_SUM));
179     const auto vector_sum_row = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_VEC_ROW_SUM));
180     const auto bias           = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_BIAS));
181     const auto mm_result      = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_SRC_DST));
182 
183     Window collapsed = window.collapse_if_possible(IClKernel::window(), Window::DimZ);
184     Window slice     = collapsed.first_slice_window_3D();
185 
186     // Set window for vector_sum_col
187     Window win_vector_sum_col = slice;
188     win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
189     win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
190 
191     // Set window for vector_sum_row
192     Window win_vector_sum_row = slice;
193     win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
194     win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
195     win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
196 
197     Window biases_slice = slice;
198     biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
199     biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
200 
201     do
202     {
203         unsigned int idx = 0;
204         add_3D_tensor_argument(idx, mm_result, slice);
205         add_2D_tensor_argument_if((vector_sum_col != nullptr), idx, vector_sum_col, win_vector_sum_col);
206         add_2D_tensor_argument_if((vector_sum_row != nullptr), idx, vector_sum_row, win_vector_sum_row);
207         add_1D_tensor_argument_if((bias != nullptr), idx, bias, biases_slice);
208 
209         enqueue(queue, *this, slice, lws_hint());
210     }
211     while(collapsed.slide_window_slice_3D(slice));
212 }
213 } // namespace kernels
214 } // namespace opencl
215 } // namespace arm_compute
216