/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/CL/kernels/CLGenerateProposalsLayerKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace
{
Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(anchors, all_anchors);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(anchors);
    ARM_COMPUTE_RETURN_ERROR_ON(anchors->dimension(0) != info.values_per_roi());
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::QSYMM16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(anchors->num_dimensions() > 2);
    if(all_anchors->total_size() > 0)
    {
        size_t feature_height = info.feat_height();
        size_t feature_width  = info.feat_width();
        size_t num_anchors    = anchors->dimension(1);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(all_anchors, anchors);
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->num_dimensions() > 2);
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(0) != info.values_per_roi());
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(1) != feature_height * feature_width * num_anchors);

        if(is_data_type_quantized(anchors->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(anchors, all_anchors);
        }
    }
    return Status{};
}
} // namespace

CLComputeAllAnchorsKernel::CLComputeAllAnchorsKernel()
    : _anchors(nullptr), _all_anchors(nullptr)
{
    _type = CLKernelType::ELEMENTWISE;
}

void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info)
{
    configure(CLKernelLibrary::get().get_compile_context(), anchors, all_anchors, info);
}

void CLComputeAllAnchorsKernel::configure(const CLCompileContext &compile_context, const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(anchors, all_anchors);
    auto padding_info = get_padding_info({ anchors, all_anchors });
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(anchors->info(), all_anchors->info(), info));

    // Metadata
    const size_t   num_anchors = anchors->info()->dimension(1);
    const DataType data_type   = anchors->info()->data_type();
    const float    width       = info.feat_width();
    const float    height      = info.feat_height();

    // Initialize the output if empty
    const TensorShape output_shape(info.values_per_roi(), width * height * num_anchors);
    auto_init_if_empty(*all_anchors->info(), TensorInfo(output_shape, 1, data_type, anchors->info()->quantization_info()));
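    // The auto-initialized output holds one ROI of values_per_roi() coordinates for every
    // (base anchor, feature-map location) pair, i.e. feat_width * feat_height * num_anchors rows.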

    // Set instance variables
    _anchors     = anchors;
    _all_anchors = all_anchors;

    const bool is_quantized = is_data_type_quantized(anchors->info()->data_type());

    // Set build options
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
    build_opts.add_option("-DWIDTH=" + float_to_string_with_full_precision(width));
    build_opts.add_option("-DHEIGHT=" + float_to_string_with_full_precision(height));
    build_opts.add_option("-DSTRIDE=" + float_to_string_with_full_precision(1.f / info.spatial_scale()));
    build_opts.add_option("-DNUM_ANCHORS=" + support::cpp11::to_string(num_anchors));
    build_opts.add_option("-DNUM_ROI_FIELDS=" + support::cpp11::to_string(info.values_per_roi()));

    if(is_quantized)
    {
        const UniformQuantizationInfo qinfo = anchors->info()->quantization_info().uniform();
        build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
        build_opts.add_option("-DOFFSET=" + float_to_string_with_full_precision(qinfo.offset));
    }

    // Create kernel
    const std::string kernel_name = (is_quantized) ? "generate_proposals_compute_all_anchors_quantized" : "generate_proposals_compute_all_anchors";
    _kernel                       = create_kernel(compile_context, kernel_name, build_opts.options());

    // The tensor all_anchors can be interpreted as an array of structs (each struct has values_per_roi() fields).
    // This means we don't need to pad on the X dimension, as we know in advance how many fields
    // compose each struct.
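    // For box anchors values_per_roi() is typically 4 (x1, y1, x2, y2), so each work-item of the
    // OpenCL kernel writes one complete anchor.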
    Window win = calculate_max_window(*all_anchors->info(), Steps(info.values_per_roi()));
    ICLKernel::configure_internal(win);
    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

Status CLComputeAllAnchorsKernel::validate(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(anchors, all_anchors, info));
    return Status{};
}

void CLComputeAllAnchorsKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    // Collapse everything on the first dimension
    Window collapsed = window.collapse(ICLKernel::window(), Window::DimX);

    // Set arguments
    unsigned int idx = 0;
    add_1D_tensor_argument(idx, _anchors, collapsed);
    add_1D_tensor_argument(idx, _all_anchors, collapsed);

    // Note that there is no need to loop over the slices: we launch exactly one
    // work-item per generated anchor.
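    // Since the window was collapsed to a single dimension, one enqueue covers every
    // (base anchor, feature-map location) pair.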
    enqueue(queue, *this, collapsed, lws_hint());
}
} // namespace arm_compute