/*
 * Copyright (c) 2019-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CORE_KERNEL_DESCRIPTORS_H
#define ARM_COMPUTE_CORE_KERNEL_DESCRIPTORS_H

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/experimental/IPostOp.h"

namespace arm_compute
{
/** Descriptor for FFT scale kernels */
struct FFTScaleKernelInfo
{
    float scale{ 0.f };      /**< Scale factor to apply. */
    bool  conjugate{ true }; /**< Flag to conjugate the output. */
};

/** Descriptor for FFT digit reverse kernels */
struct FFTDigitReverseKernelInfo
{
    unsigned int axis{ 0 };          /**< Axis to perform the kernel on. */
    bool         conjugate{ false }; /**< Flag to conjugate the output. */
};

/** Descriptor used by the FFT core kernels */
struct FFTRadixStageKernelInfo
{
    unsigned int axis{ 0 };               /**< Axis to run the kernel on. */
    unsigned int radix{ 0 };              /**< Radix to use. */
    unsigned int Nx{ 0 };                 /**< Nx coefficient. */
    bool         is_first_stage{ false }; /**< Flags if the FFT kernel is the first stage of a decomposed FFT. */
};
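/* Illustrative sketch only (the helper below is hypothetical, not part of the
 * library API): building the descriptor for the first radix stage of a
 * decomposed FFT. The radix value in the comment is an example assumption. */
inline FFTRadixStageKernelInfo make_first_fft_stage_info(unsigned int axis, unsigned int radix)
{
    FFTRadixStageKernelInfo info{};
    info.axis           = axis;  // dimension the butterflies run along
    info.radix          = radix; // e.g. 4 for a radix-4 stage
    info.Nx             = 1;     // the first stage of a decomposed FFT starts with Nx = 1
    info.is_first_stage = true;
    return info;
}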
class ITensorInfo;
/** Descriptor used by the GEMM kernels */
struct GEMMKernelInfo
{
    GEMMKernelInfo() = default;
    GEMMKernelInfo(
        unsigned int        im,
        unsigned int        in,
        unsigned int        ik,
        unsigned int        idepth_output_gemm3d,
        bool                ireinterpret_input_as_3d,
        bool                ibroadcast_bias,
        bool                ifp_mixed_precision,
        bool                ihas_pad_y,
        ActivationLayerInfo iactivation_info,
        int                 inmult_transpose1xW_width,
        int                 imult_interleave4x4_height,
        GEMMLHSMatrixInfo   ilhs_info,
        GEMMRHSMatrixInfo   irhs_info,
        int32_t             ina_offset,
        int32_t             inb_offset,
        const experimental::PostOpList<ITensorInfo *> &ipost_ops = experimental::PostOpList<ITensorInfo *> {})
        : m(im), n(in), k(ik), depth_output_gemm3d(idepth_output_gemm3d), reinterpret_input_as_3d(ireinterpret_input_as_3d), broadcast_bias(ibroadcast_bias), fp_mixed_precision(ifp_mixed_precision),
          has_pad_y(ihas_pad_y), activation_info(iactivation_info), mult_transpose1xW_width(inmult_transpose1xW_width), mult_interleave4x4_height(imult_interleave4x4_height), lhs_info(ilhs_info),
          rhs_info(irhs_info), a_offset(ina_offset), b_offset(inb_offset), post_ops(ipost_ops)
    {
    }

    unsigned int            m{ 0 };                           /**< Number of LHS rows */
    unsigned int            n{ 0 };                           /**< Number of RHS columns */
    unsigned int            k{ 0 };                           /**< Number of LHS columns or RHS rows */
    unsigned int            depth_output_gemm3d{ 0 };         /**< Depth of the output tensor in case it is reinterpreted as 3D */
    bool                    reinterpret_input_as_3d{ false }; /**< Flag used to reinterpret the input as 3D */
    bool                    broadcast_bias{ false };          /**< Flag used to broadcast the bias addition */
    bool                    fp_mixed_precision{ false };      /**< Flag used to indicate wider accumulators (32 bit instead of 16 for FP16). */
    bool                    has_pad_y{ false };               /**< Flag used to indicate if the input/output tensors have internal padding on the y direction */
    ActivationLayerInfo     activation_info{};                /**< Activation function to perform after the matrix multiplication */
    int                     mult_transpose1xW_width{ 1 };     /**< Multiplication factor for the width of the 1xW transposed block */
    int                     mult_interleave4x4_height{ 1 };   /**< Multiplication factor for the height of the 4x4 interleaved block */
    GEMMLHSMatrixInfo       lhs_info{};                       /**< LHS matrix information used to retrieve the number of rows processed by each thread */
    GEMMRHSMatrixInfo       rhs_info{};                       /**< RHS matrix information used for reshaping the RHS matrix */
    int32_t                 a_offset{ 0 };                    /**< Offset to be added to each element of the matrix A */
    int32_t                 b_offset{ 0 };                    /**< Offset to be added to each element of the matrix B */
    GEMMLowpOutputStageInfo output_stage{};                   /**< GEMMLowp output stage information */
    experimental::PostOpList<ITensorInfo *> post_ops{};       /**< (EXPERIMENTAL_POST_OPS) Specifies a list of post ops to be fused after the main op. Note that unsupported post ops will not be executed.
                                                               *   If specified, automatically disables the @ref activation_info */
};
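/* Illustrative sketch only (the helper name is hypothetical): describing an
 * M x K by K x N floating-point GEMM with a fused ReLU activation. All other
 * fields keep their documented defaults. */
inline GEMMKernelInfo make_example_gemm_info(unsigned int m, unsigned int n, unsigned int k)
{
    GEMMKernelInfo info{};
    info.m               = m; // LHS rows
    info.n               = n; // RHS columns
    info.k               = k; // LHS columns / RHS rows
    info.activation_info = ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU);
    return info;
}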
/** Compute descriptor used by the depthwise convolution native kernel */
struct DWCComputeKernelInfo
{
    unsigned int n0{ 1 };                             /**< Number of columns processed by each thread */
    unsigned int m0{ 1 };                             /**< Number of rows processed by each thread */
    bool         export_input_to_cl_image{ false };   /**< Export input to cl_image */
    bool         export_weights_to_cl_image{ false }; /**< Export the weights to cl_image */
};

/** Compute descriptor used by the direct convolution kernel */
struct DirectConvComputeKernelInfo
{
    int32_t m0{ 1 };                             /**< Number of rows to be processed by the kernel */
    int32_t n0{ 1 };                             /**< Number of columns to be processed by the kernel */
    int32_t k0{ 1 };                             /**< Number of partial accumulations to be processed in a single iteration by the kernel */
    bool    export_weights_to_cl_image{ false }; /**< Flag to export the weights to cl_image */
    bool    export_output_to_cl_image{ false };  /**< Flag to export the output to cl_image */
    bool    export_input_to_cl_image{ false };   /**< Flag to export the input to cl_image */
};

/** Descriptor used by the softmax kernels */
struct SoftmaxKernelInfo
{
    float    beta{ 1.f };                          /**< A scaling factor for the exponent with default value 1.0 */
    bool     is_log{ false };                      /**< Flag used to perform Log Softmax operation */
    DataType input_data_type{ DataType::UNKNOWN }; /**< Input tensor data type */
    int32_t  axis{ 0 };                            /**< The dimension in which to apply softmax. */
};

/** Descriptor used by the direct convolution layer output stage kernels */
struct DirectConvolutionLayerOutputStageKernelInfo
{
    int32_t  result_fixedpoint_multiplier{ 0 };     /**< Result output stage multiplier used for quantizing */
    int32_t  result_shift{ 0 };                     /**< Result output stage shift used for quantizing */
    int32_t  result_offset_after_shift{ 0 };        /**< Result offset used for quantizing */
    DataType output_data_type{ DataType::UNKNOWN }; /**< Output tensor data type to use if the output is not initialized */
};

struct InstanceNormalizationLayerKernelInfo
{
    /** Default constructor */
    InstanceNormalizationLayerKernelInfo()
        : InstanceNormalizationLayerKernelInfo(1.f, 0.f, 1e-12, true)
    {
    }
    /** Constructor
     *
     * @param[in] gamma               The scale scalar value applied to the normalized tensor.
     * @param[in] beta                The offset scalar value applied to the normalized tensor.
     * @param[in] epsilon             Lower bound value for the normalization.
     * @param[in] use_mixed_precision Use mixed precision in case of FP16 execution.
     */
    InstanceNormalizationLayerKernelInfo(float gamma, float beta, float epsilon, bool use_mixed_precision)
        : gamma(gamma), beta(beta), epsilon(epsilon), use_mixed_precision(use_mixed_precision)
    {
    }

    float gamma;               /**< The scale scalar value applied to the normalized tensor. Defaults to 1.0 */
    float beta;                /**< The offset scalar value applied to the normalized tensor. Defaults to 0.0 */
    float epsilon;             /**< Lower bound value for the normalization. Defaults to 1e-12 */
    bool  use_mixed_precision; /**< Use mixed precision in case of FP16 execution. Defaults to true */
};
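/* Illustrative sketch only (hypothetical helper): constructing the instance
 * normalization descriptor explicitly. The arguments simply restate the
 * documented defaults of the default constructor above. */
inline InstanceNormalizationLayerKernelInfo make_default_instance_norm_info()
{
    // gamma = 1 and beta = 0 leave the normalized tensor unscaled and unshifted.
    return InstanceNormalizationLayerKernelInfo(1.f, 0.f, 1e-12f, true /* use_mixed_precision */);
}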
struct GEMMLowpReductionKernelInfo
{
    /** Default constructor */
    GEMMLowpReductionKernelInfo() = default;
    /** Constructor
     *
     * @param[in] k             Number of matrix columns/rows.
     * @param[in] is_reshaped   True if the input tensor has been reshaped.
     * @param[in] scalar        Scalar value to multiply each reduced column/row by.
     * @param[in] mul_by_scalar True if each column/row reduction has to be multiplied by a scalar value.
     */
    GEMMLowpReductionKernelInfo(int32_t k, bool is_reshaped, int32_t scalar, bool mul_by_scalar)
        : k(k), is_reshaped(is_reshaped), scalar(scalar), mul_by_scalar(mul_by_scalar)
    {
    }

    int32_t k{ 0 };                 /**< Number of matrix columns/rows */
    bool    is_reshaped{ false };   /**< True if the input tensor has been reshaped */
    int32_t scalar{ 0 };            /**< Scalar value to multiply each reduced column/row by */
    bool    mul_by_scalar{ false }; /**< True if each column/row reduction has to be multiplied by a scalar value */
};

struct ScaleKernelInfo
{
    /** Constructor
     *
     * @param[in] interpolation_policy  Interpolation type to use
     * @param[in] border_mode           Border mode policy
     * @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT and use_padding is set to false. Defaults to default @ref PixelValue
     * @param[in] sampling_policy       (Optional) Sampling policy used by the interpolation. Defaults to @ref SamplingPolicy::CENTER
     * @param[in] use_padding           (Optional) Is padding in use or not. Defaults to true.
     * @param[in] align_corners         (Optional) Align corners of input and output, only affecting bilinear policy with TOP_LEFT sampling policy. Defaults to false.
     * @param[in] data_layout           (Optional) Data layout used by the layer. Defaults to @ref DataLayout::UNKNOWN
     */
    ScaleKernelInfo(InterpolationPolicy interpolation_policy,
                    BorderMode          border_mode,
                    PixelValue          constant_border_value = PixelValue(),
                    SamplingPolicy      sampling_policy       = SamplingPolicy::CENTER,
                    bool                use_padding           = true,
                    bool                align_corners         = false,
                    DataLayout          data_layout           = DataLayout::UNKNOWN) noexcept
        : interpolation_policy{ interpolation_policy },
          border_mode{ border_mode },
          constant_border_value{ constant_border_value },
          sampling_policy{ sampling_policy },
          use_padding{ use_padding },
          align_corners{ align_corners },
          data_layout{ data_layout }
    {
    }

    InterpolationPolicy interpolation_policy;  /**< Interpolation type to use */
    BorderMode          border_mode;           /**< Border mode policy */
    PixelValue          constant_border_value; /**< Constant value to use for constant border mode policy */
    SamplingPolicy      sampling_policy;       /**< Sampling policy used by the interpolation. */
    bool                use_padding;           /**< Indication of using padding */
    bool                align_corners;         /**< Align corners of input and output */
    DataLayout          data_layout;           /**< Data layout to use */
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CORE_KERNEL_DESCRIPTORS_H */
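/* Usage sketch (illustrative only): a ScaleKernelInfo describing bilinear
 * resizing of an NHWC tensor with replicated borders. The chosen policies are
 * example assumptions, not defaults of the API:
 *
 *   arm_compute::ScaleKernelInfo scale_info(arm_compute::InterpolationPolicy::BILINEAR,
 *                                           arm_compute::BorderMode::REPLICATE,
 *                                           arm_compute::PixelValue(),
 *                                           arm_compute::SamplingPolicy::TOP_LEFT,
 *                                           false, // use_padding
 *                                           true,  // align_corners: only affects bilinear + TOP_LEFT
 *                                           arm_compute::DataLayout::NHWC);
 */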