/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_VALIDATION_HELPERS_H
#define ARM_COMPUTE_TEST_VALIDATION_HELPERS_H

#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "support/Half.h"
#include "tests/Globals.h"
#include "tests/SimpleTensor.h"

#include <math.h>
#include <random>
#include <type_traits>
#include <utility>

namespace arm_compute
{
namespace test
{
namespace validation
{
/** Helper trait that extends std::is_floating_point so that half is also treated as a floating-point type. */
template <typename T>
struct is_floating_point : public std::is_floating_point<T>
{
};

template <>
struct is_floating_point<half> : public std::true_type
{
};

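// Compile-time illustration (added example, not part of the original helpers):
// unlike std::is_floating_point, the trait above reports true for half.
static_assert(is_floating_point<half>::value, "half should be treated as a floating-point type");
static_assert(!std::is_floating_point<half>::value, "std::is_floating_point does not know about half");
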
/** Helper function to get the testing range for each activation layer.
 *
 * @param[in] activation Activation function to test.
 * @param[in] data_type  Data type.
 *
 * @return A pair containing the lower and upper testing bounds for the given function.
 */
template <typename T>
std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
{
    std::pair<T, T> bounds;

    switch(data_type)
    {
        case DataType::F16:
        {
            using namespace half_float::literal;

            switch(activation)
            {
                case ActivationLayerInfo::ActivationFunction::TANH:
                case ActivationLayerInfo::ActivationFunction::SQUARE:
                case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                    // Reduce range as exponent overflows
                    bounds = std::make_pair(-2._h, 2._h);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
                    // Reduce range as sqrt should take a non-negative number
                    bounds = std::make_pair(0._h, 128._h);
                    break;
                default:
                    bounds = std::make_pair(-255._h, 255._h);
                    break;
            }
            break;
        }
        case DataType::F32:
            switch(activation)
            {
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                    // Reduce range as exponent overflows
                    bounds = std::make_pair(-40.f, 40.f);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
                    // Reduce range as sqrt should take a non-negative number
                    bounds = std::make_pair(0.f, 255.f);
                    break;
                default:
                    bounds = std::make_pair(-255.f, 255.f);
                    break;
            }
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported data type");
    }

    return bounds;
}
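
// Illustrative usage sketch (example only, not part of the library API): the bounds
// returned above can seed a uniform distribution when filling test inputs. The
// generator, seed and the F32/SOFT_RELU choice below are assumptions of this example.
inline float example_sample_within_activation_bounds()
{
    const auto bounds = get_activation_layer_test_bounds<float>(ActivationLayerInfo::ActivationFunction::SOFT_RELU, DataType::F32);

    std::mt19937                          gen(0);
    std::uniform_real_distribution<float> dist(bounds.first, bounds.second);
    return dist(gen); // Lies inside [-40.f, 40.f], the reduced SOFT_RELU range
}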

/** Calculate the output tensor shape given a vector of input tensors to concatenate across depth
 *
 * @param[in] input_shapes Shapes of the tensors to concatenate across depth.
 *
 * @return The shape of the output concatenated tensor.
 */
TensorShape calculate_depth_concatenate_shape(const std::vector<TensorShape> &input_shapes);

/** Calculate the output tensor shape for the concatenate operation along a given axis
 *
 * @param[in] input_shapes Shapes of the tensors to concatenate along the given axis.
 * @param[in] axis         Axis to use for the concatenate operation.
 *
 * @return The shape of the output concatenated tensor.
 */
TensorShape calculate_concatenate_shape(const std::vector<TensorShape> &input_shapes, size_t axis);
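
// Illustrative sketch (assumption, not the library's implementation): concatenating
// along the depth axis keeps width/height and sums the third dimension, e.g.
// {32, 32, 4} and {32, 32, 6} concatenate to {32, 32, 10}. The function name and
// the fixed shapes below exist only for this example.
inline TensorShape example_expected_depth_concatenate_shape()
{
    const TensorShape a(32U, 32U, 4U);
    const TensorShape b(32U, 32U, 6U);

    TensorShape out = a;
    out.set(2, a[2] + b[2]); // Sum the depth (Z) dimension of the inputs
    return out;              // Equals TensorShape(32U, 32U, 10U)
}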

/** Convert an asymmetric quantized simple tensor into float using tensor quantization information.
 *
 * @param[in] src Quantized tensor.
 *
 * @return Float tensor.
 */
template <typename T>
SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<T> &src);

/** Convert a float simple tensor into an asymmetric quantized one using the specified quantization information.
 *
 * @param[in] src               Float tensor.
 * @param[in] quantization_info Quantization information.
 *
 * @return Quantized tensor.
 */
template <typename T>
SimpleTensor<T> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info);
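
// Minimal per-value sketch of asymmetric (QASYMM8) quantization, assuming the
// standard affine mapping q = clamp(round(x / scale) + offset, 0, 255). This is an
// example of the formula only, not the implementation behind the helper above,
// which operates on whole SimpleTensor objects.
inline uint8_t example_quantize_qasymm8_value(float x, float scale, int32_t offset)
{
    const float   scaled  = x / scale;
    const float   rounded = (scaled >= 0.f) ? (scaled + 0.5f) : (scaled - 0.5f); // Round half away from zero
    const int32_t q       = offset + static_cast<int32_t>(rounded);
    return static_cast<uint8_t>(q < 0 ? 0 : (q > 255 ? 255 : q)); // Clamp to the unsigned 8-bit range
}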

/** Convert a symmetric quantized simple tensor into float using tensor quantization information.
 *
 * @param[in] src Quantized tensor.
 *
 * @return Float tensor.
 */
template <typename T>
SimpleTensor<float> convert_from_symmetric(const SimpleTensor<T> &src);

/** Convert a float simple tensor into a symmetric quantized one using the specified quantization information.
 *
 * @param[in] src               Float tensor.
 * @param[in] quantization_info Quantization information.
 *
 * @return Quantized tensor.
 */
template <typename T>
SimpleTensor<T> convert_to_symmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info);
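
// Minimal per-value sketch of symmetric (QSYMM8) quantization, assuming a
// zero-offset mapping q = clamp(round(x / scale), -128, 127) over the full int8_t
// range. Example only; the exact rounding and clamping used by the library's
// conversion helper may differ.
inline int8_t example_quantize_qsymm8_value(float x, float scale)
{
    const float   scaled  = x / scale;
    const float   rounded = (scaled >= 0.f) ? (scaled + 0.5f) : (scaled - 0.5f); // Round half away from zero
    const int32_t q       = static_cast<int32_t>(rounded);
    return static_cast<int8_t>(q < -128 ? -128 : (q > 127 ? 127 : q)); // Clamp to the signed 8-bit range
}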

/** Matrix multiply between 2 float simple tensors
 *
 * @param[in]  a   Input tensor A
 * @param[in]  b   Input tensor B
 * @param[out] out Output tensor
 *
 */
template <typename T>
void matrix_multiply(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &out);
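
// Naive reference sketch of the matrix multiplication above, written on raw
// row-major buffers purely for illustration (the declared helper works on
// SimpleTensor objects). Buffer layout and parameter names are assumptions:
// a is m x k, b is k x n, out is m x n.
inline void example_matrix_multiply(const float *a, const float *b, float *out, int m, int n, int k)
{
    for(int row = 0; row < m; ++row)
    {
        for(int col = 0; col < n; ++col)
        {
            float acc = 0.f;
            for(int i = 0; i < k; ++i)
            {
                acc += a[row * k + i] * b[i * n + col]; // Dot product of row of A with column of B
            }
            out[row * n + col] = acc;
        }
    }
}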

/** Transpose matrix
 *
 * @param[in]  in  Input tensor
 * @param[out] out Output tensor
 *
 */
template <typename T>
void transpose_matrix(const SimpleTensor<T> &in, SimpleTensor<T> &out);
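
// Naive reference sketch of a 2D transpose on raw row-major buffers, for
// illustration only (the declared helper works on SimpleTensor objects).
// Parameter names and layout are assumptions of this example.
inline void example_transpose_matrix(const float *in, float *out, int rows, int cols)
{
    for(int r = 0; r < rows; ++r)
    {
        for(int c = 0; c < cols; ++c)
        {
            out[c * rows + r] = in[r * cols + c]; // Element (r, c) becomes element (c, r)
        }
    }
}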

/** Get a 2D tile from a tensor
 *
 * @note In case of out-of-bound reads, the tile will be filled with zeros
 *
 * @param[in]  in    Input tensor
 * @param[out] tile  Tile
 * @param[in]  coord Coordinates of the tile within the input tensor
 */
template <typename T>
void get_tile(const SimpleTensor<T> &in, SimpleTensor<T> &tile, const Coordinates &coord);

/** Fill the input tensor with zeros in the area defined by anchor and shape
 *
 * @param[in,out] in     Input tensor to fill with zeros
 * @param[in]     anchor Starting point of the zeros area
 * @param[in]     shape  Shape (extent) of the zeros area
 */
template <typename T>
void zeros(SimpleTensor<T> &in, const Coordinates &anchor, const TensorShape &shape);
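
// Minimal 2D sketch of the anchor/shape semantics above on a raw row-major buffer:
// elements inside the rectangle starting at (anchor_x, anchor_y) with extent
// (shape_x, shape_y) are set to zero. Example only; the declared helper operates
// on SimpleTensor objects of any rank.
inline void example_zero_area_2d(float *buf, int width, int height, int anchor_x, int anchor_y, int shape_x, int shape_y)
{
    for(int y = anchor_y; y < anchor_y + shape_y && y < height; ++y)
    {
        for(int x = anchor_x; x < anchor_x + shape_x && x < width; ++x)
        {
            buf[y * width + x] = 0.f; // Zero the element at (x, y)
        }
    }
}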

/** Helper function to compute quantized min and max bounds
 *
 * @param[in] quant_info Quantization info to be used for conversion
 * @param[in] min        Floating point minimum value to be quantized
 * @param[in] max        Floating point maximum value to be quantized
 *
 * @return A pair holding the quantized values of @p min and @p max
 */
std::pair<int, int> get_quantized_bounds(const QuantizationInfo &quant_info, float min, float max);
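
// Illustrative sketch of what "quantized bounds" means, assuming uniform asymmetric
// QASYMM8 quantization with an explicit scale/offset pair; the declared helper takes
// a QuantizationInfo instead. The signature and the reuse of the example quantizer
// defined earlier are assumptions of this sketch.
inline std::pair<int, int> example_quantized_bounds(float scale, int32_t offset, float min, float max)
{
    return std::make_pair(static_cast<int>(example_quantize_qasymm8_value(min, scale, offset)),
                          static_cast<int>(example_quantize_qasymm8_value(max, scale, offset)));
}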

/** Helper function to compute asymmetric quantized signed min and max bounds
 *
 * @param[in] quant_info Quantization info to be used for conversion
 * @param[in] min        Floating point minimum value to be quantized
 * @param[in] max        Floating point maximum value to be quantized
 *
 * @return A pair holding the quantized values of @p min and @p max
 */
std::pair<int, int> get_quantized_qasymm8_signed_bounds(const QuantizationInfo &quant_info, float min, float max);

/** Helper function to compute symmetric quantized min and max bounds
 *
 * @param[in] quant_info Quantization info to be used for conversion
 * @param[in] min        Floating point minimum value to be quantized
 * @param[in] max        Floating point maximum value to be quantized
 * @param[in] channel_id Channel id for per channel quantization info.
 *
 * @return A pair holding the quantized values of @p min and @p max
 */
std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id = 0);

/** Add random padding along the X axis (between 1 and 16 columns per side) to all the input tensors.
 *  This is used in our validation suite in order to simulate implicit padding added after configuring, but before allocating.
 *
 * @param[in] tensors        List of tensors to add padding to
 * @param[in] data_layout    (Optional) Data layout of the operator
 * @param[in] only_right_pad (Optional) Only add right padding, used when testing CL image padding
 *
 * @note This function adds padding to the input tensors only if data_layout == DataLayout::NHWC
 */
void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout = DataLayout::NHWC, bool only_right_pad = false);

/** Add random padding along the Y axis (between 1 and 4 rows per side) to all the input tensors.
 *  This is used in our validation suite in order to simulate implicit padding added after configuring, but before allocating.
 *
 * @param[in] tensors     List of tensors to add padding to
 * @param[in] data_layout (Optional) Data layout of the operator
 *
 * @note This function adds padding to the input tensors only if data_layout == DataLayout::NHWC
 */
void add_padding_y(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout = DataLayout::NHWC);
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_VALIDATION_HELPERS_H */