/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_TENSOR_LIBRARY_H
#define ARM_COMPUTE_TEST_TENSOR_LIBRARY_H

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Window.h"
#include "support/Random.h"
#include "tests/RawTensor.h"
#include "tests/TensorCache.h"
#include "tests/Utils.h"
#include "tests/framework/Exceptions.h"
#include "utils/Utils.h"

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <fstream>
#include <functional>
#include <limits>
#include <random>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

namespace arm_compute
{
namespace test
{
/** Factory class to create and fill tensors.
 *
 * Allows tensors to be initialised from loaded images or by specifying the
 * shape explicitly. Furthermore, provides methods to fill tensors with the
 * content of loaded images or with random values.
 */
class AssetsLibrary final
{
public:
    using RangePair = std::pair<float, float>;

public:
    /** Initialises the library with a @p path to the assets directory.
     * Furthermore, sets the seed for the random generator to @p seed.
     *
     * @param[in] path Path to load assets from.
     * @param[in] seed Seed used to initialise the random number generator.
     */
    AssetsLibrary(std::string path, std::random_device::result_type seed);

    /** Path to assets directory used to initialise library.
     *
     * @return the path to the assets directory.
     */
    std::string path() const;

    /** Seed that is used to fill tensors with random values.
     *
     * @return the initial random seed.
     */
    std::random_device::result_type seed() const;

    /** Provides a tensor shape for the specified image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     *
     * @return the tensor shape for the specified image.
     */
    TensorShape get_image_shape(const std::string &name);

    /** Provides a constant raw tensor for the specified image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     *
     * @return a raw tensor for the specified image.
     */
    const RawTensor &get(const std::string &name) const;

    /** Provides a raw tensor for the specified image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     *
     * @return a raw tensor for the specified image.
     */
    RawTensor get(const std::string &name);

    /** Creates an uninitialised raw tensor with the given @p data_type and @p
     * num_channels. The shape is derived from the specified image.
     *
     * @param[in] name Image file used to initialise the tensor.
     * @param[in] data_type Data type used to initialise the tensor.
     * @param[in] num_channels Number of channels used to initialise the tensor.
     *
     * @return a raw tensor for the specified image.
     */
    RawTensor get(const std::string &name, DataType data_type, int num_channels = 1) const;

    /** Provides a constant raw tensor for the specified image after it has been
     * converted to @p format.
     *
     * @param[in] name Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     *
     * @return a raw tensor for the specified image.
     */
    const RawTensor &get(const std::string &name, Format format) const;

    /** Provides a raw tensor for the specified image after it has been
     * converted to @p format.
     *
     * @param[in] name Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     *
     * @return a raw tensor for the specified image.
     */
    RawTensor get(const std::string &name, Format format);

    /** Provides a constant raw tensor for the specified channel after it has
     * been extracted from the given image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     * inferred automatically.
     *
     * @return a raw tensor for the specified image channel.
     */
    const RawTensor &get(const std::string &name, Channel channel) const;

    /** Provides a raw tensor for the specified channel after it has been
     * extracted from the given image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     * inferred automatically.
     *
     * @return a raw tensor for the specified image channel.
     */
    RawTensor get(const std::string &name, Channel channel);

    /** Provides a constant raw tensor for the specified channel after it has
     * been extracted from the given image formatted to @p format.
     *
     * @param[in] name Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * @return a raw tensor for the specified image channel.
     */
    const RawTensor &get(const std::string &name, Format format, Channel channel) const;

    /** Provides a raw tensor for the specified channel after it has been
     * extracted from the given image formatted to @p format.
     *
     * @param[in] name Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * @return a raw tensor for the specified image channel.
     */
    RawTensor get(const std::string &name, Format format, Channel channel);

    /** Puts garbage values all around the tensor for testing purposes
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] distribution Distribution used to fill the tensor's surroundings.
     * @param[in] seed_offset The offset will be added to the global seed before initialising the random generator.
     */
    template <typename T, typename D>
    void fill_borders_with_garbage(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const;

    /** Fills the specified @p tensor with random values drawn from @p
     * distribution.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] distribution Distribution used to fill the tensor.
     * @param[in] seed_offset The offset will be added to the global seed before initialising the random generator.
     *
     * @note The @p distribution has to provide operator(Generator &) which
     * will be used to draw samples.
     */
    template <typename T, typename D>
    void fill(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const;
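
    /* Example (illustrative sketch, not part of the API): filling a test tensor with
     * values drawn from a caller-provided distribution. The tensor type is assumed to
     * expose shape(), data_type(), num_channels(), data_layout(), padding() and
     * operator()(Coordinates), as e.g. the test framework's SimpleTensor does; the
     * assets path and seed below are made up.
     *
     *     AssetsLibrary assets("/path/to/assets", 1234);
     *     SimpleTensor<float> src(TensorShape(16U, 16U), DataType::F32);
     *     std::uniform_real_distribution<float> dist(-1.f, 1.f);
     *     assets.fill(src, dist, 0); // seed_offset 0 -> reproducible values
     */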

    /** Fills the specified @p tensor with randomly generated boxes, where each
     * group of four consecutive elements holds one box as (x1, y1, x2, y2).
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] distribution Distribution used to generate the box coordinates.
     * @param[in] seed_offset The offset will be added to the global seed before initialising the random generator.
     */
    template <typename T, typename D>
    void fill_boxes(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const;

    /** Fills the specified @p vec with random values drawn from @p
     * distribution.
     *
     * @param[in, out] vec To be filled vector.
     * @param[in] distribution Distribution used to fill the vector.
     * @param[in] seed_offset The offset will be added to the global seed before initialising the random generator.
     *
     * @note The @p distribution has to provide operator(Generator &) which
     * will be used to draw samples.
     */
    template <typename T, typename D>
    void fill(std::vector<T> &vec, D &&distribution, std::random_device::result_type seed_offset) const;

    /** Fills the specified @p raw tensor with random values drawn from @p
     * distribution.
     *
     * @param[in, out] raw To be filled raw tensor.
     * @param[in] distribution Distribution used to fill the tensor.
     * @param[in] seed_offset The offset will be added to the global seed before initialising the random generator.
     *
     * @note The @p distribution has to provide operator(Generator &) which
     * will be used to draw samples.
     */
    template <typename D>
    void fill(RawTensor &raw, D &&distribution, std::random_device::result_type seed_offset) const;

    /** Fills the specified @p tensor with the content of the specified image
     * converted to the given format.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] name Image file used to fill the tensor.
     * @param[in] format Format of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
     */
    template <typename T>
    void fill(T &&tensor, const std::string &name, Format format) const;

    /** Fills the raw tensor with the content of the specified image
     * converted to the given format.
     *
     * @param[in, out] raw To be filled raw tensor.
     * @param[in] name Image file used to fill the tensor.
     * @param[in] format Format of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
     */
    void fill(RawTensor &raw, const std::string &name, Format format) const;

    /** Fills the specified @p tensor with the content of the specified channel
     * extracted from the given image.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] name Image file used to fill the tensor.
     * @param[in] channel Channel of the image used to fill the tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     * inferred automatically.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
     */
    template <typename T>
    void fill(T &&tensor, const std::string &name, Channel channel) const;

    /** Fills the raw tensor with the content of the specified channel
     * extracted from the given image.
     *
     * @param[in, out] raw To be filled raw tensor.
     * @param[in] name Image file used to fill the tensor.
     * @param[in] channel Channel of the image used to fill the tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     * inferred automatically.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
     */
    void fill(RawTensor &raw, const std::string &name, Channel channel) const;

    /** Fills the specified @p tensor with the content of the specified channel
     * extracted from the given image after it has been converted to the given
     * format.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] name Image file used to fill the tensor.
     * @param[in] format Format of the image used to fill the tensor.
     * @param[in] channel Channel of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
     */
    template <typename T>
    void fill(T &&tensor, const std::string &name, Format format, Channel channel) const;

    /** Fills the raw tensor with the content of the specified channel
     * extracted from the given image after it has been converted to the given
     * format.
     *
     * @param[in, out] raw To be filled raw tensor.
     * @param[in] name Image file used to fill the tensor.
     * @param[in] format Format of the image used to fill the tensor.
     * @param[in] channel Channel of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
     */
    void fill(RawTensor &raw, const std::string &name, Format format, Channel channel) const;

    /** Fills the specified @p tensor with the content of the raw tensor.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] raw Raw tensor used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
     */
    template <typename T>
    void fill(T &&tensor, RawTensor raw) const;

    /** Fill a tensor with uniform distribution
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] seed_offset The offset will be added to the global seed before initialising the random generator.
     */
    template <typename T>
    void fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset) const;

    /** Fill a tensor with uniform distribution
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] seed_offset The offset will be added to the global seed before initialising the random generator.
     * @param[in] low lowest value in the range (inclusive)
     * @param[in] high highest value in the range (inclusive)
     *
     * @note @p low and @p high must be of the same type as the data type of @p tensor
     */
    template <typename T, typename D>
    void fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset, D low, D high) const;
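
    /* Example (illustrative sketch): constraining the random values to a range. The
     * low/high arguments are assumed to match the tensor's data type, as required by
     * the note above (uint8_t for a QASYMM8 tensor here); SimpleTensor and the values
     * below are illustrative assumptions.
     *
     *     AssetsLibrary assets("/path/to/assets", 1234);
     *     SimpleTensor<uint8_t> weights(TensorShape(3U, 3U, 8U), DataType::QASYMM8);
     *     assets.fill_tensor_uniform(weights, 2, uint8_t(10), uint8_t(200));
     */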

    /** Fill a tensor with a uniform distribution, excluding the specified value ranges
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] seed_offset The offset will be added to the global seed before initialising the random generator.
     * @param[in] excluded_range_pairs Ranges to exclude from the generator
     */
    template <typename T>
    void fill_tensor_uniform_ranged(T &&tensor,
                                    std::random_device::result_type seed_offset,
                                    const std::vector<AssetsLibrary::RangePair> &excluded_range_pairs) const;
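
    /* Example (illustrative sketch): filling a tensor uniformly while excluding values
     * around zero, e.g. to avoid division-by-zero style corner cases in a test.
     *
     *     AssetsLibrary assets("/path/to/assets", 1234);
     *     SimpleTensor<float> divisor(TensorShape(8U, 8U), DataType::F32);
     *     const std::vector<AssetsLibrary::RangePair> excluded{ { -0.001f, 0.001f } };
     *     assets.fill_tensor_uniform_ranged(divisor, 3, excluded);
     */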

    /** Fills the specified @p tensor with data loaded from a .npy (numpy binary) file at the specified path.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] name Data file.
     *
     * @note The numpy array stored in the binary .npy file must be row-major in the sense that it
     * must store elements within a row consecutively in memory, then rows within a 2D slice,
     * then 2D slices within a 3D slice and so on. Note that this imposes no restrictions on the
     * indexing convention used in the numpy array. That is, the numpy array can be either Fortran
     * style or C style as long as it adheres to the rule above.
     *
     * More concretely, the orders of dimensions for each style are as follows:
     * C-style (numpy default):
     *     array[HigherDims..., Z, Y, X]
     * Fortran style:
     *     array[X, Y, Z, HigherDims...]
     */
    template <typename T>
    void fill_layer_data(T &&tensor, std::string name) const;
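
    /* Example (illustrative sketch): loading reference data from a .npy file located
     * under the assets directory. The relative file name here is purely hypothetical.
     *
     *     AssetsLibrary assets("/path/to/assets", 1234);
     *     SimpleTensor<float> expected(TensorShape(32U, 32U, 3U), DataType::F32);
     *     assets.fill_layer_data(expected, "reference/expected_output.npy");
     */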

    /** Fill a tensor with a constant value
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] value Value to be assigned to all elements of the input tensor.
     *
     * @note @p value must be of the same type as the data type of @p tensor
     */
    template <typename T, typename D>
    void fill_tensor_value(T &&tensor, D value) const;

    /** Fill a tensor with a given vector of static values.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] values A vector containing values
     *
     * To cope with tensors of various sizes, the vector doesn't have to be
     * the same size as the tensor. If the tensor is larger than the vector,
     * the iterator over the vector will wrap around and start again from the
     * beginning. If the vector is larger, values located after the required
     * size won't be used.
     */
    template <typename T, typename DataType>
    void fill_static_values(T &&tensor, const std::vector<DataType> &values) const;
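
    /* Example (illustrative sketch): because the values wrap around, a six-element
     * tensor filled from a two-element vector ends up containing 1, -1, 1, -1, 1, -1.
     *
     *     AssetsLibrary assets("/path/to/assets", 1234);
     *     SimpleTensor<int32_t> pattern(TensorShape(2U, 3U), DataType::S32);
     *     assets.fill_static_values(pattern, std::vector<int32_t>{ 1, -1 });
     */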

    // Function type to generate a number to fill tensors.
    template <typename ResultType>
    using GeneratorFunctionType = std::function<ResultType(void)>;
    /** Fill a tensor with a value generator function.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in] generate_value A function that generates values.
     */
    template <typename T, typename ResultType>
    void fill_with_generator(T &&tensor, const GeneratorFunctionType<ResultType> &generate_value) const;
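
    /* Example (illustrative sketch): filling a tensor with a deterministic ramp by
     * wrapping a stateful lambda in the GeneratorFunctionType alias, so that the
     * result type can be deduced; tensor type and values are assumptions.
     *
     *     AssetsLibrary assets("/path/to/assets", 1234);
     *     SimpleTensor<float> ramp(TensorShape(4U, 4U), DataType::F32);
     *     float next = 0.f;
     *     AssetsLibrary::GeneratorFunctionType<float> gen = [&next]() { return next += 0.25f; };
     *     assets.fill_with_generator(ramp, gen);
     */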

private:
    // Function prototype to convert between image formats.
    using Converter = void (*)(const RawTensor &src, RawTensor &dst);
    // Function prototype to extract a channel from an image.
    using Extractor = void (*)(const RawTensor &src, RawTensor &dst);
    // Function prototype to load an image file.
    using Loader = RawTensor (*)(const std::string &path);

    const Converter &get_converter(Format src, Format dst) const;
    const Converter &get_converter(DataType src, Format dst) const;
    const Converter &get_converter(Format src, DataType dst) const;
    const Converter &get_converter(DataType src, DataType dst) const;
    const Extractor &get_extractor(Format format, Channel) const;
    const Loader &get_loader(const std::string &extension) const;

    /** Creates a raw tensor from the specified image.
     *
     * @param[in] name To be loaded image file.
     *
     * @note If use_single_image is true @p name is ignored and the user image
     * is loaded instead.
     */
    RawTensor load_image(const std::string &name) const;

    /** Provides a raw tensor for the specified image and format.
     *
     * @param[in] name Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     *
     * If the tensor has already been requested before, the cached version will
     * be returned. Otherwise the tensor will be added to the cache.
     *
     * @note If use_single_image is true @p name is ignored and the user image
     * is loaded instead.
     */
    const RawTensor &find_or_create_raw_tensor(const std::string &name, Format format) const;

    /** Provides a raw tensor for the specified image, format and channel.
     *
     * @param[in] name Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * If the tensor has already been requested before, the cached version will
     * be returned. Otherwise the tensor will be added to the cache.
     *
     * @note If use_single_image is true @p name is ignored and the user image
     * is loaded instead.
     */
    const RawTensor &find_or_create_raw_tensor(const std::string &name, Format format, Channel channel) const;

    mutable TensorCache _cache{};
    mutable arm_compute::Mutex _format_lock{};
    mutable arm_compute::Mutex _channel_lock{};
    const std::string _library_path;
    std::random_device::result_type _seed;
};
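
/* Example (illustrative sketch): typical use from a test. The assets path and seed are
 * normally provided by the test framework's command-line options; the values here are
 * made up, and SimpleTensor is assumed as the tensor type.
 *
 *     AssetsLibrary assets("/path/to/assets", 1234);
 *     SimpleTensor<float> src(TensorShape(64U, 64U), DataType::F32);
 *     assets.fill_tensor_uniform(src, 0); // full-range uniform fill
 *     assets.fill_tensor_value(src, 0.5f); // constant fill
 */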

namespace detail
{
template <typename T>
inline std::vector<std::pair<T, T>> convert_range_pair(const std::vector<AssetsLibrary::RangePair> &excluded_range_pairs)
{
    std::vector<std::pair<T, T>> converted;
    std::transform(excluded_range_pairs.begin(),
                   excluded_range_pairs.end(),
                   std::back_inserter(converted),
                   [](const AssetsLibrary::RangePair & p)
                   {
                       return std::pair<T, T>(static_cast<T>(p.first), static_cast<T>(p.second));
                   });
    return converted;
}
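
/* Example (illustrative sketch): converting float exclusion ranges to the integer type
 * expected by a ranged integer distribution; this helper is internal, so the snippet is
 * only meant to document its behaviour.
 *
 *     const std::vector<AssetsLibrary::RangePair> ranges{ { -10.f, 10.f }, { 100.f, 120.f } };
 *     const auto int_ranges = detail::convert_range_pair<int32_t>(ranges); // std::vector<std::pair<int32_t, int32_t>>
 */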

/* Read npy header and check the payload is suitable for the specified type and shape
 *
 * @param[in] stream ifstream of the npy file
 * @param[in] expect_typestr Expected typestr
 * @param[in] expect_shape Shape of tensor expected to receive the data
 *
 * @note Advances stream to the beginning of the data payload
 */
void validate_npy_header(std::ifstream &stream, const std::string &expect_typestr, const TensorShape &expect_shape);
} // namespace detail

template <typename T, typename D>
void AssetsLibrary::fill_borders_with_garbage(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const
{
    const PaddingSize padding_size = tensor.padding();

    Window window;
    window.set(0, Window::Dimension(-padding_size.left, tensor.shape()[0] + padding_size.right, 1));
    if(tensor.shape().num_dimensions() > 1)
    {
        window.set(1, Window::Dimension(-padding_size.top, tensor.shape()[1] + padding_size.bottom, 1));
    }

    std::mt19937 gen(_seed + seed_offset);

    execute_window_loop(window, [&](const Coordinates & id)
    {
        TensorShape shape = tensor.shape();

        // If outside of valid region
        if(id.x() < 0 || id.x() >= static_cast<int>(shape.x()) || id.y() < 0 || id.y() >= static_cast<int>(shape.y()))
        {
            using ResultType = typename std::remove_reference<D>::type::result_type;
            const ResultType value = distribution(gen);
            void *const out_ptr = tensor(id);
            store_value_with_data_type(out_ptr, value, tensor.data_type());
        }
    });
}

template <typename T, typename D>
void AssetsLibrary::fill_boxes(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const
{
    using DistributionType = typename std::remove_reference<D>::type;
    using ResultType       = typename DistributionType::result_type;

    std::mt19937 gen(_seed + seed_offset);
    TensorShape shape(tensor.shape());
    const uint32_t num_boxes = tensor.num_elements() / 4;
    // Iterate over all elements
    DistributionType size_dist{ ResultType(0.f), ResultType(1.f) };
    for(uint32_t element_idx = 0; element_idx < num_boxes * 4; element_idx += 4)
    {
        const ResultType delta   = size_dist(gen);
        const ResultType epsilon = size_dist(gen);
        const ResultType left    = distribution(gen);
        const ResultType top     = distribution(gen);
        const ResultType right   = left + delta;
        const ResultType bottom  = top + epsilon;
        const std::tuple<ResultType, ResultType, ResultType, ResultType> box(left, top, right, bottom);
        Coordinates x1 = index2coord(shape, element_idx);
        Coordinates y1 = index2coord(shape, element_idx + 1);
        Coordinates x2 = index2coord(shape, element_idx + 2);
        Coordinates y2 = index2coord(shape, element_idx + 3);
        ResultType &target_value_x1 = reinterpret_cast<ResultType *>(tensor(x1))[0];
        ResultType &target_value_y1 = reinterpret_cast<ResultType *>(tensor(y1))[0];
        ResultType &target_value_x2 = reinterpret_cast<ResultType *>(tensor(x2))[0];
        ResultType &target_value_y2 = reinterpret_cast<ResultType *>(tensor(y2))[0];
        store_value_with_data_type(&target_value_x1, std::get<0>(box), tensor.data_type());
        store_value_with_data_type(&target_value_y1, std::get<1>(box), tensor.data_type());
        store_value_with_data_type(&target_value_x2, std::get<2>(box), tensor.data_type());
        store_value_with_data_type(&target_value_y2, std::get<3>(box), tensor.data_type());
    }
    fill_borders_with_garbage(tensor, distribution, seed_offset);
}

template <typename T, typename D>
void AssetsLibrary::fill(std::vector<T> &vec, D &&distribution, std::random_device::result_type seed_offset) const
{
    ARM_COMPUTE_ERROR_ON_MSG(vec.empty(), "Vector must not be empty");

    using ResultType = typename std::remove_reference<D>::type::result_type;

    std::mt19937 gen(_seed + seed_offset);
    for(size_t i = 0; i < vec.size(); ++i)
    {
        const ResultType value = distribution(gen);

        vec[i] = value;
    }
}

template <typename T, typename ResultType>
void AssetsLibrary::fill_with_generator(T &&tensor, const GeneratorFunctionType<ResultType> &generate_value) const
{
    const bool is_nhwc = tensor.data_layout() == DataLayout::NHWC;
    TensorShape shape(tensor.shape());

    if(is_nhwc)
    {
        // Ensure that the equivalent tensors will be filled for both data layouts
        permute(shape, PermutationVector(1U, 2U, 0U));
    }

    // Iterate over all elements
    const uint32_t num_elements = tensor.num_elements();
    for(uint32_t element_idx = 0; element_idx < num_elements; ++element_idx)
    {
        Coordinates id = index2coord(shape, element_idx);

        if(is_nhwc)
        {
            // Write in the correct id for permuted shapes
            permute(id, PermutationVector(2U, 0U, 1U));
        }

        // Iterate over all channels
        for(int channel = 0; channel < tensor.num_channels(); ++channel)
        {
            const ResultType value = generate_value();
            ResultType &target_value = reinterpret_cast<ResultType *>(tensor(id))[channel];

            store_value_with_data_type(&target_value, value, tensor.data_type());
        }
    }
}

template <typename T, typename D>
void AssetsLibrary::fill(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const
{
    using ResultType = typename std::remove_reference<D>::type::result_type;
    std::mt19937 gen(_seed + seed_offset);

    GeneratorFunctionType<ResultType> number_generator = [&]()
    {
        const ResultType value = distribution(gen);
        return value;
    };

    fill_with_generator(tensor, number_generator);
    fill_borders_with_garbage(tensor, distribution, seed_offset);
}

template <typename T, typename DataType>
void AssetsLibrary::fill_static_values(T &&tensor, const std::vector<DataType> &values) const
{
    auto it = values.begin();
    GeneratorFunctionType<DataType> get_next_value = [&]()
    {
        const DataType value = *it;
        ++it;

        if(it == values.end())
        {
            it = values.begin();
        }

        return value;
    };

    fill_with_generator(tensor, get_next_value);
}

template <typename D>
void AssetsLibrary::fill(RawTensor &raw, D &&distribution, std::random_device::result_type seed_offset) const
{
    std::mt19937 gen(_seed + seed_offset);

    for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
    {
        using ResultType = typename std::remove_reference<D>::type::result_type;
        const ResultType value = distribution(gen);

        store_value_with_data_type(raw.data() + offset, value, raw.data_type());
    }
}

template <typename T>
void AssetsLibrary::fill(T &&tensor, const std::string &name, Format format) const
{
    const RawTensor &raw = get(name, format);

    for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
    {
        const Coordinates id = index2coord(raw.shape(), offset / raw.element_size());

        const RawTensor::value_type *const raw_ptr = raw.data() + offset;
        const auto out_ptr = static_cast<RawTensor::value_type *>(tensor(id));
        std::copy_n(raw_ptr, raw.element_size(), out_ptr);
    }
}

template <typename T>
void AssetsLibrary::fill(T &&tensor, const std::string &name, Channel channel) const
{
    fill(std::forward<T>(tensor), name, get_format_for_channel(channel), channel);
}

template <typename T>
void AssetsLibrary::fill(T &&tensor, const std::string &name, Format format, Channel channel) const
{
    const RawTensor &raw = get(name, format, channel);

    for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
    {
        const Coordinates id = index2coord(raw.shape(), offset / raw.element_size());

        const RawTensor::value_type *const raw_ptr = raw.data() + offset;
        const auto out_ptr = static_cast<RawTensor::value_type *>(tensor(id));
        std::copy_n(raw_ptr, raw.element_size(), out_ptr);
    }
}

template <typename T>
void AssetsLibrary::fill(T &&tensor, RawTensor raw) const
{
    for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
    {
        const Coordinates id = index2coord(raw.shape(), offset / raw.element_size());

        const RawTensor::value_type *const raw_ptr = raw.data() + offset;
        const auto out_ptr = static_cast<RawTensor::value_type *>(tensor(id));
        std::copy_n(raw_ptr, raw.element_size(), out_ptr);
    }
}

template <typename T>
void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset) const
{
    switch(tensor.data_type())
    {
        case DataType::U8:
        case DataType::QASYMM8:
        {
            std::uniform_int_distribution<unsigned int> distribution_u8(std::numeric_limits<uint8_t>::lowest(), std::numeric_limits<uint8_t>::max());
            fill(tensor, distribution_u8, seed_offset);
            break;
        }
        case DataType::S8:
        case DataType::QSYMM8:
        case DataType::QSYMM8_PER_CHANNEL:
        case DataType::QASYMM8_SIGNED:
        {
            std::uniform_int_distribution<int> distribution_s8(std::numeric_limits<int8_t>::lowest(), std::numeric_limits<int8_t>::max());
            fill(tensor, distribution_s8, seed_offset);
            break;
        }
        case DataType::U16:
        {
            std::uniform_int_distribution<uint16_t> distribution_u16(std::numeric_limits<uint16_t>::lowest(), std::numeric_limits<uint16_t>::max());
            fill(tensor, distribution_u16, seed_offset);
            break;
        }
        case DataType::S16:
        case DataType::QSYMM16:
        {
            std::uniform_int_distribution<int16_t> distribution_s16(std::numeric_limits<int16_t>::lowest(), std::numeric_limits<int16_t>::max());
            fill(tensor, distribution_s16, seed_offset);
            break;
        }
        case DataType::U32:
        {
            std::uniform_int_distribution<uint32_t> distribution_u32(std::numeric_limits<uint32_t>::lowest(), std::numeric_limits<uint32_t>::max());
            fill(tensor, distribution_u32, seed_offset);
            break;
        }
        case DataType::S32:
        {
            std::uniform_int_distribution<int32_t> distribution_s32(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
            fill(tensor, distribution_s32, seed_offset);
            break;
        }
        case DataType::U64:
        {
            std::uniform_int_distribution<uint64_t> distribution_u64(std::numeric_limits<uint64_t>::lowest(), std::numeric_limits<uint64_t>::max());
            fill(tensor, distribution_u64, seed_offset);
            break;
        }
        case DataType::S64:
        {
            std::uniform_int_distribution<int64_t> distribution_s64(std::numeric_limits<int64_t>::lowest(), std::numeric_limits<int64_t>::max());
            fill(tensor, distribution_s64, seed_offset);
            break;
        }
        case DataType::BFLOAT16:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution_bf16{ -1000.f, 1000.f };
            fill(tensor, distribution_bf16, seed_offset);
            break;
        }
        case DataType::F16:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            arm_compute::utils::uniform_real_distribution_16bit<half> distribution_f16{ -100.f, 100.f };
            fill(tensor, distribution_f16, seed_offset);
            break;
        }
        case DataType::F32:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            std::uniform_real_distribution<float> distribution_f32(-1000.f, 1000.f);
            fill(tensor, distribution_f32, seed_offset);
            break;
        }
        case DataType::F64:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            std::uniform_real_distribution<double> distribution_f64(-1000.f, 1000.f);
            fill(tensor, distribution_f64, seed_offset);
            break;
        }
        case DataType::SIZET:
        {
            std::uniform_int_distribution<size_t> distribution_sizet(std::numeric_limits<size_t>::lowest(), std::numeric_limits<size_t>::max());
            fill(tensor, distribution_sizet, seed_offset);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

template <typename T>
void AssetsLibrary::fill_tensor_uniform_ranged(T &&tensor,
                                               std::random_device::result_type seed_offset,
                                               const std::vector<AssetsLibrary::RangePair> &excluded_range_pairs) const
{
    using namespace arm_compute::utils::random;

    switch(tensor.data_type())
    {
        case DataType::U8:
        case DataType::QASYMM8:
        {
            const auto converted_pairs = detail::convert_range_pair<uint32_t>(excluded_range_pairs);
            RangedUniformDistribution<uint32_t> distribution_u8(std::numeric_limits<uint8_t>::lowest(),
                                                                std::numeric_limits<uint8_t>::max(),
                                                                converted_pairs);
            fill(tensor, distribution_u8, seed_offset);
            break;
        }
        case DataType::S8:
        case DataType::QSYMM8:
        {
            const auto converted_pairs = detail::convert_range_pair<int32_t>(excluded_range_pairs);
            RangedUniformDistribution<int32_t> distribution_s8(std::numeric_limits<int8_t>::lowest(),
                                                               std::numeric_limits<int8_t>::max(),
                                                               converted_pairs);
            fill(tensor, distribution_s8, seed_offset);
            break;
        }
        case DataType::U16:
        {
            const auto converted_pairs = detail::convert_range_pair<uint16_t>(excluded_range_pairs);
            RangedUniformDistribution<uint16_t> distribution_u16(std::numeric_limits<uint16_t>::lowest(),
                                                                 std::numeric_limits<uint16_t>::max(),
                                                                 converted_pairs);
            fill(tensor, distribution_u16, seed_offset);
            break;
        }
        case DataType::S16:
        case DataType::QSYMM16:
        {
            const auto converted_pairs = detail::convert_range_pair<int16_t>(excluded_range_pairs);
            RangedUniformDistribution<int16_t> distribution_s16(std::numeric_limits<int16_t>::lowest(),
                                                                std::numeric_limits<int16_t>::max(),
                                                                converted_pairs);
            fill(tensor, distribution_s16, seed_offset);
            break;
        }
        case DataType::U32:
        {
            const auto converted_pairs = detail::convert_range_pair<uint32_t>(excluded_range_pairs);
            RangedUniformDistribution<uint32_t> distribution_u32(std::numeric_limits<uint32_t>::lowest(),
                                                                 std::numeric_limits<uint32_t>::max(),
                                                                 converted_pairs);
            fill(tensor, distribution_u32, seed_offset);
            break;
        }
        case DataType::S32:
        {
            const auto converted_pairs = detail::convert_range_pair<int32_t>(excluded_range_pairs);
            RangedUniformDistribution<int32_t> distribution_s32(std::numeric_limits<int32_t>::lowest(),
                                                                std::numeric_limits<int32_t>::max(),
                                                                converted_pairs);
            fill(tensor, distribution_s32, seed_offset);
            break;
        }
        case DataType::BFLOAT16:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            const auto converted_pairs = detail::convert_range_pair<bfloat16>(excluded_range_pairs);
            RangedUniformDistribution<bfloat16> distribution_bf16(bfloat16(-1000.f), bfloat16(1000.f), converted_pairs);
            fill(tensor, distribution_bf16, seed_offset);
            break;
        }
        case DataType::F16:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            const auto converted_pairs = detail::convert_range_pair<half>(excluded_range_pairs);
            RangedUniformDistribution<half> distribution_f16(half(-100.f), half(100.f), converted_pairs);
            fill(tensor, distribution_f16, seed_offset);
            break;
        }
        case DataType::F32:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            const auto converted_pairs = detail::convert_range_pair<float>(excluded_range_pairs);
            RangedUniformDistribution<float> distribution_f32(-1000.f, 1000.f, converted_pairs);
            fill(tensor, distribution_f32, seed_offset);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

template <typename T, typename D>
void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset, D low, D high) const
{
    switch(tensor.data_type())
    {
        case DataType::U8:
        case DataType::QASYMM8:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<uint8_t, D>::value));
            std::uniform_int_distribution<uint32_t> distribution_u8(low, high);
            fill(tensor, distribution_u8, seed_offset);
            break;
        }
        case DataType::S8:
        case DataType::QSYMM8:
        case DataType::QASYMM8_SIGNED:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<int8_t, D>::value));
            std::uniform_int_distribution<int32_t> distribution_s8(low, high);
            fill(tensor, distribution_s8, seed_offset);
            break;
        }
        case DataType::U16:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<uint16_t, D>::value));
            std::uniform_int_distribution<uint16_t> distribution_u16(low, high);
            fill(tensor, distribution_u16, seed_offset);
            break;
        }
        case DataType::S16:
        case DataType::QSYMM16:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<int16_t, D>::value));
            std::uniform_int_distribution<int16_t> distribution_s16(low, high);
            fill(tensor, distribution_s16, seed_offset);
            break;
        }
        case DataType::U32:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<uint32_t, D>::value));
            std::uniform_int_distribution<uint32_t> distribution_u32(low, high);
            fill(tensor, distribution_u32, seed_offset);
            break;
        }
        case DataType::S32:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<int32_t, D>::value));
            std::uniform_int_distribution<int32_t> distribution_s32(low, high);
            fill(tensor, distribution_s32, seed_offset);
            break;
        }
        case DataType::U64:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<uint64_t, D>::value));
            std::uniform_int_distribution<uint64_t> distribution_u64(low, high);
            fill(tensor, distribution_u64, seed_offset);
            break;
        }
        case DataType::S64:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<int64_t, D>::value));
            std::uniform_int_distribution<int64_t> distribution_s64(low, high);
            fill(tensor, distribution_s64, seed_offset);
            break;
        }
        case DataType::BFLOAT16:
        {
            arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution_bf16{ float(low), float(high) };
            fill(tensor, distribution_bf16, seed_offset);
            break;
        }
        case DataType::F16:
        {
            arm_compute::utils::uniform_real_distribution_16bit<half> distribution_f16{ float(low), float(high) };
            fill(tensor, distribution_f16, seed_offset);
            break;
        }
        case DataType::F32:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<float, D>::value));
            std::uniform_real_distribution<float> distribution_f32(low, high);
            fill(tensor, distribution_f32, seed_offset);
            break;
        }
        case DataType::F64:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<double, D>::value));
            std::uniform_real_distribution<double> distribution_f64(low, high);
            fill(tensor, distribution_f64, seed_offset);
            break;
        }
        case DataType::SIZET:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<size_t, D>::value));
            std::uniform_int_distribution<size_t> distribution_sizet(low, high);
            fill(tensor, distribution_sizet, seed_offset);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

template <typename T>
void AssetsLibrary::fill_layer_data(T &&tensor, std::string name) const
{
#ifdef _WIN32
    const std::string path_separator("\\");
#else /* _WIN32 */
    const std::string path_separator("/");
#endif /* _WIN32 */
    const std::string path = _library_path + path_separator + name;

    // Open file
    std::ifstream stream(path, std::ios::in | std::ios::binary);
    if(!stream.good())
    {
        throw framework::FileNotFound("Could not load npy file: " + path);
    }

    validate_npy_header(stream, tensor.data_type(), tensor.shape());

    // Read data
    if(tensor.padding().empty())
    {
        // If tensor has no padding read directly from stream.
        stream.read(reinterpret_cast<char *>(tensor.data()), tensor.size());
    }
    else
    {
        // If tensor has padding accessing tensor elements through execution window.
        Window window;
        window.use_tensor_dimensions(tensor.shape());

        execute_window_loop(window, [&](const Coordinates & id)
        {
            stream.read(reinterpret_cast<char *>(tensor(id)), tensor.element_size());
        });
    }
}

template <typename T, typename D>
void AssetsLibrary::fill_tensor_value(T &&tensor, D value) const
{
    fill_tensor_uniform(tensor, 0, value, value);
}
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_TENSOR_LIBRARY_H */