xref: /aosp_15_r20/external/ComputeLibrary/tests/validation/fixtures/ConcatenateLayerFixture.h (revision c217d954acce2dbc11938adb493fc0abd69584f3)
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ConcatenateLayer.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
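/** Generic fixture that validates tensor concatenation along a given axis.
 *
 * setup() creates between 2 and 8 input tensors by copying the base shape and randomly
 * shrinking some of the copies along the concatenation axis, assigns random quantization
 * info to every input and to the output, then runs both the function under test and the
 * reference implementation so a test can compare _target against _reference.
 *
 * Illustrative instantiation only; the concrete types below are an assumption made for the
 * example, each backend's validation suite supplies its own:
 *
 * @code
 * // Hypothetical alias wiring the fixture to the OpenCL backend:
 * template <typename T>
 * using CLConcatenateLayerFixture =
 *     ConcatenateLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLConcatenateLayer, T>;
 * @endcode
 */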
template <typename TensorType, typename ITensorType, typename AccessorType, typename FunctionType, typename T, bool CI = true>
class ConcatenateLayerValidationFixture : public framework::Fixture
{
private:
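    // When CI is true (presumably "const inputs"), the sources are handed to the function's
    // configure() as pointers-to-const; CI = false accommodates functions whose configure()
    // still takes non-const input pointers.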
    using SrcITensorType = typename std::conditional<CI, const ITensorType, ITensorType>::type;

public:
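    /** Set up the test: generate the input shapes, then run the target and reference concatenation.
     *
     * @param[in] shape     Base shape shared by all generated input tensors.
     * @param[in] data_type Data type of the input and output tensors.
     * @param[in] axis      Dimension along which the inputs are concatenated.
     */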
    template <typename...>
    void setup(TensorShape shape, DataType data_type, unsigned int axis)
    {
        // Create input shapes
        std::mt19937                    gen(library->seed());
        std::uniform_int_distribution<> num_dis(2, 8);
        std::uniform_int_distribution<> offset_dis(0, 20);

        const int num_tensors = num_dis(gen);

        std::vector<TensorShape> shapes(num_tensors, shape);

        // Vector holding the quantization info:
        //      the last element is the output quantization info
        //      all other elements are the quantization info for the input tensors
        std::vector<QuantizationInfo> qinfo(num_tensors + 1, QuantizationInfo());
        for(auto &qi : qinfo)
        {
            qi = QuantizationInfo(1.f / 255.f, offset_dis(gen));
        }
        std::bernoulli_distribution           mutate_dis(0.5f);
        std::uniform_real_distribution<float> change_dis(-0.25f, 0.f);

        // Randomly shrink some of the generated shapes along the concatenation axis
        for(auto &s : shapes)
        {
            // With 50% probability, mutate this shape
            if(mutate_dis(gen))
            {
                // Shrink the extent along the axis by up to ~50% (twice a factor drawn from
                // [-0.25, 0)). Never grow it, as that could make the tensor too large. For
                // example, an extent of 32 with a draw of -0.2 becomes 32 + 2 * (-6) = 20.
                s.set(axis, s[axis] + 2 * static_cast<int>(s[axis] * change_dis(gen)));
            }
        }

        _target    = compute_target(shapes, qinfo, data_type, axis);
        _reference = compute_reference(shapes, qinfo, data_type, axis);
    }

protected:
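    /** Fill a tensor with uniformly distributed random data.
     *
     * @param[in,out] tensor Tensor (or accessor) to fill.
     * @param[in]     i      Index of the tensor, used to vary the random data between inputs.
     */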
    template <typename U>
    void fill(U &&tensor, int i)
    {
        library->fill_tensor_uniform(tensor, i);
    }

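    /** Run the function under test: create the source/destination tensors, configure the
     *  concatenation, allocate and fill the inputs, then execute the function.
     *
     * @return The destination tensor produced by the function under test.
     */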
    TensorType compute_target(const std::vector<TensorShape> &shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type, unsigned int axis)
    {
        std::vector<TensorType>       srcs;
        std::vector<SrcITensorType *> src_ptrs;

        // Create tensors
        srcs.reserve(shapes.size());

        for(size_t j = 0; j < shapes.size(); ++j)
        {
            srcs.emplace_back(create_tensor<TensorType>(shapes[j], data_type, 1, qinfo[j]));
            src_ptrs.emplace_back(&srcs.back());
        }

        const TensorShape dst_shape = misc::shape_calculator::calculate_concatenate_shape(src_ptrs, axis);
        TensorType        dst       = create_tensor<TensorType>(dst_shape, data_type, 1, qinfo[shapes.size()]);

        // Create and configure function
        FunctionType concat;
        concat.configure(src_ptrs, &dst, axis);

        for(auto &src : srcs)
        {
            ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        }

        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        // Allocate tensors
        for(auto &src : srcs)
        {
            src.allocator()->allocate();
            ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        }

        dst.allocator()->allocate();
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        int i = 0;
        for(auto &src : srcs)
        {
            fill(AccessorType(src), i++);
        }

        // Compute function
        concat.run();

        return dst;
    }

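    /** Run the reference implementation on identically shaped, filled and quantized inputs.
     *
     * @return The expected destination tensor.
     */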
    SimpleTensor<T> compute_reference(std::vector<TensorShape> &shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type, unsigned int axis)
    {
        std::vector<SimpleTensor<T>> srcs;
        std::vector<TensorShape *>   src_ptrs;

        // Create and fill tensors
        for(size_t j = 0; j < shapes.size(); ++j)
        {
            srcs.emplace_back(shapes[j], data_type, 1, qinfo[j]);
            fill(srcs.back(), j);
            src_ptrs.emplace_back(&shapes[j]);
        }

        const TensorShape dst_shape = misc::shape_calculator::calculate_concatenate_shape(src_ptrs, axis);
        SimpleTensor<T>   dst{ dst_shape, data_type, 1, qinfo[shapes.size()] };
        return reference::concatenate_layer<T>(srcs, dst, axis);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_FIXTURE */