/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FUSION_FIXTURE
#define ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FUSION_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/BatchNormalizationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
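/** Fixture that validates fusing batch normalization parameters into the weights and bias of a preceding convolution.
 *
 * The target path runs the fusion function to produce fused weights/bias and then executes the convolution on the
 * fused tensors; the reference path computes an unfused convolution followed by batch normalization.
 */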
template <typename TensorType, typename AccessorType, typename ConvolutionFunctionType, typename FusionFunctionType, typename T>
class BatchNormalizationLayerFusionValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape src_shape, TensorShape w_shape, TensorShape b_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation,
               bool use_conv_b, bool use_beta, bool use_gamma, float epsilon, DataType dt, DataLayout data_layout)
    {
        ARM_COMPUTE_UNUSED(dilation);

        _data_type   = dt;
        _data_layout = data_layout;
        _use_conv_b  = use_conv_b;
        _use_beta    = use_beta;
        _use_gamma   = use_gamma;

        _target    = compute_target(src_shape, w_shape, b_shape, dst_shape, info, epsilon);
        _reference = compute_reference(src_shape, w_shape, b_shape, dst_shape, info, epsilon);
    }

protected:
    template <typename U>
    void fill(U &&src, U &&w_tensor, U &&b_tensor, U &&mean_tensor, U &&var_tensor, U &&beta_tensor, U &&gamma_tensor)
    {
        static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
        using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;

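        // Uniform values in [-1, 1] for the input, weights, mean, bias, beta and gamma; the variance is drawn from [0, 1] so it stays non-negative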
        DistributionType distribution{ T(-1.f), T(1.f) };
        DistributionType distribution_gz{ T(0.f), T(1.f) };

        library->fill(src, distribution, 0);
        library->fill(w_tensor, distribution, 1);
        library->fill(mean_tensor, distribution, 2);
        library->fill(var_tensor, distribution_gz, 3);
        _use_conv_b ? library->fill(b_tensor, distribution, 4) : library->fill_tensor_value(b_tensor, T(0.f));
        _use_beta ? library->fill(beta_tensor, distribution, 5) : library->fill_tensor_value(beta_tensor, T(0.f));
        _use_gamma ? library->fill(gamma_tensor, distribution, 6) : library->fill_tensor_value(gamma_tensor, T(1.f));
    }

    TensorType compute_target(TensorShape src_shape, TensorShape w_shape, TensorShape b_shape, TensorShape dst_shape, PadStrideInfo info, float epsilon)
    {
        if(_data_layout == DataLayout::NHWC)
        {
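            // Test shapes are given in NCHW; permute them to match the NHWC layout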
            permute(src_shape, PermutationVector(2U, 0U, 1U));
            permute(w_shape, PermutationVector(2U, 0U, 1U));
            permute(dst_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src      = create_tensor<TensorType>(src_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType conv_w   = create_tensor<TensorType>(w_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType conv_b   = create_tensor<TensorType>(b_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType bn_mean  = create_tensor<TensorType>(b_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType bn_var   = create_tensor<TensorType>(b_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType bn_beta  = create_tensor<TensorType>(b_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType bn_gamma = create_tensor<TensorType>(b_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType fused_w  = create_tensor<TensorType>(w_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType fused_b  = create_tensor<TensorType>(b_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType dst      = create_tensor<TensorType>(dst_shape, _data_type, 1, QuantizationInfo(), _data_layout);

        // Create and configure function
        FusionFunctionType      fuse_fn;
        ConvolutionFunctionType conv_fn;
        TensorType             *conv_b_ptr = _use_conv_b ? &conv_b : nullptr;
        TensorType             *beta_ptr   = _use_beta ? &bn_beta : nullptr;
        TensorType             *gamma_ptr  = _use_gamma ? &bn_gamma : nullptr;
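        // Fold the batch normalization statistics into fused_w/fused_b, then run the convolution on the fused tensors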
        fuse_fn.configure(&conv_w, &bn_mean, &bn_var, &fused_w, &fused_b, conv_b_ptr, beta_ptr, gamma_ptr, epsilon);
        conv_fn.configure(&src, &fused_w, &fused_b, &dst, info);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(conv_w.info()->is_resizable());
        ARM_COMPUTE_ASSERT(conv_b.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bn_mean.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bn_var.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bn_beta.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bn_gamma.info()->is_resizable());
        ARM_COMPUTE_ASSERT(fused_w.info()->is_resizable());
        ARM_COMPUTE_ASSERT(fused_b.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        // Allocate tensors
        src.allocator()->allocate();
        conv_w.allocator()->allocate();
        conv_b.allocator()->allocate();
        bn_mean.allocator()->allocate();
        bn_var.allocator()->allocate();
        bn_beta.allocator()->allocate();
        bn_gamma.allocator()->allocate();
        fused_w.allocator()->allocate();
        fused_b.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!conv_w.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!conv_b.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bn_mean.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bn_var.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bn_beta.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bn_gamma.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!fused_w.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!fused_b.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src),
             AccessorType(conv_w), AccessorType(conv_b),
             AccessorType(bn_mean), AccessorType(bn_var), AccessorType(bn_beta), AccessorType(bn_gamma));

        // Compute function
        fuse_fn.run();
        conv_fn.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(TensorShape src_shape, TensorShape w_shape, TensorShape b_shape, TensorShape dst_shape, PadStrideInfo info, float epsilon)
    {
        // Create reference
        SimpleTensor<T> src{ src_shape, _data_type, 1 };
        SimpleTensor<T> conv_w{ w_shape, _data_type, 1 };
        SimpleTensor<T> conv_b{ b_shape, _data_type, 1 };
        SimpleTensor<T> bn_var{ b_shape, _data_type, 1 };
        SimpleTensor<T> bn_mean{ b_shape, _data_type, 1 };
        SimpleTensor<T> bn_beta{ b_shape, _data_type, 1 };
        SimpleTensor<T> bn_gamma{ b_shape, _data_type, 1 };

        // Fill reference
        fill(src, conv_w, conv_b, bn_mean, bn_var, bn_beta, bn_gamma);

        // Calculate Conv + BN
        auto conv_res = reference::convolution_layer(src, conv_w, conv_b, dst_shape, info);
        return reference::batch_normalization_layer(conv_res, bn_mean, bn_var, bn_beta, bn_gamma, epsilon, ActivationLayerInfo());
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
    DataType        _data_type{};
    DataLayout      _data_layout{};
    bool            _use_conv_b{};
    bool            _use_beta{};
    bool            _use_gamma{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FUSION_FIXTURE */