/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLFuseBatchNormalization.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/LargeConvolutionLayerDataset.h"
#include "tests/datasets/RandomBatchNormalizationLayerDataset.h"
#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/BatchNormalizationLayerFixture.h"
#include "tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
RelativeTolerance<float>           rel_tolerance_f32(0.05f);   /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.0001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
constexpr AbsoluteTolerance<float> tolerance_f16(0.02f);       /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
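// Note: the reference implementation computes the canonical batch normalization
//   y = gamma * (x - mean) / sqrt(var + epsilon) + beta
// and the CL output is validated element-wise against it using the tolerances above.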
const auto act_infos = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
});

const auto common_fusion_dataset = combine(combine(combine(framework::dataset::make("UseBias", { false, true }),
                                                           framework::dataset::make("UseBeta", { false, true })),
                                                   framework::dataset::make("UseGamma", { false, true })),
                                           framework::dataset::make("Epsilon", { 0.001f }));
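// The fusion tests fold batch normalization into the preceding convolution, i.e. (assuming the
// standard fold) fused_weights = weights * gamma / sqrt(var + epsilon) and
// fused_bias = (conv_bias - mean) * gamma / sqrt(var + epsilon) + beta, so the fused convolution
// is expected to match convolution followed by batch normalization within the tolerances above.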

} // namespace

TEST_SUITE(CL)
TEST_SUITE(BatchNormalizationLayer)

template <typename T>
using CLBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Unsupported fused activation
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Fused activation's a < b
                                                     }),
               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                     })),
               framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32),
                                                     TensorInfo(TensorShape(2U), 1, DataType::F32),
                                                     TensorInfo(TensorShape(2U), 1, DataType::F16),
                                                     TensorInfo(TensorShape(2U), 1, DataType::F32),
                                                     TensorInfo(TensorShape(5U), 1, DataType::F32),
                                                     TensorInfo(TensorShape(2U), 1, DataType::F32),
                                                     TensorInfo(TensorShape(2U), 1, DataType::F32),
                                                   })),
               framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                                ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                                ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
                                                                ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
                                                                ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f),
                                                                ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
                                                                ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f),
                                                              })),
               framework::dataset::make("Expected", { true, false, false, false, false, false, false })),
               input_info, output_info, mvbg_info, act_info, expected)
{
    const auto &mean_info  = mvbg_info;
    const auto &var_info   = mvbg_info;
    const auto &beta_info  = mvbg_info;
    const auto &gamma_info = mvbg_info;
    bool has_error = bool(CLBatchNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), &mean_info.clone()->set_is_resizable(false), &var_info.clone()->set_is_resizable(false), &beta_info.clone()->set_is_resizable(false), &gamma_info.clone()->set_is_resizable(false), 1.f, act_info));
    ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
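// For reference, a typical runtime use of the function mirrors the validate() call above.
// A minimal sketch, assuming the configure() parameter order matches validate() and the
// tensors are already allocated and filled (not part of the test):
//
//   CLBatchNormalizationLayer bn;
//   bn.configure(&input, &output, &mean, &var, &beta, &gamma, /* epsilon */ 0.001f,
//                ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
//   bn.run();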

TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(),
                       combine(framework::dataset::make("UseBeta", { false, true }), framework::dataset::make("UseGamma", { false, true }))),
                       act_infos),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, abs_tolerance_f32, 0);
}
TEST_SUITE_END() // FP32

TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(),
                       combine(framework::dataset::make("UseBeta", { false, true }), framework::dataset::make("UseGamma", { false, true }))),
                       framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
                       framework::dataset::make("DataType", DataType::F16)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, 0);
}
TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float

TEST_SUITE_END() // BatchNormalizationLayer

TEST_SUITE(BatchNormalizationLayerFusion)
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
               framework::dataset::make("Weights", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), // Valid
                                                     TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), // Mismatching data types
                                                     TensorInfo(TensorShape(32U, 13U, 2U, 1U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape
                                                   }),
               framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32),
                                                     TensorInfo(TensorShape(2U), 1, DataType::F16),
                                                     TensorInfo(TensorShape(5U), 1, DataType::F32),
                                                   })),
               framework::dataset::make("Expected", { true, false, false })),
               weights_info, mvbg_info, expected)
{
    const auto &weights_in_info    = weights_info;
    const auto &mean_info          = mvbg_info;
    const auto &var_info           = mvbg_info;
    const auto &fused_weights_info = weights_info;
    const auto &fused_bias_info    = mvbg_info;
    const auto &conv_bias_info     = mvbg_info;
    const auto &beta_info          = mvbg_info;
    const auto &gamma_info         = mvbg_info;
    bool has_error = bool(CLFuseBatchNormalization::validate(
                            &weights_in_info.clone()->set_is_resizable(false), &mean_info.clone()->set_is_resizable(false),
                            &var_info.clone()->set_is_resizable(false), &fused_weights_info.clone()->set_is_resizable(false),
                            &fused_bias_info.clone()->set_is_resizable(false), &conv_bias_info.clone()->set_is_resizable(false),
                            &beta_info.clone()->set_is_resizable(false), &gamma_info.clone()->set_is_resizable(false), 1.f));
    ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
template <typename T>
using CLBatchNormalizationLayerFusionFixture = BatchNormalizationLayerFusionValidationFixture<CLTensor, CLAccessor, CLConvolutionLayer, CLFuseBatchNormalization, T>;

TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchNormalizationLayerFusionFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(), common_fusion_dataset),
                                       framework::dataset::make("DataType", DataType::F32)),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchNormalizationLayerFusionFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::SmallConvolutionLayerDataset(), common_fusion_dataset),
                                       framework::dataset::make("DataType", DataType::F32)),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

TEST_SUITE_END() // BatchNormalizationLayerFusion
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute