1 //
2 // Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
3 // Copyright © 2021-2022 Arm Ltd and Contributors. All rights reserved.
4 // SPDX-License-Identifier: MIT
5 //
6
7 #include "ReduceSumTestImpl.hpp"
8
9 #include <DataTypeUtils.hpp>
10 #include <armnnTestUtils/TensorCopyUtils.hpp>
11 #include <armnnTestUtils/WorkloadTestUtils.hpp>
12
13 #include <armnnTestUtils/TensorHelpers.hpp>
14
15 namespace
16 {
17
18 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
ReduceTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,const armnn::TensorInfo inputTensorInfo,const armnn::TensorInfo outputTensorInfo,const std::vector<float> & inputData,const std::vector<float> & outputData,const std::vector<int32_t> vAxis,const armnn::ReduceOperation reduceOperation,bool keepDims=false)19 LayerTestResult<float, 4> ReduceTestCommon(
20 armnn::IWorkloadFactory& workloadFactory,
21 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
22 const armnn::ITensorHandleFactory& tensorHandleFactory,
23 const armnn::TensorInfo inputTensorInfo,
24 const armnn::TensorInfo outputTensorInfo,
25 const std::vector<float>& inputData,
26 const std::vector<float>& outputData,
27 const std::vector<int32_t> vAxis,
28 const armnn::ReduceOperation reduceOperation,
29 bool keepDims = false)
30 {
31 IgnoreUnused(memoryManager);
32 auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
33
34 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
35
36 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
37 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
38
39 armnn::ReduceQueueDescriptor descriptor;
40 std::vector<uint32_t> updated_idx;
41 uint32_t resolvedAxis = 0;
42 for (uint32_t i = 0; i < vAxis.size(); ++i)
43 {
44 if (vAxis[i] < 0)
45 {
46 resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
47 } else
48 {
49 resolvedAxis = static_cast<uint32_t>(vAxis[i]);
50 }
51
52 updated_idx.push_back(resolvedAxis);
53 }
54
55 descriptor.m_Parameters.m_vAxis = updated_idx;
56 descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
57 descriptor.m_Parameters.m_KeepDims = keepDims;
58 armnn::WorkloadInfo info;
59
60 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
61 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
62
63 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
64 descriptor,
65 info);
66
67 inputHandle->Allocate();
68 outputHandle->Allocate();
69
70 CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
71
72 workload->Execute();
73
74 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
75
76 return LayerTestResult<float, 4>(actualOutput,
77 outputData,
78 outputHandle->GetShape(),
79 outputTensorInfo.GetShape());
80 }
81
82 } // namespace
83
84 template<armnn::DataType ArmnnType, typename T>
ReduceSumSimpleTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)85 LayerTestResult<float, 4> ReduceSumSimpleTest(
86 armnn::IWorkloadFactory& workloadFactory,
87 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
88 const armnn::ITensorHandleFactory& tensorHandleFactory)
89 {
90 const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
91 const armnn::TensorShape outputShape{ 1, 1, 1, 1};
92
93 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
94
95 if (armnn::IsQuantizedType<T>())
96 {
97 inputTensorInfo.SetQuantizationScale(1.0f);
98 inputTensorInfo.SetQuantizationOffset(0);
99 }
100
101 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
102
103 std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
104 std::vector<float> outputValues({ 34.0f });
105
106 return ReduceTestCommon<ArmnnType>(workloadFactory,
107 memoryManager,
108 tensorHandleFactory,
109 inputTensorInfo,
110 outputTensorInfo,
111 inputValues,
112 outputValues,
113 { -1 },
114 armnn::ReduceOperation::Sum);
115 }
116
117 template<armnn::DataType ArmnnType, typename T>
ReduceSumSingleAxisTest1(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)118 LayerTestResult<float, 4> ReduceSumSingleAxisTest1(
119 armnn::IWorkloadFactory& workloadFactory,
120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
121 const armnn::ITensorHandleFactory& tensorHandleFactory)
122 {
123 const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
124 const armnn::TensorShape outputShape{ 1, 1, 2, 4};
125
126 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
127
128 if (armnn::IsQuantizedType<T>())
129 {
130 inputTensorInfo.SetQuantizationScale(1.0f);
131 inputTensorInfo.SetQuantizationOffset(0);
132 }
133
134 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
135
136 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
137 5.0f, 6.0f, 7.0f, 8.0f,
138
139 10.0f, 20.0f, 30.0f, 40.0f,
140 50.0f, 60.0f, 70.0f, 80.0f,
141
142 100.0f, 200.0f, 300.0f, 400.0f,
143 500.0f, 600.0f, 700.0f, 800.0f });
144 std::vector<float> outputValues({ 111.0f, 222.0f, 333.0f, 444.0f,
145 555.0f, 666.0f, 777.0f, 888.0f });
146
147 return ReduceTestCommon<ArmnnType>(workloadFactory,
148 memoryManager,
149 tensorHandleFactory,
150 inputTensorInfo,
151 outputTensorInfo,
152 inputValues,
153 outputValues,
154 { 1 },
155 armnn::ReduceOperation::Sum);
156 }
157
158 template<armnn::DataType ArmnnType, typename T>
ReduceSumSingleAxisTest2(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)159 LayerTestResult<float, 4> ReduceSumSingleAxisTest2(
160 armnn::IWorkloadFactory& workloadFactory,
161 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
162 const armnn::ITensorHandleFactory& tensorHandleFactory)
163 {
164 const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
165 const armnn::TensorShape outputShape{ 1, 1, 3, 4};
166
167 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
168
169 if (armnn::IsQuantizedType<T>())
170 {
171 inputTensorInfo.SetQuantizationScale(1.0f);
172 inputTensorInfo.SetQuantizationOffset(0);
173 }
174
175 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
176
177 std::vector<float> inputValues( {7, 8, 6, 1,
178 1, 1, 8, 7,
179 3, 7, 7, 7,
180
181 6, 8, 4, 7,
182 3, 8, 7, 3,
183 5, 8, 8, 8,
184
185
186 7, 8, 2, 7,
187 3, 8, 5, 6,
188 8, 4, 2, 7,
189
190 1, 6, 7, 2,
191 8, 3, 3, 1,
192 7, 6, 2, 6,
193
194
195 5, 3, 4, 8,
196 7, 8, 2, 4,
197 6, 6, 2, 8,
198
199 2, 2, 7, 2,
200 5, 3, 6, 3,
201 6, 1, 8, 8});
202 std::vector<float> outputValues({ 28.0f, 35.0f, 30.0f, 27.0f,
203 27.0f, 31.0f, 31.0f, 24.0f,
204 35.0f, 32.0f, 29.0f, 44.0f});
205
206 return ReduceTestCommon<ArmnnType>(workloadFactory,
207 memoryManager,
208 tensorHandleFactory,
209 inputTensorInfo,
210 outputTensorInfo,
211 inputValues,
212 outputValues,
213 { 1 },
214 armnn::ReduceOperation::Sum);
215 }
216
217 template<armnn::DataType ArmnnType, typename T>
ReduceSumSingleAxisTest3(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)218 LayerTestResult<float, 4> ReduceSumSingleAxisTest3(
219 armnn::IWorkloadFactory& workloadFactory,
220 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
221 const armnn::ITensorHandleFactory& tensorHandleFactory)
222 {
223 const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
224 const armnn::TensorShape outputShape{ 1, 6, 3, 1};
225
226 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
227
228 if (armnn::IsQuantizedType<T>())
229 {
230 inputTensorInfo.SetQuantizationScale(1.0f);
231 inputTensorInfo.SetQuantizationOffset(0);
232 }
233
234 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
235
236 std::vector<float> inputValues( {7, 8, 6, 1,
237 1, 1, 8, 7,
238 3, 7, 7, 7,
239
240 6, 8, 4, 7,
241 3, 8, 7, 3,
242 5, 8, 8, 8,
243
244
245 7, 8, 2, 7,
246 3, 8, 5, 6,
247 8, 4, 2, 7,
248
249 1, 6, 7, 2,
250 8, 3, 3, 1,
251 7, 6, 2, 6,
252
253
254 5, 3, 4, 8,
255 7, 8, 2, 4,
256 6, 6, 2, 8,
257
258 2, 2, 7, 2,
259 5, 3, 6, 3,
260 6, 1, 8, 8});
261 std::vector<float> outputValues({ 22.0f, 17.0f, 24.0f,
262 25.0f, 21.0f, 29.0f,
263
264 24.0f, 22.0f, 21.0f,
265 16.0f, 15.0f, 21.0f,
266
267 20.0f, 21.0f, 22.0f,
268 13.0f, 17.0f, 23.0f});
269
270 return ReduceTestCommon<ArmnnType>(workloadFactory,
271 memoryManager,
272 tensorHandleFactory,
273 inputTensorInfo,
274 outputTensorInfo,
275 inputValues,
276 outputValues,
277 { 3 },
278 armnn::ReduceOperation::Sum,
279 true);
280 }
281
282 template<armnn::DataType ArmnnType, typename T>
ReduceSumMultipleAxisTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)283 LayerTestResult<float, 4> ReduceSumMultipleAxisTest(
284 armnn::IWorkloadFactory& workloadFactory,
285 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
286 const armnn::ITensorHandleFactory& tensorHandleFactory)
287 {
288 const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
289 const armnn::TensorShape outputShape{ 1, 1, 1, 4};
290
291 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
292
293 if (armnn::IsQuantizedType<T>())
294 {
295 inputTensorInfo.SetQuantizationScale(1.0f);
296 inputTensorInfo.SetQuantizationOffset(0);
297 }
298
299 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
300
301 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
302 5.0f, 6.0f, 7.0f, 8.0f,
303
304 10.0f, 20.0f, 30.0f, 40.0f,
305 50.0f, 60.0f, 70.0f, 80.0f,
306
307 100.0f, 200.0f, 300.0f, 400.0f,
308 500.0f, 600.0f, 700.0f, 800.0f });
309 std::vector<float> outputValues({ 666.0f, 888.0f, 1110.0f, 1332.0f });
310
311 return ReduceTestCommon<ArmnnType>(workloadFactory,
312 memoryManager,
313 tensorHandleFactory,
314 inputTensorInfo,
315 outputTensorInfo,
316 inputValues,
317 outputValues,
318 { 1, 2 },
319 armnn::ReduceOperation::Sum);
320 }
321
// Explicit template instantiations for Float32, so these test functions can be
// declared in the header and linked from the per-backend test suites without
// exposing the template bodies.

template LayerTestResult<float, 4>
ReduceSumSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumSingleAxisTest1<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumSingleAxisTest2<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumSingleAxisTest3<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumMultipleAxisTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);
353