//
// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ArgMinMaxTestImpl.hpp"


#include <DataTypeUtils.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

namespace
{

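// Runs a single ArgMinMax workload on the given backend: converts the float reference input to the
// data type under test, executes the workload, and returns the computed int32 indices alongside the
// expected ones for comparison.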
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        armnn::ArgMinMaxFunction argMinMaxFunction,
        const armnn::TensorInfo inputTensorInfo,
        const armnn::TensorInfo outputTensorInfo,
        const std::vector<float>& inputData,
        const std::vector<int32_t>& outputData,
        int axis = 3)
{
    // Convert the float reference data to the data type under test.
    std::vector<T> inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
    std::vector<int32_t> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Describe and create the ArgMinMax workload.
    armnn::ArgMinMaxQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Function = argMinMaxFunction;
    descriptor.m_Parameters.m_Axis = axis;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ArgMinMax,
                                                                                descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());

    // Execute the workload and read back the computed indices.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<int32_t, 3>(actualOutput,
                                       outputData,
                                       outputHandle->GetShape(),
                                       outputTensorInfo.GetShape());
}

} // namespace

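// ArgMax over the innermost (width) axis of a 1x1x1x5 tensor; the maximum value 10.0f sits at index 3.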
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<int32_t, 3> ArgMaxSimpleTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
    const armnn::TensorShape outputShape{ 1, 1, 1 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
    std::vector<int32_t> outputValues({ 3 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Max,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, -1); // axis -1 is equivalent to axis 3 here
}

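// ArgMin over the innermost (width) axis of a 1x1x1x5 tensor; the minimum value 2.0f sits at index 1.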
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<int32_t, 3> ArgMinSimpleTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
    const armnn::TensorShape outputShape{ 1, 1, 1 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
    std::vector<int32_t> outputValues({ 1 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Min,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 3);
}

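// ArgMin along the channel axis (axis 1) of a 1x3x2x4 tensor; the first channel holds the smallest
// value at every spatial position, so all expected indices are 0.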
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<int32_t, 3> ArgMinChannelTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4};
    const armnn::TensorShape outputShape{ 1, 2, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({   1.0f,   2.0f,   3.0f,   4.0f,
                                       5.0f,   6.0f,   7.0f,   8.0f,

                                      10.0f,  20.0f,  30.0f,  40.0f,
                                      50.0f,  60.0f,  70.0f,  80.0f,

                                     100.0f, 200.0f, 300.0f, 400.0f,
                                     500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<int32_t> outputValues({ 0, 0, 0, 0,
                                        0, 0, 0, 0 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Min,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 1);
}

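// ArgMax along the channel axis (axis 1) of a 1x3x2x4 tensor; the last channel holds the largest
// value at every spatial position, so all expected indices are 2.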
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<int32_t, 3> ArgMaxChannelTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4};
    const armnn::TensorShape outputShape{ 1, 2, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({  1.0f,   2.0f,   3.0f,   4.0f,
                                      5.0f,   6.0f,   7.0f,   8.0f,

                                     10.0f,  20.0f,  30.0f,  40.0f,
                                     50.0f,  60.0f,  70.0f,  80.0f,

                                    100.0f, 200.0f, 300.0f, 400.0f,
                                    500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<int32_t> outputValues({ 2, 2, 2, 2,
                                        2, 2, 2, 2 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Max,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 1);
}

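// ArgMax along the height axis (axis 2) of a 1x3x2x4 tensor; the second row of every channel holds
// the larger values, so all expected indices are 1.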
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<int32_t, 3> ArgMaxHeightTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4};
    const armnn::TensorShape outputShape{ 1, 3, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    std::vector<float> inputValues({  1.0f,   2.0f,   3.0f,   4.0f,
                                      5.0f,   6.0f,   7.0f,   8.0f,

                                     10.0f,  20.0f,  30.0f,  40.0f,
                                     50.0f,  60.0f,  70.0f,  80.0f,

                                    100.0f, 200.0f, 300.0f, 400.0f,
                                    500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<int32_t> outputValues({ 1, 1, 1, 1,
                                        1, 1, 1, 1,
                                        1, 1, 1, 1 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Max,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 2);
}

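// ArgMin along the width axis (axis 3) of a 1x3x2x4 tensor; values increase along each row, so the
// minimum is always at index 0.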
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<int32_t, 3> ArgMinWidthTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4};
    const armnn::TensorShape outputShape{ 1, 3, 2 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    std::vector<float> inputValues({  1.0f,   2.0f,   3.0f,   4.0f,
                                      5.0f,   6.0f,   7.0f,   8.0f,

                                     10.0f,  20.0f,  30.0f,  40.0f,
                                     50.0f,  60.0f,  70.0f,  80.0f,

                                    100.0f, 200.0f, 300.0f, 400.0f,
                                    500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<int32_t> outputValues({ 0, 0,
                                        0, 0,
                                        0, 0 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Min,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 3);
}


// Explicit template specializations
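// The test templates above are defined in this translation unit, so every data type exercised by
// the backend unit tests needs an explicit instantiation here.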

template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::Float16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::Signed32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::Float16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::QSymmS16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::Signed32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::Float16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::QAsymmU8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::QSymmS16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::Signed32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::Float16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::QSymmS16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::Signed32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxHeightTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxHeightTest<armnn::DataType::Float16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxHeightTest<armnn::DataType::Signed32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxHeightTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::Float16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::Signed32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::QAsymmU8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);