//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "PadTestImpl.hpp"

#include <armnnUtils/QuantizeHelper.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

//
// Implementation templates
//

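// Pads a 3x3 input to a 7x7 output (two elements of padding on every edge of
// both dimensions), runs the Pad workload and compares the result against the
// expected quantized values. The padding value is taken from customPaddingValue.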
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> Pad2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorShape inputShape{ 3, 3 };
    const armnn::TensorShape outputShape{ 7, 7 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
        {
            // Height (3) x Width (3)
            4, 8, 6,
            7, 4, 4,
            3, 2, 4
        },
        qScale, qOffset);

    auto p = customPaddingValue;
    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
        {
            p, p, p, p, p, p, p,
            p, p, p, p, p, p, p,
            p, p, 4, 8, 6, p, p,
            p, p, 7, 4, 4, p, p,
            p, p, 3, 2, 4, p, p,
            p, p, p, p, p, p, p,
            p, p, p, p, p, p, p
        },
        qScale, qOffset);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = padList;
    descriptor.m_Parameters.m_PadValue = customPaddingValue;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
                                                                                descriptor,
                                                                                info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputValues.data());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 2>(actualOutput,
                                 expectedOutputValues,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

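// Pads a 2x2x2 input to a 3x5x6 output using asymmetric per-dimension padding
// of (0,1), (2,1) and (2,2); the pad value is left at its default of zero.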
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> Pad3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
        {
            // Channel 0, Height (2) x Width (2)
            0, 4,
            2, 5,

            // Channel 1, Height (2) x Width (2)
            6, 1,
            5, 2
        },
        qScale, qOffset);

    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
        {
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 4, 0, 0,
            0, 0, 2, 5, 0, 0,
            0, 0, 0, 0, 0, 0,

            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 6, 1, 0, 0,
            0, 0, 5, 2, 0, 0,
            0, 0, 0, 0, 0, 0,

            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        },
        qScale, qOffset);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
                                                                                descriptor,
                                                                                info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputValues.data());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 3>(actualOutput,
                                 expectedOutputValues,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

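// Pads a 2x2x3x2 input to a 4x5x7x4 output using per-dimension padding of
// (1,1), (2,1), (3,1) and (1,1); the pad value is left at its default of zero.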
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> Pad4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
        {
            // Batch 0, Channel 0, Height (3) x Width (2)
            0, 1,
            2, 3,
            4, 5,

            // Batch 0, Channel 1, Height (3) x Width (2)
            6, 7,
            8, 9,
            10, 11,

            // Batch 1, Channel 0, Height (3) x Width (2)
            12, 13,
            14, 15,
            16, 17,

            // Batch 1, Channel 1, Height (3) x Width (2)
            18, 19,
            20, 21,
            22, 23
        },
        qScale, qOffset);

    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
        {
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 1, 0,
            0, 2, 3, 0,
            0, 4, 5, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 6, 7, 0,
            0, 8, 9, 0,
            0, 10, 11, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 12, 13, 0,
            0, 14, 15, 0,
            0, 16, 17, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 18, 19, 0,
            0, 20, 21, 0,
            0, 22, 23, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0
        },
        qScale, qOffset);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
                                                                                descriptor,
                                                                                info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputValues.data());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutputValues,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

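// Variant for asymmetrically quantized types: the input and expected values
// are used as raw (already quantized) integers rather than being passed
// through QuantizedVector, and the custom padding value is cast directly to T.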
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> PadQAsymmTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorShape inputShape{ 3, 3 };
    const armnn::TensorShape outputShape{ 7, 7 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues =
    {
        // Height (3) x Width (3)
        4, 8, 6,
        7, 4, 4,
        3, 2, 4
    };

    T p = static_cast<T>(customPaddingValue);
    std::vector<T> expectedOutputValues =
    {
        p, p, p, p, p, p, p,
        p, p, p, p, p, p, p,
        p, p, 4, 8, 6, p, p,
        p, p, 7, 4, 4, p, p,
        p, p, 3, 2, 4, p, p,
        p, p, p, p, p, p, p,
        p, p, p, p, p, p, p
    };

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = padList;
    descriptor.m_Parameters.m_PadValue = customPaddingValue;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
                                                                                descriptor,
                                                                                info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputValues.data());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 2>(actualOutput,
                                 expectedOutputValues,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

//
// Explicit template specializations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
Pad2dTestCommon<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
Pad3dTestCommon<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
Pad4dTestCommon<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
PadQAsymmTestCommon<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue);

//
// Implementation functions
//

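// The functions below are thin wrappers that fix the data type and
// quantization parameters for the templates above. They are typically
// registered in a backend's unit test suite; for example (illustrative only,
// assuming the usual layer-test registration macro is available to that
// backend's test file):
//
//     ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322d, PadFloat322dTest)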
LayerTestResult<uint8_t, 2> PadUint82dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0, 1.0f);
}

LayerTestResult<uint8_t, 3> PadUint83dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad3dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> PadUint84dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad4dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<float, 2> PadFloat322dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, 1.0f);
}

LayerTestResult<float, 3> PadFloat323dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<float, 4> PadFloat324dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<armnn::BFloat16, 2> PadBFloat162dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<armnn::BFloat16, 2> PadBFloat162dCustomPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::BFloat16>(
        workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, 1.0f);
}

LayerTestResult<armnn::BFloat16, 3> PadBFloat163dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad3dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<armnn::BFloat16, 4> PadBFloat164dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad4dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<int8_t, 2> PadInt82dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::QSymmS8>(
        workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<int8_t, 2> PadInt82dCustomPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::QSymmS8>(
        workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0, 1.0f);
}

LayerTestResult<int8_t, 3> PadInt83dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad3dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<int8_t, 4> PadInt84dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad4dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<int8_t, 2> PadInt8AsymmTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
        workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 2);
}

LayerTestResult<int8_t, 2> PadInt8CustomPaddingAsymmTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
        workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 3, 1.0f);
}