//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "MultiplicationTestImpl.hpp"

#include "ElementwiseTestImpl.hpp"
#include <reference/test/RefWorkloadFactoryHelper.hpp>

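// Explicit specialisation of CreateWorkload for MultiplicationQueueDescriptor, used by the
// shared ElementwiseTestHelper below to build a Multiplication workload from the given factory.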
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MultiplicationQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MultiplicationQueueDescriptor& descriptor)
{
    return workloadFactory.CreateWorkload(armnn::LayerType::Multiplication, descriptor, info);
}

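// Element-wise multiplication of two 2x2x2x2 Float32 tensors of identical shape.
// These helpers are typically registered from each backend's layer-test suite (for example via
// ArmNN's ARMNN_AUTO_TEST_CASE_WITH_THF macro); that registration lives outside this file.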
LayerTestResult<float, 4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                             const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int width        = 2u;
    const unsigned int height       = 2u;
    const unsigned int channelCount = 2u;
    const unsigned int batchSize    = 2u;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0 =
    {
        1, 1, 1, 1,  2, 2, 2, 2,
        3, 3, 3, 3,  4, 4, 4, 4
    };

    std::vector<float> input1 =
    {
        2, 2, 2, 2,  3, 3, 3, 3,
        4, 4, 4, 4,  5, 5, 5, 5
    };

    std::vector<float> output =
    {
         2,  2,  2,  2,   6,  6,  6,  6,
        12, 12, 12, 12,  20, 20, 20, 20
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input0,
        shape,
        input1,
        shape,
        output,
        tensorHandleFactory);
}

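// Element-wise multiplication of two rank-5 (2x2x2x2x3) Float32 tensors of identical shape,
// exercising the workload on tensors with more than four dimensions.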
LayerTestResult<float, 5> Multiplication5dTest(armnn::IWorkloadFactory& workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                               const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int width        = 3u;
    const unsigned int height       = 2u;
    const unsigned int channelCount = 2u;
    const unsigned int batchSize    = 2u;
    const unsigned int depth        = 2u;

    unsigned int shape[] = { depth, batchSize, channelCount, height, width };

    std::vector<float> input0 =
    {
        1.80f, 0.20f, 2.30f,  1.30f, 2.10f, 1.00f,
        2.60f, 0.60f, 2.10f,  2.30f, 2.30f, 2.00f,

        2.50f, 1.00f, 2.90f,  3.10f, 1.50f, 2.40f,
        2.80f, 1.10f, 1.00f,  3.20f, 1.00f, 2.30f,


        0.30f, 2.20f, 1.00f,  0.20f, 1.60f, 1.40f,
        0.80f, 3.20f, 0.10f,  0.10f, 3.10f, 2.10f,

        1.50f, 2.40f, 1.40f,  0.70f, 2.40f, 1.40f,
        1.60f, 1.20f, 1.90f,  0.80f, 0.00f, 0.10f,
    };

    std::vector<float> input1 =
    {
        0.70f, 1.00f, 2.90f,  2.20f, 3.10f, 2.80f,
        1.80f, 2.00f, 0.50f,  2.30f, 1.20f, 2.70f,

        2.40f, 0.20f, 3.20f,  1.60f, 0.20f, 2.50f,
        2.30f, 0.70f, 2.70f,  1.80f, 2.90f, 2.70f,


        3.20f, 3.20f, 0.70f,  1.90f, 2.70f, 2.50f,
        2.40f, 0.90f, 2.30f,  1.80f, 2.50f, 2.00f,

        1.60f, 2.20f, 1.60f,  2.00f, 0.30f, 3.20f,
        0.40f, 3.00f, 2.60f,  0.30f, 0.00f, 2.50f,
    };

    std::vector<float> output =
    {
        1.26f, 0.20f, 6.67f,  2.86f, 6.51f, 2.80f,
        4.68f, 1.20f, 1.05f,  5.29f, 2.76f, 5.40f,

        6.00f, 0.20f, 9.28f,  4.96f, 0.30f, 6.00f,
        6.44f, 0.77f, 2.70f,  5.76f, 2.90f, 6.21f,


        0.96f, 7.04f, 0.70f,  0.38f, 4.32f, 3.50f,
        1.92f, 2.88f, 0.23f,  0.18f, 7.75f, 4.20f,

        2.40f, 5.28f, 2.24f,  1.40f, 0.72f, 4.48f,
        0.64f, 3.60f, 4.94f,  0.24f, 0.00f, 0.25f,
    };

    return ElementwiseTestHelper<5, armnn::MultiplicationQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input0,
        shape,
        input1,
        shape,
        output,
        tensorHandleFactory);
}

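// Broadcast multiplication: a 1x2x2x2 Float32 tensor multiplied by a single-element (1x1x1x1)
// tensor that is broadcast across every element of the first input.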
LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<float> input0 = { 1, 2, 3, 4, 5, 6, 7, 8 };

    std::vector<float> input1 = { 2 };

    std::vector<float> output = { 2, 4, 6, 8, 10, 12, 14, 16 };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        tensorHandleFactory);
}

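// Broadcast multiplication: a 1x3x3x2 Float32 tensor multiplied by a 1x1x1x2 vector that is
// broadcast over the leading dimensions, so each pair of elements along the last axis is
// scaled by { 1, 2 }.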
LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    unsigned int shape1[] = { 1, 1, 1, 2 };

    std::vector<float> input0 =
    {
        1,   2,    3,  4,    5,  6,
        7,   8,    9, 10,   11, 12,
        13, 14,   15, 16,   17, 18
    };

    std::vector<float> input1 = { 1, 2 };

    std::vector<float> output =
    {
         1,  4,    3,  8,    5, 12,
         7, 16,    9, 20,   11, 24,
        13, 28,   15, 32,   17, 36
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        tensorHandleFactory);
}

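// QAsymmU8 multiplication on 1x2x2x3 tensors. ArmNN's asymmetric quantization maps a stored
// value q to the real value scale * (q - offset), so for input0 (scale 4.0, offset 1) the first
// element dequantizes as (62 - 1) * 4.0 = 244, matching the inline comments below. The output
// quantization (scale 1366.255, offset -5) is chosen so that some products fall outside the
// representable output range and saturate at the 8-bit extremes of 0 and 255.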
LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    constexpr unsigned int batchSize = 1u;
    constexpr unsigned int channels  = 2u;
    constexpr unsigned int height    = 2u;
    constexpr unsigned int width     = 3u;

    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right
    std::vector<uint8_t> input0 =
    {
         62,  37,   3, 172,  13, 111, // 244, 144,   8, 684,  48, 440,
        188,  20,  73,  31,  23,  31  // 748,  76, 288, 120,  88, 120
    };

    // See dequantized values to the right
    std::vector<uint8_t> input1 =
    {
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
         48, 115, 151,  79,  78,  97  // 150, 351, 459, 243, 240, 297
    };

    // See dequantized values to the right
    std::vector<uint8_t> output =
    {
         64,  72,   0, 255,   8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
         77,  15,  92,  16,  10,  21, // 112200,  26676,        132192,           29160, 21120,  35640
    };

    // Scale/offset chosen to have output values out of range
    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        shape,
        input0,
        4.0f,
        1,
        shape,
        input1,
        3.0f,
        -2,
        shape,
        output,
        tensorHandleFactory,
        1366.255f,
        -5);
}

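// QAsymmU8 broadcast multiplication: a 1x2x2x3 tensor multiplied by a single-element (1x1x1x1)
// tensor, using the helper's default quantization parameters.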
LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0 =
    {
        1, 2, 3,    4,  5,  6,
        7, 8, 9,   10, 11, 12
    };

    std::vector<uint8_t> input1 = { 2 };

    std::vector<uint8_t> output =
    {
        2,  4,   6,    8, 10, 12,
        14, 16, 18,   20, 22, 24
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        tensorHandleFactory);
}

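// QAsymmU8 broadcast multiplication: a 1x2x2x3 tensor multiplied by a 1x1x1x3 vector, broadcast
// so that each group of three elements along the last axis is scaled by { 1, 2, 3 }.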
LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<uint8_t> input0 =
    {
        1, 2, 3,    4,  5,  6,
        7, 8, 9,   10, 11, 12
    };

    std::vector<uint8_t> input1 = { 1, 2, 3 };

    std::vector<uint8_t> output =
    {
        1,  4,   9,     4, 10, 18,
        7, 16,  27,    10, 22, 36
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        tensorHandleFactory);
}

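// QSymmS16 element-wise multiplication of two 1x2x2x3 tensors of identical shape.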
LayerTestResult<int16_t, 4> MultiplicationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0 =
    {
        6,   7,  8,  9, 10, 11,
        12, 13, 14, 15, 16, 17
    };

    std::vector<int16_t> input1 =
    {
        1, 2, 3,  4,  5,  6,
        7, 8, 9, 10, 11, 12
    };

    std::vector<int16_t> output =
    {
        6,   14,  24,  36,  50,  66,
        84, 104, 126, 150, 176, 204
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        shape,
        input0,
        shape,
        input1,
        shape,
        output,
        tensorHandleFactory);
}

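// QSymmS16 broadcast multiplication: a 1x2x2x3 tensor multiplied by a single-element tensor.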
LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<int16_t> input0 =
    {
        1, 2, 3,  4,  5,  6,
        7, 8, 9, 10, 11, 12
    };

    std::vector<int16_t> input1 = { 2 };

    std::vector<int16_t> output =
    {
        2,   4,  6,  8, 10, 12,
        14, 16, 18, 20, 22, 24
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        tensorHandleFactory);
}

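// QSymmS16 broadcast multiplication: a 1x2x2x3 tensor multiplied by a 1x1x1x3 vector.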
LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<int16_t> input0 =
    {
        1, 2, 3,  4,  5,  6,
        7, 8, 9, 10, 11, 12
    };

    std::vector<int16_t> input1 = { 1, 2, 3 };

    std::vector<int16_t> output =
    {
        1,  4,  9,  4, 10, 18,
        7, 16, 27, 10, 22, 36
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        tensorHandleFactory);
}

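// Signed32 element-wise multiplication of two 1x2x2x3 tensors of identical shape.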
LayerTestResult<int32_t, 4> MultiplicationInt32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape[] = { 1, 2, 2, 3 };

    std::vector<int32_t> input0 =
    {
        6,   7,  8,  9, 10, 11,
        12, 13, 14, 15, 16, 17
    };

    std::vector<int32_t> input1 =
    {
        1, 2, 3,  4,  5,  6,
        7, 8, 9, 10, 11, 12
    };

    std::vector<int32_t> output =
    {
        6,   14,  24,  36,  50,  66,
        84, 104, 126, 150, 176, 204
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        shape,
        input0,
        shape,
        input1,
        shape,
        output,
        tensorHandleFactory);
}

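// Signed32 broadcast multiplication: a 1x2x2x3 tensor multiplied by a single-element tensor.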
LayerTestResult<int32_t, 4> MultiplicationBroadcast1ElementInt32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<int32_t> input0 =
    {
        1, 2, 3,  4,  5,  6,
        7, 8, 9, 10, 11, 12
    };

    std::vector<int32_t> input1 = { 2 };

    std::vector<int32_t> output =
    {
        2,   4,  6,  8, 10, 12,
        14, 16, 18, 20, 22, 24
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        tensorHandleFactory);
}

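// Signed32 broadcast multiplication: a 1x2x2x3 tensor multiplied by a 1x1x1x3 vector.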
LayerTestResult<int32_t, 4> MultiplicationBroadcast1DVectorInt32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<int32_t> input0 =
    {
        1, 2, 3,  4,  5,  6,
        7, 8, 9, 10, 11, 12
    };

    std::vector<int32_t> input1 = { 1, 2, 3 };

    std::vector<int32_t> output =
    {
        1,  4,  9,  4, 10, 18,
        7, 16, 27, 10, 22, 36
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        tensorHandleFactory);
}

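// Cross-backend comparison test: feeds the same randomly generated 5x2x32x16 Float32 inputs
// through a Multiplication workload created by the factory under test and by a reference
// workload factory, and returns both outputs so the framework can compare them.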
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input0 = MakeRandomTensor<float>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float>(inputTensorInfo1, 54902257);

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload
                = workloadFactory.CreateWorkload(armnn::LayerType::Multiplication, data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef
                = refWorkloadFactory.CreateWorkload(armnn::LayerType::Multiplication, refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), input0.data());
    CopyDataToITensorHandle(inputHandle1.get(), input1.data());
    CopyDataToITensorHandle(inputHandle0Ref.get(), input0.data());
    CopyDataToITensorHandle(inputHandle1Ref.get(), input1.data());

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}