//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FakeQuantizationTestImpl.hpp"


#include <armnn/backends/TensorHandle.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

FakeQuantizationTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)16 LayerTestResult<float, 2> FakeQuantizationTest(
17     armnn::IWorkloadFactory& workloadFactory,
18     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
19     const armnn::ITensorHandleFactory& tensorHandleFactory)
20 {
21     IgnoreUnused(memoryManager);
22     constexpr unsigned int width = 2;
23     constexpr unsigned int height = 3;
24 
25     const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
26 
27     std::vector<float> input =
28     {
29        -10.0f, -5.0f,
30          0.0f,  5.0f,
31         10.0f, 10.0f
32     };
33 
34     std::vector<float> actualOutput(tensorInfo.GetNumElements());
35     std::vector<float> expectedOutput(tensorInfo.GetNumElements());
36 
37     std::unique_ptr<armnn::ITensorHandle> inputHandle   = tensorHandleFactory.CreateTensorHandle(tensorInfo);
38     std::unique_ptr<armnn::ITensorHandle> outputHandle  = tensorHandleFactory.CreateTensorHandle(tensorInfo);
39 
40     armnn::FakeQuantizationQueueDescriptor data;
41     armnn::WorkloadInfo info;
42 
43     AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
44     AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
45 
46     float min = -10.f;
47     float max =  10.f;
48 
49     data.m_Parameters.m_Min = min;
50     data.m_Parameters.m_Max = max;
51 
52     armnn::PassthroughTensorHandle refHandle(tensorInfo, expectedOutput.data());
53     armnn::FakeQuantizationQueueDescriptor refData = data;
54     armnn::WorkloadInfo refInfo = info;
55     SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
56 
57     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::FakeQuantization,
58                                                                                 data,
59                                                                                 info);
60 
61     inputHandle->Allocate();
62     outputHandle->Allocate();
63 
64     CopyDataToITensorHandle(inputHandle.get(), input.data());
65 
66     workload->PostAllocationConfigure();
67     workload->Execute();
68 
69     CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
70 
71     expectedOutput =
72     {
73         0.0f,     63.0f,
74         128.0f,   191.0f,
75         255.0f,   255.0f
76     };
77 
78     return LayerTestResult<float, 2>(actualOutput,
79                                      expectedOutput,
80                                      outputHandle->GetShape(),
81                                      tensorInfo.GetShape());
82 }
83