//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvertFp32ToFp16TestImpl.hpp"

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

SimpleConvertFp32ToFp16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)14 LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
15     armnn::IWorkloadFactory& workloadFactory,
16     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
17     const armnn::ITensorHandleFactory& tensorHandleFactory)
18 {
19     IgnoreUnused(memoryManager);
20     using namespace half_float::literal;
21 
22     const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
23     const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
24 
25     std::vector<float> input =
26         {
27             -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
28             1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
29         };
30 
31     std::vector<armnn::Half> expectedOutput =
32         {
33             -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
34             1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h
35         };
36 
37     std::vector<armnn::Half> actualOutput(outputTensorInfo.GetNumElements());
38 
39     std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
40     std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
41 
42     armnn::ConvertFp32ToFp16QueueDescriptor data;
43     armnn::WorkloadInfo info;
44     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
45     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
46 
47     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertFp32ToFp16,
48                                                                                 data,
49                                                                                 info);
50 
51     inputHandle->Allocate();
52     outputHandle->Allocate();
53 
54     CopyDataToITensorHandle(inputHandle.get(), input.data());
55 
56     workload->Execute();
57 
58     CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
59 
60     return LayerTestResult<armnn::Half, 4>(actualOutput,
61                                            expectedOutput,
62                                            outputHandle->GetShape(),
63                                            outputTensorInfo.GetShape());
64 }
65