//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizeTestImpl.hpp"

#include <ResolveType.hpp>

#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/WorkloadFactory.hpp>
#include <armnn/utils/IgnoreUnused.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

namespace
{

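// Runs a Quantize workload for the given input/output tensor infos on the
// supplied backend and returns the actual and expected results for comparison.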
template<typename T, std::size_t Dim>
LayerTestResult<T, Dim> QuantizeTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const std::vector<float>& inputData,
    const std::vector<T>& expectedOutputData,
    armnn::QuantizeQueueDescriptor descriptor)
{
    IgnoreUnused(memoryManager);
    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Quantize,
                                                                                descriptor,
                                                                                info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, Dim>(actualOutput,
                                   expectedOutputData,
                                   outputHandle->GetShape(),
                                   outputTensorInfo.GetShape());
}

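// Quantizes a simple 1x2x2x3 Float32 pattern with scale 0.5 and offset 1,
// i.e. each expected value is round(x / 0.5) + 1 (so 1.0f maps to 3).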
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType, 0.5f, 1);

    std::vector<float> inputData = std::vector<float>(
    {
         1.0f,  2.0f,  3.0f,
         4.0f,  5.0f,  6.0f,
         7.0f,  8.0f,  9.0f,
        10.0f, 11.0f, 12.0f,
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
         3,  5,  7,
         9, 11, 13,
        15, 17, 19,
        21, 23, 25,
    });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  tensorHandleFactory,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}

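// Quantizes ±100.0f with a very small scale (0.0001) so that the quantized
// values fall far outside the representable range of T and must clamp to
// std::numeric_limits<T>::lowest() / max().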
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeClampTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 1, 2, 1}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 1, 2, 1}, ArmnnOutputType, 0.0001f, 0);

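    // Saturation bounds of the output type T; both inputs quantize outside them.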
    const T max = std::numeric_limits<T>::max();
    const T min = std::numeric_limits<T>::lowest();

    std::vector<float> inputData = std::vector<float>(
    {
        -100.0f, 100.0f
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
        min, max
    });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  tensorHandleFactory,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}

} // anonymous namespace

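// Public test entry points, one per quantized output data type.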
LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int8_t, 4> QuantizeClampAsymmInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory);
}