1 //
2 // Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include "LogSoftmaxTestImpl.hpp"
7
8 #include <Half.hpp>
9 #include <armnnUtils/QuantizeHelper.hpp>
10 #include <ResolveType.hpp>
11
12
13 #include <armnn/backends/TensorHandle.hpp>
14 #include <armnn/backends/IBackendInternal.hpp>
15 #include <armnn/backends/WorkloadFactory.hpp>
16
17 #include <armnnTestUtils/TensorCopyUtils.hpp>
18 #include <armnnTestUtils/WorkloadTestUtils.hpp>
19
20 #include <armnnTestUtils/TensorHelpers.hpp>
21
22 namespace
23 {
24
25 template<armnn::DataType ArmnnType,
26 std::size_t NumDims,
27 typename T = armnn::ResolveType<ArmnnType>>
LogSoftmaxTestImpl(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,const armnn::TensorInfo & inputInfo,const armnn::TensorInfo & outputInfo,const std::vector<float> & inputValues,const std::vector<float> & expectedOutputValues,armnn::LogSoftmaxQueueDescriptor descriptor,float qScale=1.0f,int32_t qOffset=0)28 LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
29 armnn::IWorkloadFactory& workloadFactory,
30 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
31 const armnn::ITensorHandleFactory& tensorHandleFactory,
32 const armnn::TensorInfo& inputInfo,
33 const armnn::TensorInfo& outputInfo,
34 const std::vector<float>& inputValues,
35 const std::vector<float>& expectedOutputValues,
36 armnn::LogSoftmaxQueueDescriptor descriptor,
37 float qScale = 1.0f,
38 int32_t qOffset = 0)
39 {
40 IgnoreUnused(memoryManager);
41
42 auto inputTensor = armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset);
43
44 std::vector<T> actualOutput(outputInfo.GetNumElements());
45 std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset);
46
47 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
48 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
49
50 armnn::WorkloadInfo info;
51
52 AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
53 AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
54
55 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::LogSoftmax,
56 descriptor,
57 info);
58
59 inputHandle->Allocate();
60 outputHandle->Allocate();
61
62 CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
63
64 ExecuteWorkload(*workload, memoryManager);
65
66 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
67
68 return LayerTestResult<T, NumDims>(actualOutput,
69 expectedOutput,
70 outputHandle->GetShape(),
71 outputInfo.GetShape());
72
73 }
74
75 } // anonymous namespace
76
77 template<armnn::DataType ArmnnType, typename T>
LogSoftmaxTest1(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)78 LayerTestResult<T, 4> LogSoftmaxTest1(
79 armnn::IWorkloadFactory& workloadFactory,
80 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
81 const armnn::ITensorHandleFactory& tensorHandleFactory)
82 {
83 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
84
85 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
86 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
87
88 std::vector<float> inputValues
89 {
90 0.f, -6.f, 2.f, 4.f,
91 3.f, -2.f, 10.f, 1.f
92 };
93
94 std::vector<float> expectedOutputValues
95 {
96 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
97 -7.00104f, -12.00104f, -0.00105f, -9.00104f
98 };
99
100 armnn::LogSoftmaxQueueDescriptor descriptor;
101 descriptor.m_Parameters.m_Beta = 1.0f; // default beta
102 descriptor.m_Parameters.m_Axis = -1; // default axis
103
104 return LogSoftmaxTestImpl<ArmnnType, 4>(
105 workloadFactory,
106 memoryManager,
107 tensorHandleFactory,
108 inputTensorInfo,
109 outputTensorInfo,
110 inputValues,
111 expectedOutputValues,
112 descriptor);
113 }
114
115 template<armnn::DataType ArmnnType, typename T>
LogSoftmaxTest2(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)116 LayerTestResult<T, 4> LogSoftmaxTest2(
117 armnn::IWorkloadFactory& workloadFactory,
118 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
119 const armnn::ITensorHandleFactory& tensorHandleFactory)
120 {
121 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
122
123 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
124 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
125
126 std::vector<float> inputValues
127 {
128 0.f, -6.f, 2.f, 4.f,
129 3.f, -2.f, 10.f, 1.f
130 };
131
132 std::vector<float> expectedOutputValues
133 {
134 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
135 -7.00104f, -12.00104f, -0.00105f, -9.00104f
136 };
137
138 armnn::LogSoftmaxQueueDescriptor descriptor;
139 descriptor.m_Parameters.m_Beta = 1.0f; // default beta
140 descriptor.m_Parameters.m_Axis = 3; // positive axis
141
142 return LogSoftmaxTestImpl<ArmnnType, 4>(
143 workloadFactory,
144 memoryManager,
145 tensorHandleFactory,
146 inputTensorInfo,
147 outputTensorInfo,
148 inputValues,
149 expectedOutputValues,
150 descriptor);
151 }
152
153 template<armnn::DataType ArmnnType, typename T>
LogSoftmaxTest3(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)154 LayerTestResult<T, 4> LogSoftmaxTest3(
155 armnn::IWorkloadFactory& workloadFactory,
156 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
157 const armnn::ITensorHandleFactory& tensorHandleFactory)
158 {
159 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
160
161 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
162 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
163
164 std::vector<float> inputValues
165 {
166 0.0f, -0.6f, 0.2f, 0.4f,
167 0.3f, -0.2f, 1.0f, 0.1f
168 };
169
170 std::vector<float> expectedOutputValues
171 {
172 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
173 -7.00104f, -12.00104f, -0.00105f, -9.00104f
174 };
175
176 armnn::LogSoftmaxQueueDescriptor descriptor;
177 descriptor.m_Parameters.m_Beta = 10.0f; // non-default beta
178 descriptor.m_Parameters.m_Axis = 3; // positive axis
179
180 return LogSoftmaxTestImpl<ArmnnType, 4>(
181 workloadFactory,
182 memoryManager,
183 tensorHandleFactory,
184 inputTensorInfo,
185 outputTensorInfo,
186 inputValues,
187 expectedOutputValues,
188 descriptor);
189 }
190
191 template<armnn::DataType ArmnnType, typename T>
LogSoftmaxTest4(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)192 LayerTestResult<T, 4> LogSoftmaxTest4(
193 armnn::IWorkloadFactory& workloadFactory,
194 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
195 const armnn::ITensorHandleFactory& tensorHandleFactory)
196 {
197 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
198
199 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
200 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
201
202 std::vector<float> inputValues
203 {
204 0.f, -6.f, 2.f, 4.f,
205 3.f, -2.f, 10.f, 1.f
206 };
207
208 std::vector<float> expectedOutputValues
209 {
210 -3.048587f, -4.018149f, -8.000336f, -0.048587f,
211 -0.048587f, -0.018149f, -0.000335f, -3.048587f
212 };
213
214 armnn::LogSoftmaxQueueDescriptor descriptor;
215 descriptor.m_Parameters.m_Beta = 1.0f; // default beta
216 descriptor.m_Parameters.m_Axis = -2; // negative axis
217
218 return LogSoftmaxTestImpl<ArmnnType, 4>(
219 workloadFactory,
220 memoryManager,
221 tensorHandleFactory,
222 inputTensorInfo,
223 outputTensorInfo,
224 inputValues,
225 expectedOutputValues,
226 descriptor);
227 }
228
//
// Explicit template instantiations — Float32.
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
LogSoftmaxTest1<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
LogSoftmaxTest2<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
LogSoftmaxTest3<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
LogSoftmaxTest4<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

//
// Explicit template instantiations — Float16.
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
LogSoftmaxTest1<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
LogSoftmaxTest2<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
LogSoftmaxTest3<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
LogSoftmaxTest4<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);
276