//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
#pragma once

#include <ResolveType.hpp>

#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <backendsCommon/test/WorkloadFactoryHelper.hpp>
18
19 template<typename T>
SimpleTransposeTestImpl(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,armnn::TransposeDescriptor descriptor,armnn::TensorInfo inputTensorInfo,armnn::TensorInfo outputTensorInfo,const std::vector<T> & inputData,const std::vector<T> & outputExpectedData)20 LayerTestResult<T, 4> SimpleTransposeTestImpl(
21 armnn::IWorkloadFactory& workloadFactory,
22 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
23 const armnn::ITensorHandleFactory& tensorHandleFactory,
24 armnn::TransposeDescriptor descriptor,
25 armnn::TensorInfo inputTensorInfo,
26 armnn::TensorInfo outputTensorInfo,
27 const std::vector<T>& inputData,
28 const std::vector<T>& outputExpectedData)
29 {
30 IgnoreUnused(memoryManager);
31 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
32
33 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
34 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
35
36 armnn::TransposeQueueDescriptor data;
37 data.m_Parameters = descriptor;
38 armnn::WorkloadInfo info;
39 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
40 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
41
42 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Transpose,
43 data,
44 info);
45
46 inputHandle->Allocate();
47 outputHandle->Allocate();
48
49 CopyDataToITensorHandle(inputHandle.get(), inputData.data());
50
51 workload->Execute();
52
53 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
54
55 return LayerTestResult<T, 4>(actualOutput,
56 outputExpectedData,
57 outputHandle->GetShape(),
58 outputTensorInfo.GetShape());
59 }
60
61 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
SimpleTransposeTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)62 LayerTestResult<T, 4> SimpleTransposeTest(
63 armnn::IWorkloadFactory& workloadFactory,
64 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
65 const armnn::ITensorHandleFactory& tensorHandleFactory)
66 {
67 armnn::TensorInfo inputTensorInfo;
68 armnn::TensorInfo outputTensorInfo;
69
70 unsigned int inputShape[] = { 1, 2, 2, 2 };
71 unsigned int outputShape[] = { 1, 2, 2, 2 };
72
73 armnn::TransposeDescriptor descriptor;
74 descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
75
76 inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
77 outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
78
79 // Set quantization parameters if the requested type is a quantized type.
80 float qScale = 0.5f;
81 int32_t qOffset = 5;
82 if(armnn::IsQuantizedType<T>())
83 {
84 inputTensorInfo.SetQuantizationScale(qScale);
85 inputTensorInfo.SetQuantizationOffset(qOffset);
86 outputTensorInfo.SetQuantizationScale(qScale);
87 outputTensorInfo.SetQuantizationOffset(qOffset);
88 }
89
90 std::vector<T> input = armnnUtils::QuantizedVector<T>(
91 {
92 1, 2,
93 3, 4,
94 5, 6,
95 7, 8
96 },
97 qScale, qOffset);
98
99 std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
100 {
101 1, 5, 2, 6,
102 3, 7, 4, 8
103 },
104 qScale, qOffset);
105
106 return SimpleTransposeTestImpl(workloadFactory, memoryManager, tensorHandleFactory,
107 descriptor, inputTensorInfo,
108 outputTensorInfo, input, outputExpected);
109 }
110
111 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
TransposeValueSet1Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)112 LayerTestResult<T, 4> TransposeValueSet1Test(
113 armnn::IWorkloadFactory& workloadFactory,
114 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
115 const armnn::ITensorHandleFactory& tensorHandleFactory)
116 {
117 armnn::TensorInfo inputTensorInfo;
118 armnn::TensorInfo outputTensorInfo;
119
120 unsigned int inputShape[] = { 1, 2, 2, 3 };
121 unsigned int outputShape[] = { 1, 3, 2, 2 };
122
123 armnn::TransposeDescriptor descriptor;
124 descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
125
126 inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
127 outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
128
129 // Set quantization parameters if the requested type is a quantized type.
130 float qScale = 0.5f;
131 int32_t qOffset = 5;
132 if(armnn::IsQuantizedType<T>())
133 {
134 inputTensorInfo.SetQuantizationScale(qScale);
135 inputTensorInfo.SetQuantizationOffset(qOffset);
136 outputTensorInfo.SetQuantizationScale(qScale);
137 outputTensorInfo.SetQuantizationOffset(qOffset);
138 }
139
140 std::vector<T> input = armnnUtils::QuantizedVector<T>(
141 {
142 1, 2, 3,
143 11, 12, 13,
144 21, 22, 23,
145 31, 32, 33
146 },
147 qScale, qOffset);
148
149 std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
150 {
151 1, 11, 21, 31,
152 2, 12, 22, 32,
153 3, 13, 23, 33
154 },
155 qScale, qOffset);
156
157 return SimpleTransposeTestImpl<T>(workloadFactory, memoryManager, tensorHandleFactory,
158 descriptor, inputTensorInfo,
159 outputTensorInfo, input, outputExpected);
160 }
161
162 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
TransposeValueSet2Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)163 LayerTestResult<T, 4> TransposeValueSet2Test(
164 armnn::IWorkloadFactory& workloadFactory,
165 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
166 const armnn::ITensorHandleFactory& tensorHandleFactory)
167 {
168 armnn::TensorInfo inputTensorInfo;
169 armnn::TensorInfo outputTensorInfo;
170
171 unsigned int inputShape[] = { 1, 3, 2, 2 };
172 unsigned int outputShape[] = { 1, 2, 2, 3 };
173
174 armnn::TransposeDescriptor descriptor;
175 descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
176
177 inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
178 outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
179
180 // Set quantization parameters if the requested type is a quantized type.
181 float qScale = 0.5f;
182 int32_t qOffset = 5;
183 if(armnn::IsQuantizedType<T>())
184 {
185 inputTensorInfo.SetQuantizationScale(qScale);
186 inputTensorInfo.SetQuantizationOffset(qOffset);
187 outputTensorInfo.SetQuantizationScale(qScale);
188 outputTensorInfo.SetQuantizationOffset(qOffset);
189 }
190
191 std::vector<T> input = armnnUtils::QuantizedVector<T>(
192 {
193 1, 11, 21, 31,
194 2, 12, 22, 32,
195 3, 13, 23, 33
196 },
197 qScale, qOffset);
198
199 std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
200 {
201 1, 2, 3,
202 11, 12, 13,
203 21, 22, 23,
204 31, 32, 33,
205 },
206 qScale, qOffset);
207
208 return SimpleTransposeTestImpl<T>(workloadFactory, memoryManager, tensorHandleFactory,
209 descriptor, inputTensorInfo,
210 outputTensorInfo, input, outputExpected);
211 }
212
213 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
TransposeValueSet3Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)214 LayerTestResult<T, 4> TransposeValueSet3Test(
215 armnn::IWorkloadFactory& workloadFactory,
216 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
217 const armnn::ITensorHandleFactory& tensorHandleFactory)
218 {
219 armnn::TensorInfo inputTensorInfo;
220 armnn::TensorInfo outputTensorInfo;
221
222 unsigned int inputShape[] = { 1, 2, 3, 3 };
223 unsigned int outputShape[] = { 1, 3, 2, 3 };
224
225 armnn::TransposeDescriptor descriptor;
226 descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
227
228 inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
229 outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
230
231 // Set quantization parameters if the requested type is a quantized type.
232 float qScale = 0.5f;
233 int32_t qOffset = 5;
234 if(armnn::IsQuantizedType<T>())
235 {
236 inputTensorInfo.SetQuantizationScale(qScale);
237 inputTensorInfo.SetQuantizationOffset(qOffset);
238 outputTensorInfo.SetQuantizationScale(qScale);
239 outputTensorInfo.SetQuantizationOffset(qOffset);
240 }
241
242 std::vector<T> input = armnnUtils::QuantizedVector<T>(
243 {
244 1, 2, 3,
245 11, 12, 13,
246 21, 22, 23,
247 31, 32, 33,
248 41, 42, 43,
249 51, 52, 53
250 },
251 qScale, qOffset);
252
253 std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
254 {
255 1, 11, 21, 31, 41, 51,
256 2, 12, 22, 32, 42, 52,
257 3, 13, 23, 33, 43, 53
258 },
259 qScale, qOffset);
260
261 return SimpleTransposeTestImpl<T>(workloadFactory, memoryManager, tensorHandleFactory,
262 descriptor, inputTensorInfo,
263 outputTensorInfo, input, outputExpected);
264 }
265