//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <ResolveType.hpp>

#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

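// Shared helper for the permute layer tests below: creates a Permute workload for the given
// backend objects, runs it on inputData and returns a LayerTestResult comparing the actual
// output against outputExpectedData.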
template<typename T>
LayerTestResult<T, 4> SimplePermuteTestImpl(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        armnn::PermuteDescriptor descriptor,
        armnn::TensorInfo inputTensorInfo,
        armnn::TensorInfo outputTensorInfo,
        const std::vector<T>& inputData,
        const std::vector<T>& outputExpectedData)
{
    IgnoreUnused(memoryManager);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

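    // Describe the permute operation and bind the input/output tensor handles to the workload.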
    armnn::PermuteQueueDescriptor data;
    data.m_Parameters = descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Permute, data, info);

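    // Allocate the tensor handles, upload the input data, run the workload and read back the result.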
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 outputExpectedData,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

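// Simplest case: permutes a { 1, 2, 2, 2 } tensor, so the shape stays the same and only the
// element order changes. Typical usage from a backend test case (a sketch; fixture names vary):
//   SimplePermuteTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory);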
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePermuteTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { 1, 2, 2, 2 };
    unsigned int outputShape[] = { 1, 2, 2, 2 };

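    // m_DimMappings[i] is the output dimension that input dimension i is moved to,
    // so { 0, 3, 1, 2 } rearranges NCHW data into NHWC order.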
    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
            1, 2,
            3, 4,
            5, 6,
            7, 8
        },
        qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
        {
            1, 5, 2, 6,
            3, 7, 4, 8
        },
        qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager, tensorHandleFactory,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}

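// Permutes a { 1, 2, 2, 3 } tensor into { 1, 3, 2, 2 }, moving the last dimension to position 1.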
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PermuteValueSet1Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { 1, 2, 2, 3 };
    unsigned int outputShape[] = { 1, 3, 2, 2 };

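    // { 0, 2, 3, 1 } sends input dimension 3 to output dimension 1, i.e. NHWC -> NCHW.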
    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
            1, 2, 3,
            11, 12, 13,
            21, 22, 23,
            31, 32, 33
        },
        qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
        {
            1, 11, 21, 31,
            2, 12, 22, 32,
            3, 13, 23, 33
        },
        qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager, tensorHandleFactory,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}

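// The inverse of PermuteValueSet1Test: permutes a { 1, 3, 2, 2 } tensor back into { 1, 2, 2, 3 }.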
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PermuteValueSet2Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { 1, 3, 2, 2 };
    unsigned int outputShape[] = { 1, 2, 2, 3 };

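    // { 0, 3, 1, 2 } sends input dimension 1 to output dimension 3, i.e. NCHW -> NHWC.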
    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
            1, 11, 21, 31,
            2, 12, 22, 32,
            3, 13, 23, 33
        },
        qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
        {
            1, 2, 3,
            11, 12, 13,
            21, 22, 23,
            31, 32, 33
        },
        qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager, tensorHandleFactory,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}

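// Same NHWC -> NCHW mapping as PermuteValueSet1Test, but with a larger { 1, 2, 3, 3 } input
// that becomes { 1, 3, 2, 3 }.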
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PermuteValueSet3Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { 1, 2, 3, 3 };
    unsigned int outputShape[] = { 1, 3, 2, 3 };

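    // Same { 0, 2, 3, 1 } mapping as value set 1: input dimension 3 moves to output dimension 1.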
    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
            1, 2, 3,
            11, 12, 13,
            21, 22, 23,
            31, 32, 33,
            41, 42, 43,
            51, 52, 53
        },
        qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
        {
            1, 11, 21, 31, 41, 51,
            2, 12, 22, 32, 42, 52,
            3, 13, 23, 33, 43, 53
        },
        qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager, tensorHandleFactory,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}