//
// Copyright © 2019, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "InstanceNormalizationTestImpl.hpp"

#include <armnnUtils/QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <armnnTestUtils/DataLayoutUtils.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

namespace
{

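// Shared helper: quantizes the input and reference values, runs an
// InstanceNormalization workload built from the given descriptor, and
// returns the actual and expected outputs for comparison.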
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    armnn::InstanceNormalizationQueueDescriptor descriptor,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    std::vector<T> inputTensor = armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset);
    std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset);
    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload
            = workloadFactory.CreateWorkload(armnn::LayerType::InstanceNormalization, descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

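// Checks InstanceNormalization with an identity affine transform
// (gamma = 1, beta = 0) on a 2x2x2x2 NHWC tensor, permuting the data to
// NCHW first when that layout is requested.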
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    // BatchSize: 2
    // Height: 2
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };

    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);

    std::vector<float> inputValues
        {
            // Batch 0, Height 0, Width 0 x Channel (2)
            0.f,  1.f,
            // Batch 0, Height 0, Width 1 x Channel (2)
            0.f,  2.f,

            // Batch 0, Height 1, Width 0 x Channel (2)
            0.f,  2.f,
            // Batch 0, Height 1, Width 1 x Channel (2)
            0.f,  4.f,

            // Batch 1, Height 0, Width 0 x Channel (2)
            1.f, -1.f,
            // Batch 1, Height 0, Width 1 x Channel (2)
           -1.f,  2.f,

            // Batch 1, Height 1, Width 0 x Channel (2)
           -1.f, -2.f,
            // Batch 1, Height 1, Width 1 x Channel (2)
            1.f,  4.f
        };

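    // Expected values follow the instance normalization formula
    //     out = gamma * (x - mean) / sqrt(variance + eps) + beta,
    // with mean and variance computed per batch and channel over the
    // spatial (Height x Width) positions; here gamma = 1, beta = 0 and
    // eps = 0.0001, as set on the descriptor below.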
    std::vector<float> expectedOutputValues
        {
            // Batch 0, Height 0, Width 0 x Channel (2)
            0.f, -1.1470304f,
            // Batch 0, Height 0, Width 1 x Channel (2)
            0.f, -0.22940612f,
            // Batch 0, Height 1, Width 0 x Channel (2)
            0.f, -0.22940612f,
            // Batch 0, Height 1, Width 1 x Channel (2)
            0.f,  1.6058424f,

            // Batch 1, Height 0, Width 0 x Channel (2)
            0.99995005f, -0.7337929f,
            // Batch 1, Height 0, Width 1 x Channel (2)
           -0.99995005f,  0.52413774f,

            // Batch 1, Height 1, Width 0 x Channel (2)
           -0.99995005f, -1.1531031f,
            // Batch 1, Height 1, Width 1 x Channel (2)
            0.99995005f,  1.3627582f
        };

    if (dataLayout == armnn::DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
        PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
    }

    armnn::InstanceNormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps        = 0.0001f;
    descriptor.m_Parameters.m_Beta       = 0.0f;
    descriptor.m_Parameters.m_Gamma      = 1.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    return InstanceNormTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo,
        outputTensorInfo,
        inputValues,
        expectedOutputValues,
        descriptor);
}

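// Same input as InstanceNormTest, but with a non-identity affine transform
// (gamma = 2, beta = 10) applied to the normalized values.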
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    // BatchSize: 2
    // Height: 2
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };

    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);

    std::vector<float> inputValues
        {
            // Batch 0, Height 0, Width 0 x Channel (2)
            0.f,  1.f,
            // Batch 0, Height 0, Width 1 x Channel (2)
            0.f,  2.f,

            // Batch 0, Height 1, Width 0 x Channel (2)
            0.f,  2.f,
            // Batch 0, Height 1, Width 1 x Channel (2)
            0.f,  4.f,

            // Batch 1, Height 0, Width 0 x Channel (2)
            1.f, -1.f,
            // Batch 1, Height 0, Width 1 x Channel (2)
            -1.f,  2.f,

            // Batch 1, Height 1, Width 0 x Channel (2)
            -1.f, -2.f,
            // Batch 1, Height 1, Width 1 x Channel (2)
            1.f,  4.f
        };

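    // Expected values use the same per-(batch, channel) mean and variance as
    // above, but with gamma = 2 and beta = 10 (and eps = 0.0001) applied:
    //     out = 2 * (x - mean) / sqrt(variance + eps) + 10.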
    std::vector<float> expectedOutputValues
        {
            // Batch 0, Height 0, Width 0 x Channel (2)
            10.f,     7.7059393f,
            // Batch 0, Height 0, Width 1 x Channel (2)
            10.f,     9.541187f,

            // Batch 0, Height 1, Width 0 x Channel (2)
            10.f,     9.541187f,
            // Batch 0, Height 1, Width 1 x Channel (2)
            10.f,     13.211685f,

            // Batch 1, Height 0, Width 0 x Channel (2)
            11.9999f, 8.532414f,
            // Batch 1, Height 0, Width 1 x Channel (2)
            8.0001f,  11.048275f,

            // Batch 1, Height 1, Width 0 x Channel (2)
            8.0001f,  7.693794f,
            // Batch 1, Height 1, Width 1 x Channel (2)
            11.9999f, 12.725516f
        };

    if (dataLayout == armnn::DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
        PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
    }

    armnn::InstanceNormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps        = 0.0001f;
    descriptor.m_Parameters.m_Beta       = 10.0f;
    descriptor.m_Parameters.m_Gamma      = 2.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    return InstanceNormTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo,
        outputTensorInfo,
        inputValues,
        expectedOutputValues,
        descriptor);
}

} // anonymous namespace

LayerTestResult<float, 4> InstanceNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<float, 4> InstanceNormFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest2<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest2<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}