xref: /aosp_15_r20/external/armnn/src/armnn/test/ShapeInferenceTests.cpp (revision 89c4ff92f2867872bb9e2354d150bf0c8c502810)
1 //
2 // Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <doctest/doctest.h>
7 
8 #include <armnn/Tensor.hpp>
9 #include <Graph.hpp>
10 #include <InternalTypes.hpp>
11 #include <layers/FullyConnectedLayer.hpp>
12 #include <armnn/backends/TensorHandle.hpp>
13 #include <armnn/backends/WorkloadData.hpp>
14 
15 #include <string>
16 
17 TEST_SUITE("ShapeInferenceTests")
18 {
19 using namespace armnn;
20 namespace
21 {
22 
// Per-dimension "specificity" flags handed to TensorShape when building partially
// known shapes: a true entry marks that dimension's size as known. Rows 0-4 cover
// "no dimension known" and each single dimension known in turn; row 5 marks all
// four dimensions as known (see its use at the end of RunShapeInferenceTest).
constexpr const bool maskPermutations[6][4] = {{false, false, false, false},
                                               {true,  false, false, false},
                                               {false, true,  false, false},
                                               {false, false, true,  false},
                                               {false, false, false,  true},
                                               {true,  true,  true,  true}};
29 
30 template<typename LayerT, typename... Args>
BuildGraph(Graph * graph,const std::vector<TensorShape> & inputShapes,Args &&...args)31 LayerT* BuildGraph(Graph* graph, const std::vector<TensorShape>& inputShapes, Args &&... args)
32 {
33     auto layer = graph->AddLayer<LayerT>(std::forward<Args>(args)...);
34 
35     uint32_t inputCount = 0;
36     for (auto inputShape : inputShapes)
37     {
38         TensorInfo inputTensorInfo(inputShape, DataType::Float32);
39 
40         auto input = graph->AddLayer<InputLayer>(static_cast<int>(inputCount), "input");
41         input->GetOutputSlot().SetTensorInfo(inputTensorInfo);
42         input->GetOutputSlot().Connect(layer->GetInputSlot(inputCount));
43         inputCount++;
44     }
45 
46     return layer;
47 }
48 
49 template<typename LayerT>
RunShapeInferenceTest(LayerT * const layer,const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists)50 void RunShapeInferenceTest(LayerT* const layer,
51                            const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists)
52 {
53     std::vector<unsigned int> numDimensions;
54     std::vector<TensorShape> expectedOutputShapes;
55 
56     for (auto dimensionSizeList : dimensionSizeLists)
57     {
58         numDimensions.emplace_back(dimensionSizeList.size());
59         expectedOutputShapes.emplace_back(TensorShape(dimensionSizeList));
60     }
61 
62     const unsigned int outputSize = layer->GetNumOutputSlots();
63 
64     const auto runTestWithMask = [&](const bool maskPermutations[])
65     {
66         for (unsigned int i = 0; i < outputSize; ++i)
67         {
68             layer->GetOutputSlot(i).SetTensorInfo({{numDimensions[i], dimensionSizeLists[i].begin(), maskPermutations},
69                                                   DataType::Float32});
70         }
71 
72         layer->ValidateTensorShapesFromInputs();
73 
74         for (unsigned int i = 0; i < outputSize; ++i)
75         {
76             CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
77         }
78     };
79 
80     // Test inference with Dimensionality::NotSpecified
81     for (unsigned int j = 0; j < outputSize; ++j)
82     {
83         layer->GetOutputSlot(j).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
84     }
85 
86     layer->SetShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly);
87 
88     CHECK_THROWS_AS(layer->ValidateTensorShapesFromInputs(), LayerValidationException);
89 
90     layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
91     layer->ValidateTensorShapesFromInputs();
92 
93     for (unsigned int i = 0; i < outputSize; ++i)
94     {
95         CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
96     }
97 
98     // Test inference with Dimensionality::Specified and various combinations of dimensions of unknown size
99     for (unsigned int i = 0; i < numDimensions[0]; ++i)
100     {
101         runTestWithMask(maskPermutations[i]);
102     }
103 
104     // maskPermutations[5] equates to all dimensions being known
105     runTestWithMask(maskPermutations[5]);
106 }
107 
108 template<typename LayerT, typename... Args>
CreateGraphAndRunTest(const std::vector<TensorShape> & inputShapes,const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists,Args &&...args)109 void CreateGraphAndRunTest(const std::vector<TensorShape>& inputShapes,
110                            const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists,
111                            Args &&... args)
112 {
113     Graph graph(true);
114 
115     auto layer = BuildGraph<LayerT>(&graph, inputShapes, std::forward<Args>(args)...);
116 
117     RunShapeInferenceTest<LayerT>(layer, dimensionSizeLists);
118 }
119 
TEST_CASE("NetworkOptionsTest")
{
     // Case 1: network created with the "InferAndValidate" backend option set to
     // true — an unspecified activation output shape should be inferred from the
     // input's TensorInfo.
     BackendOptions ShapeInferenceMethodOption("ShapeInferenceMethod",
     {
        { "InferAndValidate", true }
     });

    INetworkPtr network = INetwork::Create({ShapeInferenceMethodOption});
    TensorInfo tensorInfo({ 5, 7, 6, 2 }, DataType::Float32);

    auto inputLayer = network->AddInputLayer(1, "inputLayer");
    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::Abs;
    auto activationLayer = network->AddActivationLayer(descriptor, "activation");

    inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
    activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});

    CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());

    // Abs is elementwise here, so the inferred output info matches the input's.
    CHECK(activationLayer->GetOutputSlot(0).GetTensorInfo() == tensorInfo);


    // Case 2: same topology but with "InferAndValidate" explicitly false —
    // querying IsTensorInfoSet must still not throw.
    ShapeInferenceMethodOption = BackendOptions("ShapeInferenceMethod",
                                               {
                                                       { "InferAndValidate", false }
                                               });

    network = INetwork::Create({ShapeInferenceMethodOption});

    inputLayer = network->AddInputLayer(1, "inputLayer");
    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    activationLayer = network->AddActivationLayer(descriptor, "activation");

    inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
    activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});

    CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());

    // Case 3: network created with no options at all — same non-throwing check.
    network = INetwork::Create();

    inputLayer = network->AddInputLayer(1, "inputLayer");
    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    activationLayer = network->AddActivationLayer(descriptor, "activation");

    inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
    activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});

    CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
}
174 
175 TEST_CASE("AbsTest")
176 {
177     ActivationDescriptor descriptor;
178     descriptor.m_Function = ActivationFunction::Abs;
179     CreateGraphAndRunTest<ActivationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "activation");
180 }
181 
182 TEST_CASE("AdditionTest")
183 {
184     CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
185 }
186 
187 TEST_CASE("ArgMinMaxTest")
188 {
189     armnn::ArgMinMaxDescriptor descriptor;
190     descriptor.m_Function = ArgMinMaxFunction::Min;
191     descriptor.m_Axis = 1;
192 
193     CreateGraphAndRunTest<ArgMinMaxLayer>({{ 1, 3, 2, 4 }}, {{ 1, 2, 4 }}, descriptor, "argMinMax");
194 }
195 
196 TEST_CASE("BatchNormalizationTest")
197 {
198     BatchNormalizationDescriptor descriptor;
199     CreateGraphAndRunTest<BatchNormalizationLayer>({{ 1, 2, 3, 2 }}, {{ 1, 2, 3, 2 }}, descriptor, "batchNorm");
200 }
201 
202 TEST_CASE("BatchToSpaceNdTest")
203 {
204     BatchToSpaceNdDescriptor descriptor;
205 
206     std::vector<unsigned int> blockShape {2, 2};
207     std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
208 
209     descriptor.m_BlockShape = blockShape;
210     descriptor.m_Crops = crops;
211     descriptor.m_DataLayout = DataLayout::NHWC;
212 
213     CreateGraphAndRunTest<BatchToSpaceNdLayer>({{ 4, 2, 2, 1 }}, {{ 1, 4, 4, 1 }}, descriptor, "batchtospacend");
214 }
215 
216 TEST_CASE("ComparisionTest")
217 {
218     ComparisonDescriptor descriptor;
219     descriptor.m_Operation = ComparisonOperation::Equal;
220     CreateGraphAndRunTest<ComparisonLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }},
221                                            {{ 5, 7, 6, 2 }},
222                                            descriptor,
223                                            "comparision");
224 }
225 
226 TEST_CASE("ComparisionTestSmallerRHS")
227 {
228     ComparisonDescriptor descriptor;
229     descriptor.m_Operation = ComparisonOperation::Equal;
230     CreateGraphAndRunTest<ComparisonLayer>({{ 5, 7, 6, 2 }, { 1 }},
231                                            {{ 5, 7, 6, 2 }},
232                                            descriptor,
233                                            "comparision");
234 }
235 
236 TEST_CASE("ComparisionTestSmallerLHS")
237 {
238     ComparisonDescriptor descriptor;
239     descriptor.m_Operation = ComparisonOperation::Equal;
240     CreateGraphAndRunTest<ComparisonLayer>({{ 1 }, { 5, 7, 6, 2 }},
241                                            {{ 5, 7, 6, 2 }},
242                                            descriptor,
243                                            "comparision");
244 }
245 
246 TEST_CASE("ConcatTest")
247 {
248     ConcatDescriptor descriptor(2, 3);
249 
250     descriptor.SetViewOriginCoord(0, 0, 0);
251     descriptor.SetViewOriginCoord(1, 0, 1);
252 
253     CreateGraphAndRunTest<ConcatLayer>({{ 1, 2, 1 }, { 1, 2, 1 }}, {{ 2, 2, 1 }}, descriptor, "concat");
254 }
255 
256 TEST_CASE("ConstantTest")
257 {
258     Graph graph;
259     TensorShape outputShape{ 1, 1, 3, 3 };
260     auto layer = BuildGraph<ConstantLayer>(&graph, {}, "constant");
261 
262     std::vector<float> data(9, 0.0f);
263     ConstTensor output0({outputShape, DataType::Float32, 0.0f, 0, true}, data);
264     layer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(output0);
265 
266     layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
267 
268     layer->ValidateTensorShapesFromInputs();
269 
270     CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
271 }
272 
273 TEST_CASE("ConvertFp16ToFp32Test")
274 {
275     CreateGraphAndRunTest<ConvertFp16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
276 }
277 
278 TEST_CASE("ConvertFp32ToFp16Test")
279 {
280     CreateGraphAndRunTest<ConvertFp32ToFp16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
281 }
282 
283 TEST_CASE("Convolution2dTest")
284 {
285     const TensorShape inputShape{1, 1, 10, 10};
286 
287     Convolution2dDescriptor descriptor;
288 
289     descriptor.m_PadLeft = 0;
290     descriptor.m_PadTop = 0;
291     descriptor.m_PadRight = 0;
292     descriptor.m_PadBottom = 0;
293     descriptor.m_StrideX = 1;
294     descriptor.m_StrideY = 1;
295     descriptor.m_DilationX = 3;
296     descriptor.m_DilationY = 3;
297 
298     CreateGraphAndRunTest<Convolution2dLayer>({ inputShape, { 1, 1, 3, 3 } },
299                                               { { 1, 1, 4, 4 } }, descriptor,
300                                               "convd");
301 }
302 
303 TEST_CASE("DebugLayerTest")
304 {
305     const TensorShape tensorShape;
306     CreateGraphAndRunTest<DebugLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "debug");
307 }
308 
309 TEST_CASE("DepthToSpaceTest")
310 {
311     DepthToSpaceDescriptor descriptor;
312 
313     descriptor.m_BlockSize = 2;
314     descriptor.m_DataLayout = DataLayout::NHWC;
315 
316     CreateGraphAndRunTest<DepthToSpaceLayer>({{ 1, 1, 1, 8}}, {{ 1, 2, 2, 2 }}, descriptor, "depthtospace");
317 }
318 
TEST_CASE("DepthwiseConvolutionTest")
{
    // Depthwise conv in NHWC: input { 8, 16, 2, 1 } with weights { 2, 5, 3, 2 }
    // must infer an output of { 8, 18, 1, 2 }.
    DepthwiseConvolution2dDescriptor descriptor;

    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 0;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    // NOTE(review): dilation 0 is unusual (1 normally means "no dilation") —
    // confirm the shape-inference code intentionally accepts 0 here.
    descriptor.m_DilationX = 0;
    descriptor.m_DilationY = 0;
    descriptor.m_DataLayout = DataLayout::NHWC;
    descriptor.m_BiasEnabled = false;

    CreateGraphAndRunTest<DepthwiseConvolution2dLayer>({{ 8, 16, 2, 1 },   // input
                                                        { 2, 5, 3, 2 }},   // weights
                                                       {{ 8, 18, 1, 2 }}, // output
                                                       descriptor,
                                                       "conv2d");
}
340 
341 TEST_CASE("DequantizeTest")
342 {
343     const TensorShape tensorShape{5, 7, 6, 2};
344     CreateGraphAndRunTest<DequantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "dequantize");
345 }
346 
347 TEST_CASE("DetectionPostProcessTest")
348 {
349     const TensorShape detectionBoxesInfo{ 1, 3, 4 };
350     const TensorShape detectionScoresInfo{ 1, 3, 4 };
351     const TensorShape detectionClassesInfo{ 1, 3, 4 };
352 
353     armnn::DetectionPostProcessDescriptor descriptor;
354     descriptor.m_UseRegularNms = true;
355     descriptor.m_MaxDetections = 3;
356     descriptor.m_MaxClassesPerDetection = 1;
357     descriptor.m_DetectionsPerClass =1;
358     descriptor.m_NmsScoreThreshold = 0.0;
359     descriptor.m_NmsIouThreshold = 0.5;
360     descriptor.m_NumClasses = 2;
361     descriptor.m_ScaleY = 10.0;
362     descriptor.m_ScaleX = 10.0;
363     descriptor.m_ScaleH = 5.0;
364     descriptor.m_ScaleW = 5.0;
365 
366     std::vector<float> data(9, 0.0f);
367     ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, data);
368 
369     Graph graph;
370 
371     auto layer = BuildGraph<DetectionPostProcessLayer>(&graph,
372                                                        {detectionBoxesInfo, detectionScoresInfo},
373                                                        descriptor,
374                                                        "detectionpostprocess");
375 
376     layer->m_Anchors = std::make_unique<ScopedTensorHandle>(anchorsTensor);
377 
378     RunShapeInferenceTest<DetectionPostProcessLayer>(layer, {{ 1, 3, 4 }, { 1, 3  }, { 1, 3 }, { 1 }});
379 }
380 
381 TEST_CASE("FakeQuantizationTest")
382 {
383     FakeQuantizationDescriptor descriptor;
384     descriptor.m_Max = 1;
385     descriptor.m_Min = 1;
386     CreateGraphAndRunTest<FakeQuantizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "fakequantization");
387 }
388 
389 TEST_CASE("FloorTest")
390 {
391     const TensorShape tensorShape{5, 7, 6, 2};
392     CreateGraphAndRunTest<FloorLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
393 }
394 
395 TEST_CASE("FullyConnectedTest")
396 {
397     const unsigned int inputWidth = 3u;
398     const unsigned int inputHeight = 2u;
399     const unsigned int inputChannels = 1u;
400     const unsigned int outputChannels = 2u;
401 
402     CreateGraphAndRunTest<FullyConnectedLayer>({{ 1, inputChannels, inputHeight, inputWidth }, // input
403                                                 { inputChannels, outputChannels }},            // weights
404                                                {{ 1, outputChannels }},                        // output
405                                                FullyConnectedDescriptor(),
406                                                "fc");
407 }
408 
409 TEST_CASE("GatherTest")
410 {
411     CreateGraphAndRunTest<GatherLayer>({{ 7, 6, 2}, {2,3}}, {{ 2, 3, 6, 2 }}, GatherDescriptor(), "gather");
412 }
413 
414 TEST_CASE("InstanceNormalizationTest")
415 {
416     const TensorShape tensorShape{5, 7, 6, 2};
417 
418     CreateGraphAndRunTest<InstanceNormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
419                                                       InstanceNormalizationDescriptor(),
420                                                       "instancenorm");
421 }
422 
423 TEST_CASE("L2NormalizationTest")
424 {
425     const TensorShape tensorShape{5, 7, 6, 2};
426 
427     CreateGraphAndRunTest<L2NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
428                                                 L2NormalizationDescriptor(),
429                                                 "l2norm");
430 }
431 
432 TEST_CASE("LogSoftMaxTest")
433 {
434     const TensorShape tensorShape{5, 7, 6, 2};
435 
436     CreateGraphAndRunTest<LogSoftmaxLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, LogSoftmaxDescriptor(), "logsoftmax");
437 }
438 
439 TEST_CASE("LstmTest")
440 {
441     const TensorShape inputShape{2, 5};
442     const TensorShape inputCellState{2, 20};
443     const TensorShape expectedOutputShape{2, 20};
444 
445     LstmDescriptor descriptor;
446 
447     descriptor.m_ActivationFunc = 4;
448     descriptor.m_CifgEnabled = false;
449     descriptor.m_PeepholeEnabled = false;
450     descriptor.m_ProjectionEnabled = false;
451 
452     Graph graph;
453     auto layer = BuildGraph<LstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "lstm");
454 
455     std::vector<float> data(60, 0.0f);
456     ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, data);
457 
458     layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
459     layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
460     layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
461     layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
462     layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
463     layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
464     layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
465     layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
466     layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
467     layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
468     layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
469     layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
470 
471     RunShapeInferenceTest<LstmLayer>(layer, {{2, 80}, {2, 20}, {2, 20}, {2, 20}});
472 }
473 
474 TEST_CASE("MeanLayerTest")
475 {
476     MeanDescriptor descriptor;
477     descriptor.m_Axis = {0};
478 
479     CreateGraphAndRunTest<MeanLayer>({{ 5, 7, 6, 2 }}, {{ 7, 6, 2 }}, descriptor, "mean");
480 }
481 
482 TEST_CASE("MemCopyTest")
483 {
484     CreateGraphAndRunTest<MemCopyLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memcopy");
485 }
486 
487 TEST_CASE("MemImportTest")
488 {
489     CreateGraphAndRunTest<MemImportLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memomport");
490 }
491 
492 TEST_CASE("MergeTest")
493 {
494     const TensorShape tensorShape{ 5, 7, 6, 2 };
495     CreateGraphAndRunTest<MergeLayer>({ { 5, 7, 6, 2 }, { 5, 7, 6, 2 } }, {{ 5, 7, 6, 2 }}, "merge");
496 }
497 
498 TEST_CASE("NormalizationTest")
499 {
500     const TensorShape tensorShape{5, 7, 6, 2};
501 
502     CreateGraphAndRunTest<NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, NormalizationDescriptor(), "l2norm");
503 }
504 
505 TEST_CASE("PermuteTest")
506 {
507     PermuteDescriptor descriptor;
508     descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
509 
510     CreateGraphAndRunTest<PermuteLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "permute");
511 }
512 
513 TEST_CASE("Pooling2dTest")
514 {
515     armnn::Pooling2dDescriptor descriptor;
516     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
517     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
518     descriptor.m_StrideX = 2;
519     descriptor.m_StrideY = 4;
520     descriptor.m_PadLeft = descriptor.m_PadRight = 3;
521     descriptor.m_PadTop = descriptor.m_PadBottom = 0;
522     descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
523     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
524 
525     CreateGraphAndRunTest<Pooling2dLayer>({{ 1, 2, 8, 13 }}, {{ 1, 2, 2, 8 }}, descriptor, "pooling2d");
526 }
527 
528 TEST_CASE("QLstmTest")
529 {
530     const TensorShape inputShape{2, 5};
531     const TensorShape inputCellState{2, 20};
532     const TensorShape expectedOutputShape{2, 20};
533 
534     QLstmDescriptor descriptor;
535 
536     descriptor.m_CifgEnabled = false;
537     descriptor.m_PeepholeEnabled = false;
538     descriptor.m_ProjectionEnabled = false;
539 
540     Graph graph;
541     auto layer = BuildGraph<QLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "qlstm");
542 
543     std::vector<float> data(60, 0.0f);
544     ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, data);
545 
546     layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
547     layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
548     layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
549     layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
550     layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
551     layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
552     layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
553     layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
554     layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
555     layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
556     layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
557     layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
558 
559     RunShapeInferenceTest<QLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
560 }
561 
562 TEST_CASE("QuantizedLstmTest")
563 {
564     const TensorShape inputShape{2, 5};
565     const TensorShape inputCellState{2, 20};
566     const TensorShape expectedOutputShape{2, 20};
567 
568     Graph graph;
569     auto layer = BuildGraph<QuantizedLstmLayer>(&graph, {inputShape, inputCellState, inputCellState},  "quatizedlstm");
570 
571     std::vector<float> data(60, 0.0f);
572     ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, data);
573 
574     layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
575     layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
576     layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
577     layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
578     layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
579     layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
580     layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
581     layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
582     layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
583     layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
584     layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
585     layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
586 
587     RunShapeInferenceTest<QuantizedLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
588 }
589 
590 TEST_CASE("QuantizeTest")
591 {
592     const TensorShape tensorShape { 5, 4, 7, 6 };
593     CreateGraphAndRunTest<QuantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "mean");
594 }
595 
596 TEST_CASE("RankTest")
597 {
598    // due to rank having a scalar output we need a custom test
599    const TensorShape expectedOutputs(Dimensionality::Scalar);
600 
601    Graph graph;
602    auto layer = BuildGraph<RankLayer>(&graph, {{ 1, 1, 1, 1 }},  "rank");
603 
604    layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
605 
606    CHECK_THROWS_AS(
607            layer->ValidateTensorShapesFromInputs(), LayerValidationException);
608 
609    layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
610 
611     layer->ValidateTensorShapesFromInputs();
612 
613    CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
614 
615    layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::Scalar), DataType::Float32});
616 
617     layer->ValidateTensorShapesFromInputs();
618 
619    CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
620 }
621 
622 TEST_CASE("ReshapeTest")
623 {
624     ReshapeDescriptor descriptor;
625 
626     descriptor.m_TargetShape = { 1, 1, 1, 8 };
627 
628     CreateGraphAndRunTest<ReshapeLayer>({{ 2, 2, 2, 2 }}, {{ 1, 1, 1, 8 }}, descriptor, "reshape");
629 }
630 
631 TEST_CASE("ResizeTest")
632 {
633     ResizeDescriptor descriptor;
634 
635     descriptor.m_TargetHeight = 6;
636     descriptor.m_TargetWidth = 2;
637 
638     CreateGraphAndRunTest<ResizeLayer>({{ 1, 7, 6, 2 }}, {{ 1, 7, 6, 2 }}, descriptor, "resize");
639 }
640 
641 TEST_CASE("SliceTest")
642 {
643     SliceDescriptor descriptor;
644     descriptor.m_Begin  = { 1, 0, 1, 2 };
645     descriptor.m_Size   = { 2, 1, 2, 3 };
646 
647     CreateGraphAndRunTest<SliceLayer>({{ 3, 2, 3, 5 }}, {{ 2, 1, 2, 3 }}, descriptor, "mean");
648 }
649 
650 TEST_CASE("SpaceToBatchNdTest")
651 {
652     SpaceToBatchNdDescriptor descriptor;
653 
654     std::vector<unsigned int> blockShape {2, 2};
655     std::vector<std::pair<unsigned int, unsigned int>> padlist = {{0, 0}, {0, 0}};
656 
657     descriptor.m_BlockShape = blockShape;
658     descriptor.m_PadList = padlist;
659     descriptor.m_DataLayout = DataLayout::NHWC;
660 
661     CreateGraphAndRunTest<SpaceToBatchNdLayer>({{ 1, 4, 4, 1 }}, {{ 4, 2, 2, 1 }}, descriptor, "spacetobatchnd");
662 }
663 
664 TEST_CASE("SpaceToDepth")
665 {
666     SpaceToDepthDescriptor descriptor;
667 
668     descriptor.m_BlockSize = 2;
669     descriptor.m_DataLayout = DataLayout::NHWC;
670 
671     CreateGraphAndRunTest<SpaceToDepthLayer>({{ 1, 2, 2, 2 }}, {{ 1, 1, 1, 8}}, descriptor, "spacetodepth");
672 }
673 
674 TEST_CASE("SplitterTest")
675 {
676     SplitterDescriptor descriptor(2, 3);
677 
678     descriptor.SetViewSize(0, 0, 1);
679     descriptor.SetViewSize(0, 1, 2);
680     descriptor.SetViewSize(0, 2, 2);
681 
682     descriptor.SetViewSize(1, 0, 1);
683     descriptor.SetViewSize(1, 1, 2);
684     descriptor.SetViewSize(1, 2, 2);
685 
686     CreateGraphAndRunTest<SplitterLayer>({{ 2, 2, 2 }}, {{ 1, 2, 2 }, { 1, 2, 2 }}, descriptor, "splitter");
687 }
688 
689 TEST_CASE("StackTest")
690 {
691     StackDescriptor descriptor;
692 
693     descriptor.m_Axis = 0;
694     descriptor.m_NumInputs = 2;
695     descriptor.m_InputShape = { 3, 2, 3 };
696 
697     CreateGraphAndRunTest<StackLayer>({{ 3, 2, 3 }, { 3, 2, 3 }}, {{ 2, 3, 2, 3 }}, descriptor, "stack");
698 }
699 
700 TEST_CASE("StridedSliceTest")
701 {
702     StridedSliceDescriptor descriptor;
703 
704     descriptor.m_Begin  = {0, 0, 0, 0};
705     descriptor.m_End    = {3, 2, 3, 1};
706     descriptor.m_Stride = {2, 2, 2, 1};
707 
708     CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "stridedslice");
709 }
710 
711 TEST_CASE("Switchtest")
712 {
713     CreateGraphAndRunTest<SwitchLayer>({{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, {{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, "switch");
714 }
715 
TEST_CASE("TransposeConvolution2dTest")
{
    // NOTE(review): despite its name, this test is a verbatim copy of
    // StridedSliceTest and exercises StridedSliceLayer, not
    // TransposeConvolution2dLayer — confirm whether a real transpose-convolution
    // shape-inference test was intended here.
    StridedSliceDescriptor descriptor;

    descriptor.m_Begin  = {0, 0, 0, 0};
    descriptor.m_End    = {3, 2, 3, 1};
    descriptor.m_Stride = {2, 2, 2, 1};

    CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "t");
}
726 
727 TEST_CASE("TransposeTest")
728 {
729     armnn::TransposeDescriptor descriptor;
730     descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
731 
732     CreateGraphAndRunTest<TransposeLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "stridedslice");
733 }
734 
735 }
736 }
737