//
// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnn/Exceptions.hpp>

#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/Workload.hpp>

#include <reference/workloads/RefWorkloads.hpp>
#include <reference/RefWorkloadFactory.hpp>

#include <doctest/doctest.h>

using namespace armnn;

TEST_SUITE("WorkloadInfoValidation")
{
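// These tests build deliberately malformed queue descriptors and WorkloadInfo objects and check
// that QueueDescriptor::Validate() (invoked via the reference workloads) throws
// armnn::InvalidArgumentException, or succeeds where the configuration is actually legal.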
TEST_CASE("BatchNormalizationQueueDescriptor_Validate_DifferentQuantizationData")
{
    TensorShape inputShape { 1, 3, 2, 2 };
    TensorShape outputShape { 1, 3, 2, 2 };

    TensorInfo inputTensorInfo(inputShape, armnn::DataType::QAsymmU8, .1f, 125);
    TensorInfo outputTensorInfo(outputShape, armnn::DataType::QAsymmU8, .2f, 120);

    BatchNormalizationQueueDescriptor invalidData;
    WorkloadInfo                      invalidInfo;

    unsigned int sameShape[] = { 10 };
    TensorInfo sameInfo = armnn::TensorInfo(1, sameShape, armnn::DataType::QAsymmU8);
    ScopedTensorHandle sameTensor(sameInfo);

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    invalidData.m_Mean = &sameTensor;
    invalidData.m_Variance = &sameTensor;
    invalidData.m_Beta = &sameTensor;
    invalidData.m_Gamma = &sameTensor;

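    // Input and output use different quantization scales and offsets; this is permitted for
    // batch normalization, so constructing the workload is expected not to throw.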
    CHECK_NOTHROW(RefBatchNormalizationWorkload(invalidData, invalidInfo));
}

TEST_CASE("QueueDescriptor_Validate_WrongNumOfInputsOutputs")
{
    InputQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;
    //Invalid argument exception is expected, because no inputs and no outputs were defined.
    CHECK_THROWS_AS(RefWorkloadFactory().CreateWorkload(LayerType::Input, invalidData, invalidInfo),
                    armnn::InvalidArgumentException);
}

TEST_CASE("RefPooling2dFloat32Workload_Validate_WrongDimTensor")
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[]  = {2, 3, 4}; // <- Invalid - input tensor has to be 4D.
    unsigned int outputShape[] = {2, 3, 4, 5};

    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
    inputTensorInfo  = armnn::TensorInfo(3, inputShape, armnn::DataType::Float32);

    Pooling2dQueueDescriptor invalidData;
    WorkloadInfo           invalidInfo;

    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);

    // Invalid argument exception is expected, input tensor has to be 4D.
    CHECK_THROWS_AS(RefPooling2dWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("RefPooling3dFloat32Workload_Validate_WrongDimTensor")
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[]  = {2, 3, 4, 5}; // <- Invalid - input tensor has to be 5D.
    unsigned int outputShape[] = {2, 3, 4, 5, 6};

    outputTensorInfo = armnn::TensorInfo(5, outputShape, armnn::DataType::Float32);
    inputTensorInfo  = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);

    Pooling3dQueueDescriptor invalidData;
    WorkloadInfo           invalidInfo;

    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);

    // Invalid argument exception is expected, input tensor has to be 5D.
    CHECK_THROWS_AS(RefPooling3dWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("SoftmaxQueueDescriptor_Validate_WrongInputHeight")
{
    unsigned int inputHeight = 1;
    unsigned int inputWidth = 1;
    unsigned int inputChannels = 4;
    unsigned int inputNum = 2;

    unsigned int outputChannels = inputChannels;
    unsigned int outputHeight = inputHeight + 1;    //Makes data invalid - Softmax expects height and width to be 1.
    unsigned int outputWidth = inputWidth;
    unsigned int outputNum = inputNum;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    SoftmaxQueueDescriptor invalidData;
    WorkloadInfo           invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    //Invalid argument exception is expected, because height != 1.
    CHECK_THROWS_AS(RefSoftmaxWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("FullyConnectedQueueDescriptor_Validate_RequiredDataMissing")
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputWidth = 1;
    unsigned int outputHeight = 1;
    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
    unsigned int weightsShape[] = { 1, 1, inputChannels, outputChannels };
    unsigned int biasShape[] = { 1, outputChannels, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(4, biasShape, armnn::DataType::Float32);

    FullyConnectedQueueDescriptor invalidData;
    WorkloadInfo                  invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
    invalidData.m_Parameters.m_BiasEnabled = true;
    invalidData.m_Parameters.m_TransposeWeightMatrix = false;


    // Invalid argument exception is expected, because not all required fields have been provided.
    // In particular, inputsData[0], outputsData[0] and weightsData cannot be null.
    CHECK_THROWS_AS(RefFullyConnectedWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}


TEST_CASE("NormalizationQueueDescriptor_Validate_WrongInputHeight")
{
    constexpr unsigned int inputNum = 5;
    constexpr unsigned int inputHeight   = 32;
    constexpr unsigned int inputWidth    = 24;
    constexpr unsigned int inputChannels = 3;

    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = inputHeight + 1; // Makes data invalid - normalization requires
                                                           // input and output to have the same dimensions.
    constexpr unsigned int outputWidth  = inputWidth;


    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[]  = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);


    armnn::NormalizationAlgorithmMethod normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    armnn::NormalizationAlgorithmChannel normChannel = armnn::NormalizationAlgorithmChannel::Across;
    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 5;

    NormalizationQueueDescriptor invalidData;
    WorkloadInfo                 invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
    invalidData.m_Parameters.m_NormChannelType = normChannel;
    invalidData.m_Parameters.m_NormMethodType  = normMethod;
    invalidData.m_Parameters.m_NormSize        = normSize;
    invalidData.m_Parameters.m_Alpha           = alpha;
    invalidData.m_Parameters.m_Beta            = beta;
    invalidData.m_Parameters.m_K               = kappa;

    //Invalid argument exception is expected, because input height != output height.
    CHECK_THROWS_AS(RefNormalizationWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("SplitterQueueDescriptor_Validate_WrongWindow")
{
    constexpr unsigned int inputNum = 1;
    constexpr unsigned int inputHeight   = 32;
    constexpr unsigned int inputWidth    = 24;
    constexpr unsigned int inputChannels = 3;

    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = 18;
    constexpr unsigned int outputWidth  = inputWidth;


    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[]  = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    SplitterQueueDescriptor invalidData;
    WorkloadInfo            invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    // Invalid, since it has only 3 dimensions while the input tensor is 4D.
    std::vector<unsigned int> wOrigin = {0, 0, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window(wOrigin);
    invalidData.m_ViewOrigins.push_back(window);

    INFO("Invalid argument exception is expected, because split window dimensionality does not match input.");
    CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);

    // Invalid, since the window extends past the boundary of the input tensor.
    std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);
    invalidData.m_ViewOrigins[0] = window3;
    INFO("Invalid argument exception is expected (wOrigin3[2] + outputHeight > inputHeight).");
    CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);


    std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);
    invalidData.m_ViewOrigins[0] = window4;

    std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2};
    armnn::SplitterQueueDescriptor::ViewOrigin window5(wOrigin5);
    invalidData.m_ViewOrigins.push_back(window5);

    INFO("Invalid argument exception is expected because the number of split windows does not match the number of outputs.");
    CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}


TEST_CASE("ConcatQueueDescriptor_Validate_WrongWindow")
{
    constexpr unsigned int inputNum = 1;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputHeight   = 32;
    constexpr unsigned int inputWidth    = 24;

    constexpr unsigned int outputNum = 1;
    constexpr unsigned int outputChannels = 3;
    constexpr unsigned int outputHeight = 32;
    constexpr unsigned int outputWidth  = 24;


    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[]  = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    ConcatQueueDescriptor invalidData;
    WorkloadInfo          invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    // Invalid, since it has only 3 dimensions while the input tensor is 4D.
    std::vector<unsigned int> wOrigin = {0, 0, 0};
    armnn::ConcatQueueDescriptor::ViewOrigin window(wOrigin);
    invalidData.m_ViewOrigins.push_back(window);

    INFO("Invalid argument exception is expected, because merge window dimensionality does not match input.");
    CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);

    // Invalid, since the window extends past the boundary of the output tensor.
    std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
    armnn::ConcatQueueDescriptor::ViewOrigin window3(wOrigin3);
    invalidData.m_ViewOrigins[0] = window3;
    INFO("Invalid argument exception is expected (wOrigin3[2] + inputHeight > outputHeight).");
    CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);


    std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
    armnn::ConcatQueueDescriptor::ViewOrigin window4(wOrigin4);
    invalidData.m_ViewOrigins[0] = window4;

    std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2};
    armnn::ConcatQueueDescriptor::ViewOrigin window5(wOrigin5);
    invalidData.m_ViewOrigins.push_back(window5);

    INFO("Invalid argument exception is expected because the number of merge windows does not match the number of inputs.");
    CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("AdditionQueueDescriptor_Validate_InputNumbers")
{
    armnn::TensorInfo input1TensorInfo;
    armnn::TensorInfo input2TensorInfo;
    armnn::TensorInfo input3TensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[]  = {1, 1, 1, 1};

    input1TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    input2TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    input3TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    AdditionQueueDescriptor invalidData;
    WorkloadInfo            invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    // Too few inputs.
    CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);

    AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);

    // Correct.
    CHECK_NOTHROW(RefAdditionWorkload<>(invalidData, invalidInfo));

    AddInputToWorkload(invalidData, invalidInfo, input3TensorInfo, nullptr);

    // Too many inputs.
    CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("AdditionQueueDescriptor_Validate_InputShapes")
{
    armnn::TensorInfo input1TensorInfo;
    armnn::TensorInfo input2TensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape1[] = {1, 1, 2, 1};
    unsigned int shape2[] = {1, 1, 3, 2};

    // Incompatible shapes even with broadcasting.
    {
        input1TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
        input2TensorInfo = armnn::TensorInfo(4, shape2, armnn::DataType::Float32);
        outputTensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);

        AdditionQueueDescriptor invalidData;
        WorkloadInfo            invalidInfo;

        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

        CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
    }

    // Output size not compatible with input sizes.
    {
        input1TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
        input2TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
        outputTensorInfo = armnn::TensorInfo(4, shape2, armnn::DataType::Float32);

        AdditionQueueDescriptor invalidData;
        WorkloadInfo            invalidInfo;

        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

        // Output differs.
        CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
    }
}

TEST_CASE("MultiplicationQueueDescriptor_Validate_InputTensorDimensionMismatch")
{
    armnn::TensorInfo input0TensorInfo;
    armnn::TensorInfo input1TensorInfo;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int input0Shape[] = { 2, 2, 4, 4 };
    constexpr std::size_t dimensionCount = std::extent<decltype(input0Shape)>::value;

    // Checks dimension consistency for input tensors.
    for (unsigned int dimIndex = 0; dimIndex < dimensionCount; ++dimIndex)
    {
        unsigned int input1Shape[dimensionCount];
        for (unsigned int i = 0; i < dimensionCount; ++i)
        {
            input1Shape[i] = input0Shape[i];
        }

        ++input1Shape[dimIndex];

        input0TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
        input1TensorInfo = armnn::TensorInfo(dimensionCount, input1Shape, armnn::DataType::Float32);
        outputTensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);

        MultiplicationQueueDescriptor invalidData;
        WorkloadInfo                  invalidInfo;

        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);

        CHECK_THROWS_AS(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
    }

    // Checks dimension consistency for input and output tensors.
    for (unsigned int dimIndex = 0; dimIndex < dimensionCount; ++dimIndex)
    {
        unsigned int outputShape[dimensionCount];
        for (unsigned int i = 0; i < dimensionCount; ++i)
        {
            outputShape[i] = input0Shape[i];
        }

        ++outputShape[dimIndex];

        input0TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
        input1TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
        outputTensorInfo = armnn::TensorInfo(dimensionCount, outputShape, armnn::DataType::Float32);

        MultiplicationQueueDescriptor invalidData;
        WorkloadInfo                  invalidInfo;

        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);

        CHECK_THROWS_AS(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
    }
}

TEST_CASE("ReshapeQueueDescriptor_Validate_MismatchingNumElements")
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    // The input and output shapes should have the same number of elements, but these don't.
    unsigned int inputShape[] = { 1, 1, 2, 3 };
    unsigned int outputShape[] = { 1, 1, 1, 2 };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    ReshapeQueueDescriptor invalidData;
    WorkloadInfo           invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    // InvalidArgumentException is expected, because the input and output have different numbers of elements.
    CHECK_THROWS_AS(RefReshapeWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}


TEST_CASE("LstmQueueDescriptor_Validate")
{
    armnn::DataType dataType = armnn::DataType::Float32;

    float qScale = 1.0f;
    int32_t qOffset = 0;

    unsigned int batchSize = 2;
    unsigned int outputSize = 3;
    unsigned int inputSize = 5;
    unsigned int numUnits = 4;

    armnn::TensorInfo inputTensorInfo({batchSize, inputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, dataType, qScale, qOffset);

    // Scratch buffer size without CIFG is [batchSize, numUnits * 4].
    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, dataType, qScale, qOffset);
    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);

    armnn::TensorInfo tensorInfo3({outputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4({numUnits}, dataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4x5({numUnits, inputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4x3({numUnits, outputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo3x4({outputSize, numUnits}, dataType, qScale, qOffset);

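    // Build a fully populated LSTM descriptor (CIFG disabled; peephole, projection and layer
    // normalisation enabled), then corrupt one field at a time and check that Validate() rejects it.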
    LstmQueueDescriptor data;
    WorkloadInfo        info;

    AddInputToWorkload(data, info, inputTensorInfo, nullptr);
    AddInputToWorkload(data, info, outputStateInTensorInfo, nullptr);
    AddInputToWorkload(data, info, cellStateInTensorInfo, nullptr);

    AddOutputToWorkload(data, info, scratchBufferTensorInfo, nullptr);
    AddOutputToWorkload(data, info, outputStateOutTensorInfo, nullptr);
    AddOutputToWorkload(data, info, cellStateOutTensorInfo, nullptr);
    // The fourth output (outputTensorInfo) is deliberately left out here so that the
    // wrong-number-of-outputs check below fails; it is added afterwards.

    armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
    armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
    armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
    armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
    armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
    armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo3x4);
    armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo3);
    armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo4);

    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_CellToInputWeights = &cellToInputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;
    data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
    data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
    data.m_ProjectionWeights = &projectionWeightsTensor;
    data.m_ProjectionBias = &projectionBiasTensor;

    data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
    data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
    data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
    data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;

    // Flags to set test configuration
    data.m_Parameters.m_ActivationFunc = 4;
    data.m_Parameters.m_CifgEnabled = false;
    data.m_Parameters.m_PeepholeEnabled = true;
    data.m_Parameters.m_ProjectionEnabled = true;
    data.m_Parameters.m_LayerNormEnabled = true;

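    // Each check below invalidates one aspect of the descriptor, verifies that Validate()
    // throws InvalidArgumentException, and then restores the valid configuration.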
    // check wrong number of outputs
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    AddOutputToWorkload(data, info, outputTensorInfo, nullptr);

    // check wrong cifg parameter configuration (CIFG is enabled while the input gate
    // weights and bias are still provided)
    data.m_Parameters.m_CifgEnabled = true;
    armnn::TensorInfo scratchBufferTensorInfo2({batchSize, numUnits * 3}, dataType, qScale, qOffset);
    SetWorkloadOutput(data, info, 0, scratchBufferTensorInfo2, nullptr);
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_CifgEnabled = false;
    SetWorkloadOutput(data, info, 0, scratchBufferTensorInfo, nullptr);

    // check wrong inputGateBias configuration
    data.m_InputGateBias = nullptr;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_InputGateBias = &inputGateBiasTensor;

    // check inconsistent projection parameters
    data.m_Parameters.m_ProjectionEnabled = false;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_ProjectionEnabled = true;
    data.m_ProjectionWeights = nullptr;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_ProjectionWeights = &projectionWeightsTensor;

    // check missing input layer normalisation weights
    data.m_InputLayerNormWeights = nullptr;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;

    // layer norm disabled but normalisation weights are present
    data.m_Parameters.m_LayerNormEnabled = false;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_LayerNormEnabled = true;

    // check invalid outputTensor shape
    armnn::TensorInfo incorrectOutputTensorInfo({batchSize, outputSize + 1}, dataType, qScale, qOffset);
    SetWorkloadOutput(data, info, 3, incorrectOutputTensorInfo, nullptr);
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    SetWorkloadOutput(data, info, 3, outputTensorInfo, nullptr);

    // check invalid cell clipping parameters
    data.m_Parameters.m_ClippingThresCell = -1.0f;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_ClippingThresCell = 0.0f;

    // check invalid projection clipping parameters
    data.m_Parameters.m_ClippingThresProj = -1.0f;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_ClippingThresProj = 0.0f;

    // check correct configuration
    CHECK_NOTHROW(data.Validate(info));
}

TEST_CASE("BiasPerAxisQuantization_ValidateCorrectValues")
{
    constexpr unsigned int nInput  = 1u;
    constexpr unsigned int cInput  = 3u;
    constexpr unsigned int hInput  = 3u;
    constexpr unsigned int wInput  = 3u;

    constexpr unsigned int nOutput = nInput;
    constexpr unsigned int cOutput = cInput;
    constexpr unsigned int hOutput = 1u;
    constexpr unsigned int wOutput = 1u;

    const TensorShape inputShape { nInput,  cInput,  hInput,  wInput  };
    const TensorShape outputShape{ nOutput, cOutput, hOutput, wOutput };
    const TensorShape weightShape{ cOutput, cInput,  hInput,  wInput  };
    const TensorShape biasShape  { cOutput                            };

    constexpr DataType inputType  = DataType::QAsymmU8;
    constexpr DataType weightType = DataType::QSymmS8;
    constexpr DataType biasType   = DataType::Signed32;

    constexpr float perTensorScale = 1.5f;
    const TensorInfo inputInfo (inputShape,  inputType, perTensorScale);
    const TensorInfo outputInfo(outputShape, inputType, perTensorScale);

    const std::vector<float> weightPerAxisScales = { 2.50f, 3.50f };
    const TensorInfo weightInfo(weightShape, weightType, weightPerAxisScales, 0);

    Convolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters.m_BiasEnabled = true;

    WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, nullptr);
    AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);

    // Test 1: correct per-axis quantization values
    const std::vector<float> biasPerAxisScales1  = { 3.75f, 5.25f };
    const TensorInfo biasInfo1(biasShape, biasType, biasPerAxisScales1, 0);

    AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo1, nullptr);

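    // Each bias scale equals inputScale * weightScale (1.5f * 2.50f = 3.75f, 1.5f * 3.50f = 5.25f),
    // so validation is expected to succeed.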
    CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));
}

TEST_CASE("BiasPerAxisQuantization_ValidateIncorrectValues")
{
    constexpr unsigned int nInput  = 1u;
    constexpr unsigned int cInput  = 3u;
    constexpr unsigned int hInput  = 3u;
    constexpr unsigned int wInput  = 3u;

    constexpr unsigned int nOutput = nInput;
    constexpr unsigned int cOutput = cInput;
    constexpr unsigned int hOutput = 1u;
    constexpr unsigned int wOutput = 1u;

    const TensorShape inputShape { nInput,  cInput,  hInput,  wInput  };
    const TensorShape outputShape{ nOutput, cOutput, hOutput, wOutput };
    const TensorShape weightShape{ cOutput, cInput,  hInput,  wInput  };
    const TensorShape biasShape  { cOutput                            };

    constexpr DataType inputType  = DataType::QAsymmU8;
    constexpr DataType weightType = DataType::QSymmS8;
    constexpr DataType biasType   = DataType::Signed32;

    constexpr float perTensorScale = 1.5f;
    const TensorInfo inputInfo (inputShape,  inputType, perTensorScale);
    const TensorInfo outputInfo(outputShape, inputType, perTensorScale);

    const std::vector<float> weightPerAxisScales = { 2.50f, 3.50f };
    const TensorInfo weightInfo(weightShape, weightType, weightPerAxisScales, 0);

    Convolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters.m_BiasEnabled = true;

    WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, nullptr);
    AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);

    // Test 2: wrong per-axis quantization values
    const std::vector<float> biasPerAxisScales2 = { 4.00f, 5.00f };
    const TensorInfo biasInfo2(biasShape, biasType, biasPerAxisScales2, 0);

    AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo2, nullptr);

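    // The bias scales do not equal inputScale * weightScale, but this is not treated as a
    // hard error, so validation is still expected to pass without throwing.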
    CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));

}

TEST_CASE("BiasPerAxisQuantization_ValidateInvalidArgumentException")
{
    constexpr unsigned int nInput  = 1u;
    constexpr unsigned int cInput  = 3u;
    constexpr unsigned int hInput  = 3u;
    constexpr unsigned int wInput  = 3u;

    constexpr unsigned int nOutput = nInput;
    constexpr unsigned int cOutput = cInput;
    constexpr unsigned int hOutput = 1u;
    constexpr unsigned int wOutput = 1u;

    const TensorShape inputShape { nInput,  cInput,  hInput,  wInput  };
    const TensorShape outputShape{ nOutput, cOutput, hOutput, wOutput };
    const TensorShape weightShape{ cOutput, cInput,  hInput,  wInput  };
    const TensorShape biasShape  { cOutput                            };

    constexpr DataType inputType  = DataType::QAsymmU8;
    constexpr DataType weightType = DataType::QSymmS8;
    constexpr DataType biasType   = DataType::Signed32;

    constexpr float perTensorScale = 1.5f;
    const TensorInfo inputInfo (inputShape,  inputType, perTensorScale);
    const TensorInfo outputInfo(outputShape, inputType, perTensorScale);

    const std::vector<float> weightPerAxisScales = { 2.50f, 3.50f };
    const TensorInfo weightInfo(weightShape, weightType, weightPerAxisScales, 0);

    Convolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters.m_BiasEnabled = true;

    WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, nullptr);
    AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);

    // Test 3: mismatched number of quantization scales
    const std::vector<float> biasPerAxisScales3 = { 3.75f, 5.25f, 5.25f };
    const TensorInfo biasInfo3(biasShape, biasType, biasPerAxisScales3, 0);

    AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo3, nullptr);

    CHECK_THROWS_AS(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);
}


}