xref: /aosp_15_r20/external/android-nn-driver/test/GenericLayerTests.cpp (revision 3e777be0405cee09af5d5785ff37f7cfb5bee59a)
1*3e777be0SXin Li //
2*3e777be0SXin Li // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3*3e777be0SXin Li // SPDX-License-Identifier: MIT
4*3e777be0SXin Li //
5*3e777be0SXin Li 
6*3e777be0SXin Li #include "DriverTestHelpers.hpp"
7*3e777be0SXin Li 
8*3e777be0SXin Li #include <log/log.h>
9*3e777be0SXin Li 
10*3e777be0SXin Li DOCTEST_TEST_SUITE("GenericLayerTests")
11*3e777be0SXin Li {
12*3e777be0SXin Li 
13*3e777be0SXin Li using namespace android::hardware;
14*3e777be0SXin Li using namespace driverTestHelpers;
15*3e777be0SXin Li using namespace armnn_driver;
16*3e777be0SXin Li 
17*3e777be0SXin Li using HalPolicy = hal_1_0::HalPolicy;
18*3e777be0SXin Li 
19*3e777be0SXin Li DOCTEST_TEST_CASE("GetSupportedOperations")
20*3e777be0SXin Li {
21*3e777be0SXin Li     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
22*3e777be0SXin Li 
23*3e777be0SXin Li     V1_0::ErrorStatus errorStatus;
24*3e777be0SXin Li     std::vector<bool> supported;
25*3e777be0SXin Li 
26*3e777be0SXin Li     auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
__anon8f0bb1c60102(V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported) 27*3e777be0SXin Li     {
28*3e777be0SXin Li         errorStatus = _errorStatus;
29*3e777be0SXin Li         supported = _supported;
30*3e777be0SXin Li     };
31*3e777be0SXin Li 
32*3e777be0SXin Li     HalPolicy::Model model0 = {};
33*3e777be0SXin Li 
34*3e777be0SXin Li     // Add operands
35*3e777be0SXin Li     int32_t actValue      = 0;
36*3e777be0SXin Li     float   weightValue[] = {2, 4, 1};
37*3e777be0SXin Li     float   biasValue[]   = {4};
38*3e777be0SXin Li 
39*3e777be0SXin Li     AddInputOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1, 3});
40*3e777be0SXin Li     AddTensorOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1, 3}, weightValue);
41*3e777be0SXin Li     AddTensorOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1}, biasValue);
42*3e777be0SXin Li     AddIntOperand<HalPolicy>(model0, actValue);
43*3e777be0SXin Li     AddOutputOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1, 1});
44*3e777be0SXin Li 
45*3e777be0SXin Li     model0.operations.resize(1);
46*3e777be0SXin Li 
47*3e777be0SXin Li     // Make a correct fully connected operation
48*3e777be0SXin Li     model0.operations[0].type    = HalPolicy::OperationType::FULLY_CONNECTED;
49*3e777be0SXin Li     model0.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2, 3};
50*3e777be0SXin Li     model0.operations[0].outputs = hidl_vec<uint32_t>{4};
51*3e777be0SXin Li 
52*3e777be0SXin Li     driver->getSupportedOperations(model0, cb);
53*3e777be0SXin Li     DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
54*3e777be0SXin Li     DOCTEST_CHECK(supported.size() == (size_t)1);
55*3e777be0SXin Li     DOCTEST_CHECK(supported[0] == true);
56*3e777be0SXin Li 
57*3e777be0SXin Li     V1_0::Model model1 = {};
58*3e777be0SXin Li 
59*3e777be0SXin Li     AddInputOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 3});
60*3e777be0SXin Li     AddTensorOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 3}, weightValue);
61*3e777be0SXin Li     AddTensorOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1}, biasValue);
62*3e777be0SXin Li     AddIntOperand<HalPolicy>(model1, actValue);
63*3e777be0SXin Li     AddOutputOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 1});
64*3e777be0SXin Li 
65*3e777be0SXin Li     model1.operations.resize(2);
66*3e777be0SXin Li 
67*3e777be0SXin Li     // Make a correct fully connected operation
68*3e777be0SXin Li     model1.operations[0].type    = HalPolicy::OperationType::FULLY_CONNECTED;
69*3e777be0SXin Li     model1.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2, 3};
70*3e777be0SXin Li     model1.operations[0].outputs = hidl_vec<uint32_t>{4};
71*3e777be0SXin Li 
72*3e777be0SXin Li     // Add an incorrect fully connected operation
73*3e777be0SXin Li     AddIntOperand<HalPolicy>(model1, actValue);
74*3e777be0SXin Li     AddOutputOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 1});
75*3e777be0SXin Li 
76*3e777be0SXin Li     model1.operations[1].type    = HalPolicy::OperationType::FULLY_CONNECTED;
77*3e777be0SXin Li     model1.operations[1].inputs  = hidl_vec<uint32_t>{4}; // Only 1 input operand, expected 4
78*3e777be0SXin Li     model1.operations[1].outputs = hidl_vec<uint32_t>{5};
79*3e777be0SXin Li 
80*3e777be0SXin Li     driver->getSupportedOperations(model1, cb);
81*3e777be0SXin Li 
82*3e777be0SXin Li     DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
83*3e777be0SXin Li     DOCTEST_CHECK(supported.empty());
84*3e777be0SXin Li 
85*3e777be0SXin Li     // Test Broadcast on add/mul operators
86*3e777be0SXin Li     HalPolicy::Model model2 = {};
87*3e777be0SXin Li 
88*3e777be0SXin Li     AddInputOperand<HalPolicy>(model2,
89*3e777be0SXin Li                                hidl_vec<uint32_t>{1, 1, 3, 4},
90*3e777be0SXin Li                                HalPolicy::OperandType::TENSOR_FLOAT32,
91*3e777be0SXin Li                                0.0f,
92*3e777be0SXin Li                                0,
93*3e777be0SXin Li                                2);
94*3e777be0SXin Li     AddInputOperand<HalPolicy>(model2,
95*3e777be0SXin Li                                hidl_vec<uint32_t>{4},
96*3e777be0SXin Li                                HalPolicy::OperandType::TENSOR_FLOAT32,
97*3e777be0SXin Li                                0.0f,
98*3e777be0SXin Li                                0,
99*3e777be0SXin Li                                2);
100*3e777be0SXin Li     AddIntOperand<HalPolicy>(model2, actValue, 2);
101*3e777be0SXin Li     AddOutputOperand<HalPolicy>(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
102*3e777be0SXin Li     AddOutputOperand<HalPolicy>(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
103*3e777be0SXin Li 
104*3e777be0SXin Li     model2.operations.resize(2);
105*3e777be0SXin Li 
106*3e777be0SXin Li     model2.operations[0].type    = HalPolicy::OperationType::ADD;
107*3e777be0SXin Li     model2.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2};
108*3e777be0SXin Li     model2.operations[0].outputs = hidl_vec<uint32_t>{3};
109*3e777be0SXin Li 
110*3e777be0SXin Li     model2.operations[1].type    = HalPolicy::OperationType::MUL;
111*3e777be0SXin Li     model2.operations[1].inputs  = hidl_vec<uint32_t>{0, 1, 2};
112*3e777be0SXin Li     model2.operations[1].outputs = hidl_vec<uint32_t>{4};
113*3e777be0SXin Li 
114*3e777be0SXin Li     driver->getSupportedOperations(model2, cb);
115*3e777be0SXin Li     DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
116*3e777be0SXin Li     DOCTEST_CHECK(supported.size() == (size_t)2);
117*3e777be0SXin Li     DOCTEST_CHECK(supported[0] == true);
118*3e777be0SXin Li     DOCTEST_CHECK(supported[1] == true);
119*3e777be0SXin Li 
120*3e777be0SXin Li     V1_0::Model model3 = {};
121*3e777be0SXin Li 
122*3e777be0SXin Li     AddInputOperand<HalPolicy>(model3,
123*3e777be0SXin Li                                hidl_vec<uint32_t>{1, 1, 3, 4},
124*3e777be0SXin Li                                HalPolicy::OperandType::TENSOR_INT32);
125*3e777be0SXin Li     AddInputOperand<HalPolicy>(model3,
126*3e777be0SXin Li                                hidl_vec<uint32_t>{4},
127*3e777be0SXin Li                                HalPolicy::OperandType::TENSOR_INT32);
128*3e777be0SXin Li     AddInputOperand<HalPolicy>(model3, hidl_vec<uint32_t>{1, 1, 3, 4});
129*3e777be0SXin Li 
130*3e777be0SXin Li     AddOutputOperand<HalPolicy>(model3, hidl_vec<uint32_t>{1, 1, 3, 4});
131*3e777be0SXin Li     AddOutputOperand<HalPolicy>(model3,
132*3e777be0SXin Li                                 hidl_vec<uint32_t>{1, 1, 3, 4},
133*3e777be0SXin Li                                 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
134*3e777be0SXin Li                                 1.f / 225.f);
135*3e777be0SXin Li 
136*3e777be0SXin Li     model3.operations.resize(1);
137*3e777be0SXin Li 
138*3e777be0SXin Li     // Add unsupported operation, should return no error but we don't support it
139*3e777be0SXin Li     model3.operations[0].type    = HalPolicy::OperationType::HASHTABLE_LOOKUP;
140*3e777be0SXin Li     model3.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2};
141*3e777be0SXin Li     model3.operations[0].outputs = hidl_vec<uint32_t>{3, 4};
142*3e777be0SXin Li 
143*3e777be0SXin Li     driver->getSupportedOperations(model3, cb);
144*3e777be0SXin Li     DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
145*3e777be0SXin Li     DOCTEST_CHECK(supported.size() == (size_t)1);
146*3e777be0SXin Li     DOCTEST_CHECK(supported[0] == false);
147*3e777be0SXin Li 
148*3e777be0SXin Li     HalPolicy::Model model4 = {};
149*3e777be0SXin Li 
150*3e777be0SXin Li     AddIntOperand<HalPolicy>(model4, 0);
151*3e777be0SXin Li 
152*3e777be0SXin Li     model4.operations.resize(1);
153*3e777be0SXin Li 
154*3e777be0SXin Li     // Add invalid operation
155*3e777be0SXin Li     model4.operations[0].type    = static_cast<HalPolicy::OperationType>(100);
156*3e777be0SXin Li     model4.operations[0].outputs = hidl_vec<uint32_t>{0};
157*3e777be0SXin Li 
158*3e777be0SXin Li     driver->getSupportedOperations(model4, cb);
159*3e777be0SXin Li     DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
160*3e777be0SXin Li     DOCTEST_CHECK(supported.empty());
161*3e777be0SXin Li }
162*3e777be0SXin Li 
163*3e777be0SXin Li // The purpose of this test is to ensure that when encountering an unsupported operation
164*3e777be0SXin Li // it is skipped and getSupportedOperations() continues (rather than failing and stopping).
165*3e777be0SXin Li // As per IVGCVSW-710.
166*3e777be0SXin Li DOCTEST_TEST_CASE("UnsupportedLayerContinueOnFailure")
167*3e777be0SXin Li {
168*3e777be0SXin Li     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
169*3e777be0SXin Li 
170*3e777be0SXin Li     V1_0::ErrorStatus errorStatus;
171*3e777be0SXin Li     std::vector<bool> supported;
172*3e777be0SXin Li 
173*3e777be0SXin Li     auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
__anon8f0bb1c60202(V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported) 174*3e777be0SXin Li     {
175*3e777be0SXin Li         errorStatus = _errorStatus;
176*3e777be0SXin Li         supported = _supported;
177*3e777be0SXin Li     };
178*3e777be0SXin Li 
179*3e777be0SXin Li     HalPolicy::Model model = {};
180*3e777be0SXin Li 
181*3e777be0SXin Li     // Operands
182*3e777be0SXin Li     int32_t actValue      = 0;
183*3e777be0SXin Li     float   weightValue[] = {2, 4, 1};
184*3e777be0SXin Li     float   biasValue[]   = {4};
185*3e777be0SXin Li 
186*3e777be0SXin Li     // HASHTABLE_LOOKUP is unsupported at the time of writing this test, but any unsupported layer will do
187*3e777be0SXin Li     AddInputOperand<HalPolicy>(model,
188*3e777be0SXin Li                                hidl_vec<uint32_t>{1, 1, 3, 4},
189*3e777be0SXin Li                                HalPolicy::OperandType::TENSOR_INT32);
190*3e777be0SXin Li     AddInputOperand<HalPolicy>(model,
191*3e777be0SXin Li                                hidl_vec<uint32_t>{4},
192*3e777be0SXin Li                                HalPolicy::OperandType::TENSOR_INT32,
193*3e777be0SXin Li                                0.0f,
194*3e777be0SXin Li                                0,
195*3e777be0SXin Li                                2);
196*3e777be0SXin Li     AddInputOperand<HalPolicy>(model,
197*3e777be0SXin Li                                hidl_vec<uint32_t>{1, 1, 3, 4},
198*3e777be0SXin Li                                HalPolicy::OperandType::TENSOR_FLOAT32,
199*3e777be0SXin Li                                0.0f,
200*3e777be0SXin Li                                0,
201*3e777be0SXin Li                                2);
202*3e777be0SXin Li 
203*3e777be0SXin Li     AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 3, 4});
204*3e777be0SXin Li     AddOutputOperand<HalPolicy>(model,
205*3e777be0SXin Li                                 hidl_vec<uint32_t>{1, 1, 3, 4},
206*3e777be0SXin Li                                 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
207*3e777be0SXin Li                                 1.f / 225.f);
208*3e777be0SXin Li 
209*3e777be0SXin Li     // Fully connected is supported
210*3e777be0SXin Li     AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3});
211*3e777be0SXin Li 
212*3e777be0SXin Li     AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3}, weightValue);
213*3e777be0SXin Li     AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasValue);
214*3e777be0SXin Li 
215*3e777be0SXin Li     AddIntOperand<HalPolicy>(model, actValue);
216*3e777be0SXin Li 
217*3e777be0SXin Li     AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1});
218*3e777be0SXin Li 
219*3e777be0SXin Li     // EMBEDDING_LOOKUP is unsupported
220*3e777be0SXin Li     AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 3, 4});
221*3e777be0SXin Li 
222*3e777be0SXin Li     model.operations.resize(3);
223*3e777be0SXin Li 
224*3e777be0SXin Li     // Unsupported
225*3e777be0SXin Li     model.operations[0].type    = HalPolicy::OperationType::HASHTABLE_LOOKUP;
226*3e777be0SXin Li     model.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2};
227*3e777be0SXin Li     model.operations[0].outputs = hidl_vec<uint32_t>{3, 4};
228*3e777be0SXin Li 
229*3e777be0SXin Li     // Supported
230*3e777be0SXin Li     model.operations[1].type    = HalPolicy::OperationType::FULLY_CONNECTED;
231*3e777be0SXin Li     model.operations[1].inputs  = hidl_vec<uint32_t>{5, 6, 7, 8};
232*3e777be0SXin Li     model.operations[1].outputs = hidl_vec<uint32_t>{9};
233*3e777be0SXin Li 
234*3e777be0SXin Li     // Unsupported
235*3e777be0SXin Li     model.operations[2].type    = HalPolicy::OperationType::EMBEDDING_LOOKUP;
236*3e777be0SXin Li     model.operations[2].inputs  = hidl_vec<uint32_t>{1, 2};
237*3e777be0SXin Li     model.operations[2].outputs = hidl_vec<uint32_t>{10};
238*3e777be0SXin Li 
239*3e777be0SXin Li     // We are testing that the unsupported layers return false and the test continues rather than failing and stopping
240*3e777be0SXin Li     driver->getSupportedOperations(model, cb);
241*3e777be0SXin Li     DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
242*3e777be0SXin Li     DOCTEST_CHECK(supported.size() == (size_t)3);
243*3e777be0SXin Li     DOCTEST_CHECK(supported[0] == false);
244*3e777be0SXin Li     DOCTEST_CHECK(supported[1] == true);
245*3e777be0SXin Li     DOCTEST_CHECK(supported[2] == false);
246*3e777be0SXin Li }
247*3e777be0SXin Li 
// The purpose of this test is to ensure that when encountering a failure
// during mem pool mapping we properly report an error to the framework via a callback
250*3e777be0SXin Li DOCTEST_TEST_CASE("ModelToINetworkConverterMemPoolFail")
251*3e777be0SXin Li {
252*3e777be0SXin Li     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
253*3e777be0SXin Li 
254*3e777be0SXin Li     V1_0::ErrorStatus errorStatus;
255*3e777be0SXin Li     std::vector<bool> supported;
256*3e777be0SXin Li 
257*3e777be0SXin Li     auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
__anon8f0bb1c60302(V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported) 258*3e777be0SXin Li     {
259*3e777be0SXin Li         errorStatus = _errorStatus;
260*3e777be0SXin Li         supported = _supported;
261*3e777be0SXin Li     };
262*3e777be0SXin Li 
263*3e777be0SXin Li     HalPolicy::Model model = {};
264*3e777be0SXin Li 
265*3e777be0SXin Li     model.pools = hidl_vec<hidl_memory>{hidl_memory("Unsuported hidl memory type", nullptr, 0)};
266*3e777be0SXin Li 
267*3e777be0SXin Li     // Memory pool mapping should fail, we should report an error
268*3e777be0SXin Li     driver->getSupportedOperations(model, cb);
269*3e777be0SXin Li     DOCTEST_CHECK((int)errorStatus != (int)V1_0::ErrorStatus::NONE);
270*3e777be0SXin Li     DOCTEST_CHECK(supported.empty());
271*3e777be0SXin Li }
272*3e777be0SXin Li 
273*3e777be0SXin Li }
274