//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "DriverTestHelpers.hpp"

#include <log/log.h>

DOCTEST_TEST_SUITE("GenericLayerTests")
{

using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;

using HalPolicy = hal_1_0::HalPolicy;

DOCTEST_TEST_CASE("GetSupportedOperations")
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));

    V1_0::ErrorStatus errorStatus;
    std::vector<bool> supported;

    auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
    {
        errorStatus = _errorStatus;
        supported = _supported;
    };

    HalPolicy::Model model0 = {};

    // Add operands
    int32_t actValue      = 0;
    float   weightValue[] = {2, 4, 1};
    float   biasValue[]   = {4};

    AddInputOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1, 3});
    AddTensorOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1, 3}, weightValue);
    AddTensorOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1}, biasValue);
    AddIntOperand<HalPolicy>(model0, actValue);
    AddOutputOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1, 1});

    model0.operations.resize(1);

    // Make a correct fully connected operation
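    // Operand indices in model0: 0 = input, 1 = weights, 2 = bias, 3 = activation, 4 = output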
    model0.operations[0].type    = HalPolicy::OperationType::FULLY_CONNECTED;
    model0.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2, 3};
    model0.operations[0].outputs = hidl_vec<uint32_t>{4};

    driver->getSupportedOperations(model0, cb);
    DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
    DOCTEST_CHECK(supported.size() == (size_t)1);
    DOCTEST_CHECK(supported[0] == true);

    V1_0::Model model1 = {};

    AddInputOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 3});
    AddTensorOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 3}, weightValue);
    AddTensorOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1}, biasValue);
    AddIntOperand<HalPolicy>(model1, actValue);
    AddOutputOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 1});

    model1.operations.resize(2);

    // Make a correct fully connected operation
    model1.operations[0].type    = HalPolicy::OperationType::FULLY_CONNECTED;
    model1.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2, 3};
    model1.operations[0].outputs = hidl_vec<uint32_t>{4};

    // Add an incorrect fully connected operation
    AddIntOperand<HalPolicy>(model1, actValue);
    AddOutputOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 1});

    model1.operations[1].type    = HalPolicy::OperationType::FULLY_CONNECTED;
    model1.operations[1].inputs  = hidl_vec<uint32_t>{4}; // Only 1 input operand, expected 4
    model1.operations[1].outputs = hidl_vec<uint32_t>{5};

    driver->getSupportedOperations(model1, cb);

    DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
    DOCTEST_CHECK(supported.empty());

    // Test Broadcast on add/mul operators
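    // The second input has shape {4} and is broadcast against the first input's shape {1, 1, 3, 4}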
    HalPolicy::Model model2 = {};

    AddInputOperand<HalPolicy>(model2,
                               hidl_vec<uint32_t>{1, 1, 3, 4},
                               HalPolicy::OperandType::TENSOR_FLOAT32,
                               0.0f,
                               0,
                               2);
    AddInputOperand<HalPolicy>(model2,
                               hidl_vec<uint32_t>{4},
                               HalPolicy::OperandType::TENSOR_FLOAT32,
                               0.0f,
                               0,
                               2);
    AddIntOperand<HalPolicy>(model2, actValue, 2);
    AddOutputOperand<HalPolicy>(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
    AddOutputOperand<HalPolicy>(model2, hidl_vec<uint32_t>{1, 1, 3, 4});

    model2.operations.resize(2);

    model2.operations[0].type    = HalPolicy::OperationType::ADD;
    model2.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2};
    model2.operations[0].outputs = hidl_vec<uint32_t>{3};

    model2.operations[1].type    = HalPolicy::OperationType::MUL;
    model2.operations[1].inputs  = hidl_vec<uint32_t>{0, 1, 2};
    model2.operations[1].outputs = hidl_vec<uint32_t>{4};

    driver->getSupportedOperations(model2, cb);
    DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
    DOCTEST_CHECK(supported.size() == (size_t)2);
    DOCTEST_CHECK(supported[0] == true);
    DOCTEST_CHECK(supported[1] == true);

    V1_0::Model model3 = {};

    AddInputOperand<HalPolicy>(model3,
                               hidl_vec<uint32_t>{1, 1, 3, 4},
                               HalPolicy::OperandType::TENSOR_INT32);
    AddInputOperand<HalPolicy>(model3,
                               hidl_vec<uint32_t>{4},
                               HalPolicy::OperandType::TENSOR_INT32);
    AddInputOperand<HalPolicy>(model3, hidl_vec<uint32_t>{1, 1, 3, 4});

    AddOutputOperand<HalPolicy>(model3, hidl_vec<uint32_t>{1, 1, 3, 4});
    AddOutputOperand<HalPolicy>(model3,
                                hidl_vec<uint32_t>{1, 1, 3, 4},
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                1.f / 225.f);

    model3.operations.resize(1);

    // Add an operation that is not supported; getSupportedOperations() should succeed,
    // but report the operation as unsupported
    model3.operations[0].type    = HalPolicy::OperationType::HASHTABLE_LOOKUP;
    model3.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2};
    model3.operations[0].outputs = hidl_vec<uint32_t>{3, 4};

    driver->getSupportedOperations(model3, cb);
    DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
    DOCTEST_CHECK(supported.size() == (size_t)1);
    DOCTEST_CHECK(supported[0] == false);

    HalPolicy::Model model4 = {};

    AddIntOperand<HalPolicy>(model4, 0);

    model4.operations.resize(1);

    // Add invalid operation
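    // 100 does not correspond to any valid HalPolicy::OperationType value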
    model4.operations[0].type    = static_cast<HalPolicy::OperationType>(100);
    model4.operations[0].outputs = hidl_vec<uint32_t>{0};

    driver->getSupportedOperations(model4, cb);
    DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
    DOCTEST_CHECK(supported.empty());
}

// The purpose of this test is to ensure that when encountering an unsupported operation
// it is skipped and getSupportedOperations() continues (rather than failing and stopping).
// As per IVGCVSW-710.
DOCTEST_TEST_CASE("UnsupportedLayerContinueOnFailure")
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));

    V1_0::ErrorStatus errorStatus;
    std::vector<bool> supported;

    auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
    {
        errorStatus = _errorStatus;
        supported = _supported;
    };

    HalPolicy::Model model = {};

    // Operands
    int32_t actValue      = 0;
    float   weightValue[] = {2, 4, 1};
    float   biasValue[]   = {4};

    // HASHTABLE_LOOKUP is unsupported at the time of writing this test, but any unsupported layer will do
    AddInputOperand<HalPolicy>(model,
                               hidl_vec<uint32_t>{1, 1, 3, 4},
                               HalPolicy::OperandType::TENSOR_INT32);
    AddInputOperand<HalPolicy>(model,
                               hidl_vec<uint32_t>{4},
                               HalPolicy::OperandType::TENSOR_INT32,
                               0.0f,
                               0,
                               2);
    AddInputOperand<HalPolicy>(model,
                               hidl_vec<uint32_t>{1, 1, 3, 4},
                               HalPolicy::OperandType::TENSOR_FLOAT32,
                               0.0f,
                               0,
                               2);

    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 3, 4});
    AddOutputOperand<HalPolicy>(model,
                                hidl_vec<uint32_t>{1, 1, 3, 4},
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                1.f / 225.f);

    // Fully connected is supported
    AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3});

    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3}, weightValue);
    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasValue);

    AddIntOperand<HalPolicy>(model, actValue);

    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1});

    // EMBEDDING_LOOKUP is unsupported
    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 3, 4});

    model.operations.resize(3);

    // Unsupported
    model.operations[0].type    = HalPolicy::OperationType::HASHTABLE_LOOKUP;
    model.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2};
    model.operations[0].outputs = hidl_vec<uint32_t>{3, 4};

    // Supported
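    // FULLY_CONNECTED using operands 5 (input), 6 (weights), 7 (bias) and 8 (activation), writing to operand 9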
    model.operations[1].type    = HalPolicy::OperationType::FULLY_CONNECTED;
    model.operations[1].inputs  = hidl_vec<uint32_t>{5, 6, 7, 8};
    model.operations[1].outputs = hidl_vec<uint32_t>{9};

    // Unsupported
    model.operations[2].type    = HalPolicy::OperationType::EMBEDDING_LOOKUP;
    model.operations[2].inputs  = hidl_vec<uint32_t>{1, 2};
    model.operations[2].outputs = hidl_vec<uint32_t>{10};

    // We are testing that the unsupported layers are reported as false and that
    // getSupportedOperations() continues rather than failing and stopping
    driver->getSupportedOperations(model, cb);
    DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
    DOCTEST_CHECK(supported.size() == (size_t)3);
    DOCTEST_CHECK(supported[0] == false);
    DOCTEST_CHECK(supported[1] == true);
    DOCTEST_CHECK(supported[2] == false);
}

// The purpose of this test is to ensure that when encountering a failure
// during mem pool mapping we properly report an error to the framework via a callback
DOCTEST_TEST_CASE("ModelToINetworkConverterMemPoolFail")
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));

    V1_0::ErrorStatus errorStatus;
    std::vector<bool> supported;

    auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
    {
        errorStatus = _errorStatus;
        supported = _supported;
    };

    HalPolicy::Model model = {};

    model.pools = hidl_vec<hidl_memory>{hidl_memory("Unsupported hidl memory type", nullptr, 0)};

    // Memory pool mapping should fail, and we should report an error
    driver->getSupportedOperations(model, cb);
    DOCTEST_CHECK((int)errorStatus != (int)V1_0::ErrorStatus::NONE);
    DOCTEST_CHECK(supported.empty());
}

}