//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#define LOG_TAG "ArmnnDriver"

#include "ModelToINetworkConverter.hpp"
#include "Utils.hpp"

#include <log/log.h>
#include <type_traits>

#ifdef ARMNN_ANDROID_S
#include <LegacyUtils.h>
#endif

namespace armnn_driver
{

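// A ModelToINetworkConverter converts eagerly: the constructor runs the full conversion
// immediately, and any exception escaping Convert() is downgraded to
// ConversionResult::UnsupportedFeature so callers only need to inspect the conversion
// result afterwards.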
template<typename HalPolicy>
ModelToINetworkConverter<HalPolicy>::ModelToINetworkConverter(const std::vector<armnn::BackendId>& backends,
    const HalModel& model,
    const std::set<unsigned int>& forcedUnsupportedOperations)
    : m_Data(backends)
    , m_Model(model)
    , m_ForcedUnsupportedOperations(forcedUnsupportedOperations)
    , m_ConversionResult(ConversionResult::Success)
{
    try
    {
        Convert();
    }
    catch (std::exception& e)
    {
        m_ConversionResult = ConversionResult::UnsupportedFeature;
        ALOGE("%s: Unexpected exception: %s", __func__, e.what());
    }
}

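// Convert() drives the translation in stages: it maps the model's memory pools, creates
// the armnn::INetwork, adds an input layer for every model input, converts each operation
// through HalPolicy::ConvertOperation, and finally adds and connects the output layers.
// Each stage records failure in m_ConversionResult.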
template<typename HalPolicy>
void ModelToINetworkConverter<HalPolicy>::Convert()
{
    using HalModel       = typename HalPolicy::Model;
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    ALOGV("ModelToINetworkConverter::Convert(): %s", GetModelSummary<HalModel>(m_Model).c_str());

    // Map the model's memory pools into shared pointers.
    m_Data.m_MemPools.clear();
#if !defined(ARMNN_ANDROID_S)
    if (!setRunTimePoolInfosFromHidlMemories(&m_Data.m_MemPools, m_Model.pools))
#else
    if (!setRunTimePoolInfosFromCanonicalMemories(&m_Data.m_MemPools, uncheckedConvert(m_Model.pools)))
#endif
    {
        Fail("%s: Failed to set runtime pool infos from HIDL memories.", __func__);
        m_ConversionResult = ConversionResult::ErrorMappingPools;
        return;
    }

    uint32_t totalPoolSize = 0;
    for (auto&& pool : m_Model.pools)
    {
        totalPoolSize += pool.size();
    }

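    // "ShapeInferenceMethod: InferAndValidate" asks Arm NN to infer output tensor shapes
    // from the input shapes and validate them against any shapes the model does specify,
    // instead of requiring every shape to be fully known up front.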
    using NetworkOptions = std::vector<armnn::BackendOptions>;
    NetworkOptions networkOptions;
    armnn::BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                     {
                                                         { "InferAndValidate", true }
                                                     });

    networkOptions.push_back(shapeInferenceMethodOption);

    // Create the armnn::INetwork.
    m_Data.m_Network = armnn::INetwork::Create(networkOptions);

    // Add operations to it and track which layer outputs each operand.
    ALOGV("ModelToINetworkConverter::Convert(): m_OutputSlotForOperand");
    m_Data.m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(getMainModel(m_Model).operands.size(), nullptr);
    try
    {
        ALOGV("ModelToINetworkConverter::Convert(): for getMainModel(m_Model).inputIndexes.size()");
        for (uint32_t i = 0; i < getMainModel(m_Model).inputIndexes.size(); i++)
        {
            ALOGV("ModelToINetworkConverter::Convert(): getMainModel(m_Model).inputIndexes[i]");
            // Inputs in Android NN are represented by operands.
            uint32_t inputIndex = getMainModel(m_Model).inputIndexes[i];
            ALOGV("ModelToINetworkConverter::Convert(): getMainModel(m_Model).operands[inputIndex];");
            const HalOperand& operand = getMainModel(m_Model).operands[inputIndex];
            ALOGV("ModelToINetworkConverter::Convert(): GetTensorInfoForOperand(operand)");
            const std::string layerName = "Input_" + std::to_string(i);
            ALOGV("ModelToINetworkConverter::Convert(): m_Data.m_Network->AddInputLayer(i, layerName.c_str())");
            armnn::IConnectableLayer* layer = m_Data.m_Network->AddInputLayer(i, layerName.c_str());

            ALOGV("ModelToINetworkConverter::Convert(): layer->GetOutputSlot(0)");
            armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
            ALOGV("ModelToINetworkConverter::Convert(): outputSlot.SetTensorInfo(GetTensorInfoForOperand(operand))");
            outputSlot.SetTensorInfo(GetTensorInfoForOperand(operand));

            ALOGV("ModelToINetworkConverter::Convert(): m_Data.m_OutputSlotForOperand[inputIndex] = &outputSlot");
            // Store the slot so later layers can connect to this operand.
            m_Data.m_OutputSlotForOperand[inputIndex] = &outputSlot;
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        m_ConversionResult = ConversionResult::UnsupportedFeature;
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        Fail("%s: Failed to convert input operand to TensorShape: %s", __func__, e.what());
        m_ConversionResult = ConversionResult::UnsupportedFeature;
    }
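
    // Convert every operation via HalPolicy::ConvertOperation, recording each result in
    // m_OperationSupported so the driver can answer per-operation support queries later.
    // A single failure marks the whole conversion as UnsupportedFeature, but the remaining
    // operations are still checked.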
    bool UnsupportedDynamicOperation = false;
    for (uint32_t operationIdx = 0; operationIdx < getMainModel(m_Model).operations.size(); operationIdx++)
    {
        const auto& operation = getMainModel(m_Model).operations[operationIdx];

        bool ok = true;
        if (m_ForcedUnsupportedOperations.find(operationIdx) != m_ForcedUnsupportedOperations.end())
        {
            Fail("%s: Operation at index %i has been forced to be unsupported.", __func__, operationIdx);
            ok = false;
        }

        if (ok)
        {
            try
            {
                ok = HalPolicy::ConvertOperation(operation, m_Model, m_Data);
            }
            catch (UnsupportedOperand<HalOperandType>& e)
            {
                Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
                ok = false;
            }
            catch (const armnn::InvalidArgumentException& e)
            {
                Fail("%s: Failed to convert operation in %s", __func__, e.what());
                ok = false;
            }
        }

        // Store whether this operation was successfully converted.
        m_OperationSupported.emplace(operationIdx, ok);

        // Any single operation failing will fail the entire conversion.
        // We still need to continue and check the other ones.
        if (!ok)
        {
            if (m_Data.m_DynamicInputsEncountered)
            {
                Fail("%s: The unsupported operation at index %i has dynamic inputs.", __func__, operationIdx);
                UnsupportedDynamicOperation = true;
            }

            m_ConversionResult = ConversionResult::UnsupportedFeature;
        }
        m_Data.m_DynamicInputsEncountered = false;
    }

    // Because the NNAPI partitioner does not support partition boundaries of unknown size,
    // any operation whose outputs connect to an unsupported operation with dynamic inputs
    // will cause a failure.
    //
    // The simplest solution to this problem is to not support any operations in a model
    // containing an unsupported operation with dynamic inputs.
    if (UnsupportedDynamicOperation)
    {
        Fail("%s: Unsupported operation with dynamic inputs found. Retroactively setting all operations to unsupported",
             __func__);
        for (auto& operation : m_OperationSupported)
        {
            operation.second = false;
        }
    }

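    // Add an output layer for every model output and connect it to the slot that
    // produces the corresponding operand.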
    try
    {
        if (m_ConversionResult == ConversionResult::Success)
        {
            for (uint32_t i = 0; i < getMainModel(m_Model).outputIndexes.size(); i++)
            {
                // Outputs in Android NN are represented by operands.
                uint32_t outputIndex = getMainModel(m_Model).outputIndexes[i];
                const std::string layerName = "Output_" + std::to_string(i);
                armnn::IConnectableLayer* layer = m_Data.m_Network->AddOutputLayer(i, layerName.c_str());

                if (!m_Data.m_OutputSlotForOperand[outputIndex])
                {
                    Fail("%s: OutputSlot %i does not exist", __func__, outputIndex);
                    m_ConversionResult = ConversionResult::UnsupportedFeature;
                    break;
                }
                m_Data.m_OutputSlotForOperand[outputIndex]->Connect(layer->GetInputSlot(0));
            }
        }
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        Fail("%s: Failed to convert output operand to TensorShape: %s", __func__, e.what());
        m_ConversionResult = ConversionResult::UnsupportedFeature;
    }
}

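// Returns whether the operation at the given index was successfully converted.
// Unrecognised indices are reported via Fail(), which evaluates to false.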
template<typename HalPolicy>
bool ModelToINetworkConverter<HalPolicy>::IsOperationSupported(uint32_t operationIndex) const
{
    std::map<uint32_t, bool>::const_iterator it = m_OperationSupported.find(operationIndex);
    if (it == m_OperationSupported.end())
    {
        return Fail("%s: Unrecognised Operation Index: %i", __func__, operationIndex);
    }
    return it->second;
}

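// Illustrative usage (a sketch, not taken from this file): 'model' stands for a HAL model
// already received from the NNAPI runtime, "CpuRef" is an example backend choice, and the
// accessors GetConversionResult() and GetINetwork() are assumed to be declared in
// ModelToINetworkConverter.hpp.
//
//     std::vector<armnn::BackendId> backends = { "CpuRef" };
//     ModelToINetworkConverter<hal_1_0::HalPolicy> converter(backends, model, {});
//     if (converter.GetConversionResult() == ConversionResult::Success)
//     {
//         armnn::INetwork* network = converter.GetINetwork();
//         // ... optimize the network and load it into an armnn::IRuntime
//     }
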
///
/// Class template specializations
///

template class ModelToINetworkConverter<hal_1_0::HalPolicy>;

#ifdef ARMNN_ANDROID_NN_V1_1
template class ModelToINetworkConverter<hal_1_1::HalPolicy>;
#endif

#ifdef ARMNN_ANDROID_NN_V1_2
template class ModelToINetworkConverter<hal_1_1::HalPolicy>;
template class ModelToINetworkConverter<hal_1_2::HalPolicy>;
#endif

#ifdef ARMNN_ANDROID_NN_V1_3
template class ModelToINetworkConverter<hal_1_1::HalPolicy>;
template class ModelToINetworkConverter<hal_1_2::HalPolicy>;
template class ModelToINetworkConverter<hal_1_3::HalPolicy>;
#endif

} // armnn_driver