//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <Graph.hpp>
#include <LayersFwd.hpp>

#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/SubgraphView.hpp>

#include <armnn/BackendId.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <fmt/format.h>

#include <DotSerializer.hpp>

#include <algorithm>      // std::find
#include <iomanip>        // std::quoted
#include <sstream>
#include <unordered_map>
#include <unordered_set>

namespace armnn
{

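/// Copy constructor: deep-copies another graph. Every layer in 'other' is cloned into
/// this graph first, then the output-to-input slot connections and the output TensorInfos
/// are recreated via a map from the original layers to their clones.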
Graph::Graph(const Graph& other)
    : m_LayersInOrder(other.m_LayersInOrder)
    , m_AllowExpandedDims(other.m_AllowExpandedDims)
    , m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
    , m_Profiler(other.m_Profiler)
{
    std::unordered_map<const Layer*, Layer*> otherToClonedMap;

    for (auto&& otherLayer : other.m_Layers)
    {
        Layer* const layer = otherLayer->Clone(*this);
        otherToClonedMap.emplace(otherLayer, layer);
    }

    // Copies slot connections.
    for (auto&& otherLayer : other.m_Layers)
    {
        Layer* const thisLayer = otherToClonedMap[otherLayer];

        auto outputSlot = thisLayer->BeginOutputSlots();
        for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
        {
            for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
            {
                const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
                Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];

                InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
                outputSlot->Connect(inputSlot);
            }
            outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
            ++outputSlot;
        }
    }
}

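/// Logs, at Info level, every layer in topological order: its name, type, backend,
/// slot counts and the tensor shape carried by each input and output slot.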
Status Graph::Print() const
{
    if (m_Layers.empty())
    {
        ARMNN_LOG(info) << "\n Graph is empty.\n";
        return Status::Success;
    }
    ARMNN_LOG(info) << "\n";
    ARMNN_LOG(info) << "Walking Pattern: \n";

    for (auto&& it : TopologicalSort())
    {
        auto numInputSlots = it->GetNumInputSlots();
        auto numOutputSlots = it->GetNumOutputSlots();

        ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
                        << ":" << it->GetBackendId().Get()
                        << " has " << numInputSlots << " input slots"
                        << " and " << numOutputSlots << " output slots.";

        for (auto i : it->GetInputSlots())
        {
            std::ostringstream message;
            auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
            unsigned int numDims = inputTensorShape.GetNumDimensions();

            message << "The input slot has shape [ ";
            for (unsigned int dim = 0; dim < numDims; dim++)
            {
                message << inputTensorShape[dim] << ",";
            }
            message << " ]";
            ARMNN_LOG(info) << message.str();
        }

        for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
        {
            const armnn::Layer* layer = it;
            std::ostringstream message;
            auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
            unsigned int numDims = outputTensorShape.GetNumDimensions();

            message << "The output slot has shape [ ";
            for (unsigned int dim = 0; dim < numDims; dim++)
            {
                message << outputTensorShape[dim] << ",";
            }
            message << " ]";
            ARMNN_LOG(info) << message.str();
        }
        ARMNN_LOG(info) << "\n";
    }
    ARMNN_LOG(info) << "\n\n";

    return Status::Success;
}

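/// Writes the graph to 'stream' in Graphviz DOT format: nodes are declared first (one per
/// layer, labelled with the layer type and its serialised parameters), then edges labelled
/// with the shape of the tensor flowing along each connection.
/// Illustrative usage only (not part of this file):
///     std::ofstream dotFile("optimized_graph.dot");
///     graph.SerializeToDot(dotFile);
/// Returns Status::Failure if the stream is in a bad state after writing.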
Status Graph::SerializeToDot(std::ostream& stream)
{
    {
        DotGraph graph(stream, "Optimized");

        {
            // Default node attributes:
            DotDefaults nodes(stream, "node");
            nodes.GetAttributeSet()
                 .AddAttribute("shape", "record");
        }

        {
            // Default edge attributes:
            DotDefaults edges(stream, "edge");
            edges.GetAttributeSet()
                 .AddAttribute("fontsize", 8)
                 .AddAttribute("fontcolor", "blue")
                 .AddAttribute("fontname", "arial-bold");
        }

        // First declares the nodes.
        for (auto&& layer : m_Layers)
        {
            DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
            // Extracts the layer parameters.
            ParameterStringifyFunction extractParams = [&node](const std::string& name, const std::string& value)
            {
                node.GetContents().AddContent(name + " : " + value);
            };
            layer->SerializeLayerParameters(extractParams);
        }

        // Second declares the edges.
        for (auto&& layer : m_Layers)
        {
            LayerGuid toId = layer->GetGuid();

            for (unsigned int i = 0; i < layer->GetNumInputSlots(); i++)
            {
                OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
                LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
                DotEdge edge(stream, fromId, toId);

                // Now print the tensor shape on the edge.
                {
                    // Constructs the label attribute with HTML markup.
                    std::stringstream ss;
                    ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
                    edge.GetAttributeSet().AddAttribute("label", ss);
                }
            }
        }
    }

    if (stream.bad())
    {
        return Status::Failure;
    }
    return Status::Success;
}

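/// Allocates the intermediate tensor buffers of the network. Constant tensors are
/// pre-allocated up front because they must live for the whole execution; every other
/// tensor handle is reference-counted (one count per outgoing connection) so that its
/// managed lifetime can be ended as soon as all of its consuming layers have been visited
/// in the topological walk below.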
Status Graph::AllocateDynamicBuffers()
{
    // Layers must be sorted in topological order
    ARMNN_ASSERT(m_LayersInOrder);
    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");

    std::unordered_set<const ITensorHandle*> preallocatedTensors;
    std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;

    // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
    // is a TensorHandle, the function just returns it
    auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
    {
        ITensorHandle* ancestor = subTensorHandle;
        while (ancestor && ancestor->GetParent())
        {
            ancestor = ancestor->GetParent();
        }
        return ancestor;
    };

    // Checks whether a TensorHandle has been pre-allocated
    auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
    {
        return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
    };

    // Constant tensor handles need to last from the beginning of execution till the end,
    // therefore we pre-allocate them upfront
    for (auto&& layer : m_Layers)
    {
        if (layer->GetType() == LayerType::Constant)
        {
            for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
            {
                ITensorHandle* tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());

                if (tensorHandle && !IsPreallocated(tensorHandle))
                {
                    tensorHandle->Allocate();
                    preallocatedTensors.insert(tensorHandle);
                }
            }
        }
    }

    // Iterate over the network in topological order
    for (auto&& layer : m_Layers)
    {
        // Count the amount of times each output slot references a certain buffer (ITensorHandle).
        // The first time we encounter a new tensor handle, we start managing its lifetime.
        for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
        {
            ITensorHandle* tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());

            if (tensorHandle && !IsPreallocated(tensorHandle))
            {
                unsigned int numConnections = slot->GetNumConnections();
                if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
                {
                    handleReferenceCounts[tensorHandle] = numConnections;
                    tensorHandle->Manage();
                    if (handleReferenceCounts[tensorHandle] == 0u)
                    {
                        // if nobody consumes this tensor we call Allocate()
                        tensorHandle->Allocate();
                    }
                }
                else
                {
                    handleReferenceCounts[tensorHandle] += numConnections;
                }
            }
        }

        // Loop through the input slots in the same layer and decrement the reference counter associated
        // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
        for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
        {
            ITensorHandle* tensorHandle = TraceSubTensorHandleAncestry(
                slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());

            if (tensorHandle && !IsPreallocated(tensorHandle))
            {
                --handleReferenceCounts[tensorHandle];

                if (handleReferenceCounts[tensorHandle] == 0u)
                {
                    // Stop managing lifetime of tensor handle
                    tensorHandle->Allocate();
                    handleReferenceCounts.erase(tensorHandle);
                }
            }
        }
    }

    return Status::Success;
}

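/// Sorts m_Layers so that each layer appears after the layers producing its inputs.
/// The sort is lazy: it only runs when m_LayersInOrder is false, by resetting and then
/// comparing each layer's priority. Returns the graph itself so it can be iterated
/// directly, e.g. for (auto&& layer : graph.TopologicalSort()) { ... }.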
const Graph& Graph::TopologicalSort() const
{
    if (!m_LayersInOrder)
    {
        // Resets layer order.
        for (auto&& it : m_Layers)
        {
            it->ResetPriority();
        }

        auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
        {
            return layerA->GetPriority() < layerB->GetPriority();
        };

        m_Layers.sort(compareLayerPriority);

        m_LayersInOrder = true;
    }

    return *this;
}

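/// Walks every connection in the graph and, where the edge strategy requires a copy or an
/// export/import between the source and destination layers, inserts a MemCopy or MemImport
/// layer on that edge. The inserted layer is assigned the destination backend and a tensor
/// handle factory compatible with the source output slot (falling back to the legacy
/// factory when no suitable factory is registered).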
void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendInternal>>& backends,
                                   TensorHandleFactoryRegistry& registry)
{
    // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
    // connections to other layers).
    auto MayNeedCompatibilityLayer = [](const Layer& layer)
    {
        // All layers should have been associated with a valid compute device at this point.
        ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
        // Does not need another compatibility layer if a copy or import layer is already present.
        return layer.GetType() != LayerType::MemCopy &&
               layer.GetType() != LayerType::MemImport;
    };

    auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
    {
        return strategy == EdgeStrategy::CopyToTarget ||
               strategy == EdgeStrategy::ExportToTarget;
    };

    ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
    {
        ARMNN_ASSERT(srcLayer);

        if (!MayNeedCompatibilityLayer(*srcLayer))
        {
            // The current layer does not need copy layers, move to the next one
            return;
        }

        const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
        for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
        {
            OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
            const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
            const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
            for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
            {
                InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
                ARMNN_ASSERT(dstInputSlot);

                EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
                ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
                                 "Undefined memory strategy found while adding copy layers for compatibility");

                const Layer& dstLayer = dstInputSlot->GetOwningLayer();
                if (MayNeedCompatibilityLayer(dstLayer) &&
                    IsCompatibilityStrategy(strategy))
                {
                    // A copy layer is needed in between the source and destination layers.
                    // Record the operation rather than attempting to modify the graph as we go
                    // (invalidating iterators).
                    const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
                                                                  srcLayer->GetName(),
                                                                  srcOutputIndex,
                                                                  dstLayer.GetName(),
                                                                  dstInputSlot->GetSlotIndex());
                    Layer* compLayer = nullptr;
                    if (strategy == EdgeStrategy::CopyToTarget)
                    {
                        compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
                    }
                    else
                    {
                        ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
                        compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
                    }

                    compLayer->SetBackendId(dstLayer.GetBackendId());

                    OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
                    auto backendIt = backends.find(dstLayer.GetBackendId());
                    if (backendIt != backends.end() &&
                        backendIt->second &&
                        backendIt->second->SupportsTensorAllocatorAPI())
                    {
                        auto backend = backendIt->second.get();
                        auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
                        bool found = false;

                        for (auto preference : tensorHandleFactoryIds)
                        {
                            auto factory = registry.GetFactory(preference);
                            if (factory)
                            {
                                auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
                                auto srcFactory = registry.GetFactory(srcPref);

                                if (srcFactory)
                                {
                                    bool canExportImport =
                                        (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;

                                    if (factory->SupportsMapUnmap() || canExportImport)
                                    {
                                        compOutputSlot.SetTensorHandleFactory(preference);
                                        found = true;
                                        break;
                                    }
                                }
                            }
                        }

                        if (!found)
                        {
                            compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
                        }
                    }
                    else
                    {
                        compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
                    }

                    // The output strategy of a compatibility layer is always DirectCompatibility.
                    compOutputSlot.SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);

                    // Recalculate the connection index on the previous layer as we have just inserted into it.
                    const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
                    auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
                                                               std::find(newSourceConnections.begin(),
                                                                         newSourceConnections.end(),
                                                                         &compLayer->GetInputSlot(0)));

                    // The input strategy of a compatibility layer is always DirectCompatibility.
                    srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
                                                  EdgeStrategy::DirectCompatibility);
                }
            }
        }
    });
}

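/// Replaces the given sub-graph with a single layer by wrapping the layer in a
/// one-layer SubgraphView and delegating to the SubgraphView overload below.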
void Graph::SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer)
{
    ARMNN_ASSERT(substituteLayer != nullptr);

    // Create a new sub-graph with only the given layer, using
    // the given sub-graph as a reference of which parent graph to use
    SubgraphView substituteSubgraph(substituteLayer);

    SubstituteSubgraph(subgraph, substituteSubgraph);
}

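/// Replaces the layers of 'subgraph' with those of 'substituteSubgraph': substitute layers
/// not yet owned by this graph are reparented into it, the boundary connections are rewired,
/// the old layers are erased, and the graph is re-sorted topologically.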
void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
    // Look through each layer in the new subgraph and add any that are not already a member of this graph
    substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
    {
        if (std::find(std::begin(m_Layers),
                      std::end(m_Layers),
                      iConnectableLayer) == std::end(m_Layers))
        {
            auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
            layer->Reparent(*this, m_Layers.end());
            m_LayersInOrder = false;
        }
    });

    ReplaceSubgraphConnections(subgraph, substituteSubgraph);
    EraseSubgraphLayers(subgraph);
    TopologicalSort();
}

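/// Rewires the boundary of 'subgraph' onto 'substituteSubgraph': each connected input slot
/// of the old sub-graph is disconnected and reconnected to the matching substitute input
/// slot, and all connections leaving each old output slot are moved to the matching
/// substitute output slot. Both sub-graphs must expose the same number of boundary slots.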
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
    ARMNN_ASSERT_MSG(!substituteSubgraph.GetIConnectableLayers().empty(),
                     "New sub-graph used for substitution must not be empty");

    const SubgraphView::IConnectableLayers& substituteSubgraphLayers = substituteSubgraph.GetIConnectableLayers();
    std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](IConnectableLayer* layer)
    {
        IgnoreUnused(layer);
        layer = PolymorphicDowncast<Layer*>(layer);
        ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
                         "Substitute layer is not a member of graph");
    });

    const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
    const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraph.GetIOutputSlots();

    unsigned int subgraphNumInputSlots = armnn::numeric_cast<unsigned int>(subgraphInputSlots.size());
    unsigned int subgraphNumOutputSlots = armnn::numeric_cast<unsigned int>(subgraphOutputSlots.size());

    const SubgraphView::IInputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetIInputSlots();
    const SubgraphView::IOutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetIOutputSlots();

    ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
    ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());

    // Disconnect the sub-graph and replace it with the substitute sub-graph

    // Step 1: process input slots
    for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
    {
        IInputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
        ARMNN_ASSERT(subgraphInputSlot);

        // Only disconnect if the InputSlot has a connection, this might not be the case when
        // dealing with working copies of SubgraphViews
        // Note: we don't need this check for OutputSlot as it iterates over a vector of valid connections
        if (subgraphInputSlot->GetConnection())
        {
            IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
            ARMNN_ASSERT(connectedOutputSlot);
            connectedOutputSlot->Disconnect(*subgraphInputSlot);

            IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
            ARMNN_ASSERT(substituteInputSlot);
            connectedOutputSlot->Connect(*substituteInputSlot);
        }
    }

    // Step 2: process output slots
    for (unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
    {
        auto subgraphOutputSlot =
            PolymorphicDowncast<OutputSlot*>(subgraphOutputSlots.at(outputSlotIdx));
        ARMNN_ASSERT(subgraphOutputSlot);

        auto substituteOutputSlot =
            PolymorphicDowncast<OutputSlot*>(substituteSubgraphOutputSlots.at(outputSlotIdx));
        ARMNN_ASSERT(substituteOutputSlot);

        subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
    }
}

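/// Erases every layer of the given sub-graph from this graph, then clears the sub-graph
/// so it no longer refers to the deleted layers.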
void Graph::EraseSubgraphLayers(SubgraphView& subgraph)
{
    for (auto iConnectableLayer : subgraph.GetIConnectableLayers())
    {
        auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
        EraseLayer(layer);
    }
    subgraph.Clear();
}

/// For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
/// LayerValidationException thrown if no TensorInfo is set.
///
/// @throws LayerValidationException
void Graph::VerifyConstantLayerSetTensorInfo() const
{
    for (auto&& layer : TopologicalSort())
    {
        if (layer->GetType() == armnn::LayerType::Constant)
        {
            for (auto&& output: layer->GetOutputSlots())
            {
                if (!output.IsTensorInfoSet())
                {
                    std::ostringstream message;
                    message << "Output slot TensorInfo not set on "
                            << GetLayerTypeAsCString(layer->GetType())
                            << " layer \""
                            << layer->GetName()
                            << "\"";
                    throw LayerValidationException(message.str());
                }
            }
        }
    }
}

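/// For every layer, in topological order, checks that each input slot is connected to an
/// output slot whose TensorInfo has been set (throwing LayerValidationException otherwise)
/// and, when the layer's shape inference method is ValidateOnly, validates its output
/// shapes against its inputs.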
void Graph::InferTensorInfos()
{
    for (auto&& layer : TopologicalSort())
    {
        for (auto&& input : layer->GetInputSlots())
        {
            const IOutputSlot* source = input.GetConnectedOutputSlot();
            if (source == NULL)
            {
                // Throws exception due to a layer input not being connected to an output slot.
                // Verifies input slot weights and bias are set for FullyConnected layers.
                ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
            }

            if (!source->IsTensorInfoSet())
            {
                std::ostringstream message;
                message << "Output slot TensorInfo not set on "
                        << GetLayerTypeAsCString(layer->GetType())
                        << " layer "
                        << std::quoted(layer->GetName());
                throw LayerValidationException(message.str());
            }
        }

        if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
        {
            layer->ValidateTensorShapesFromInputs();
        }
    }
}

/// Throws exception due to a layer input not being connected to an output slot.
/// Verifies weights and bias are set for layers on input slots 1
/// and 2 respectively. Method checks if bias is enabled before ensuring it is set.
///
/// @param layer constant pointer to a Layer object
/// @param slotIndex input slot index of layer
/// @throws LayerValidationException
void Graph::ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
                                                      unsigned int slotIndex)
{
    std::ostringstream message;
    bool noWeightsAndBias = false;

    if ((layer->GetType() == armnn::LayerType::FullyConnected ||
         layer->GetType() == armnn::LayerType::Convolution2d ||
         layer->GetType() == armnn::LayerType::Convolution3d ||
         layer->GetType() == armnn::LayerType::DepthwiseConvolution2d) && slotIndex > 0)
    {
        message << std::endl;

        // If weights are not set and bias is enabled, also check if bias is set
        if (slotIndex == 1 && layer->GetNumInputSlots() == 3)
        {
            const IOutputSlot* biasSource = layer->GetInputSlot(2).GetConnectedOutputSlot();
            if (biasSource == NULL)
            {
                message << "Weights and bias layers not set." << std::endl;
                noWeightsAndBias = true;
            }
        }

        // Only weights or bias are not set
        if (!noWeightsAndBias)
        {
            if (slotIndex == 1)
            {
                message << "Weights layer not set." << std::endl;
            }
            else
            {
                message << "Bias layer not set." << std::endl;
            }
        }
    }

    std::string slotString = noWeightsAndBias ? "1 & 2" : std::to_string(slotIndex);
    message << "Input slot(s) "
            << slotString
            << " for "
            << GetLayerTypeAsCString(layer->GetType())
            << " not connected to an output slot. " << std::endl
            << "Layer name: "
            << std::quoted(layer->GetName());
    throw LayerValidationException(message.str());
}

const std::shared_ptr<IProfiler>& Graph::GetProfiler() const
{
    return m_Profiler;
}

void Graph::SetLayersOutOfOrder()
{
    m_LayersInOrder = false;
}

} // namespace armnn