//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClSplitterWorkload.hpp"

#include "ClWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>

namespace armnn
{

using namespace armcomputetensorutils;

namespace
{
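// ACL indexes tensor dimensions starting from the innermost dimension, whereas Arm NN
// counts from the outermost, so the requested split axis has to be mirrored before it
// is handed to CLSplit.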
unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int splitAxis)
{
    return (numDimensions - splitAxis) - 1;
}

} //namespace

arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo& input,
                                               const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                               unsigned int splitAxis)
{
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);

    size_t numOutputs = outputs.size();

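    // Build an ACL TensorInfo for every output. Reserving up front keeps the stored
    // objects at stable addresses, since raw pointers into aclOutputs are collected
    // below and passed to CLSplit::validate.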
    std::vector<arm_compute::TensorInfo> aclOutputs;
    aclOutputs.reserve(numOutputs);

    std::vector<arm_compute::ITensorInfo*> aclOutputPtr;
    aclOutputPtr.reserve(numOutputs);

    for (size_t i = 0u; i < outputs.size(); ++i)
    {
        aclOutputs.emplace_back(BuildArmComputeTensorInfo(outputs[i]));
        aclOutputPtr.emplace_back(&aclOutputs.back());
    }

    unsigned int aclAxis = CalcAclAxis(input.GetNumDimensions(), splitAxis);
    return arm_compute::CLSplit::validate(&aclInputInfo, aclOutputPtr, aclAxis);
}

ClSplitterWorkload::ClSplitterWorkload(const SplitterQueueDescriptor& descriptor,
                                       const WorkloadInfo& info,
                                       const arm_compute::CLCompileContext&)
    : ClBaseWorkload<SplitterQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClSplitterWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());
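
    // If every output has been set up as a sub-tensor of the input, the split is realised
    // purely through memory aliasing, so no CLSplit function needs to be configured or run.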
    bool allOutputsAreSubtensors = true;

    // Check that all outputs are sub-tensors
    for (auto output : m_Data.m_Outputs)
    {
        if (output && !output->GetParent())
        {
            // A non sub-tensor output was found, so the split function has to be executed
            allOutputsAreSubtensors = false;
            break;
        }
    }

    if (allOutputsAreSubtensors)
    {
        // Can skip configuring the split function since it's not executed
        return;
    }

    arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
                                        m_Data.m_Inputs[0])->GetTensor();

    std::vector<arm_compute::ICLTensor *> aclOutputs;
    for (auto output : m_Data.m_Outputs)
    {
        arm_compute::ICLTensor& aclOutput = armnn::PolymorphicPointerDowncast<IClTensorHandle>(output)->GetTensor();
        aclOutputs.emplace_back(&aclOutput);
    }

    // Create the layer function

    // Configure input and output tensors
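    // CLSplit takes a single split axis, so the descriptor has to resolve to exactly one
    // dimension along which the output views differ; anything else is rejected.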
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor.m_Parameters, m_Data.m_Inputs[0]->GetShape());
    if (splitAxis.size() != 1)
    {
        throw InvalidArgumentException("Cannot derive split axis from SplitterDescriptor");
    }

    unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
    auto layer = std::make_unique<arm_compute::CLSplit>();
    {
        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSplitterWorkload_configure");
        layer->configure(&input, aclOutputs, aclAxis);
    }

    // Run any one-off preparation work now rather than on the first call to Execute()
    layer->prepare();

    m_Layer = std::move(layer);
}

void ClSplitterWorkload::Execute() const
{
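    // m_Layer is only set when at least one output is not a sub-tensor of the input;
    // otherwise the split has already been realised through aliasing and there is nothing to run.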
    if (m_Layer)
    {
        ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSplitterWorkload_Execute", this->GetGuid());
        m_Layer->run();
    }
}

} //namespace armnn