//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonConcatWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>

namespace armnn
{
using namespace armcomputetensorutils;

namespace
{
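// ArmNN counts the concatenation axis from the outermost dimension, whereas the
// Compute Library counts from the innermost one, so the axis index is reversed here.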
size_t CalcAxis(const armnn::OriginsDescriptor& descriptor)
{
    return (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
}
} //namespace

arm_compute::Status NeonConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                               const TensorInfo& output,
                                               const OriginsDescriptor& descriptor)
{
    std::vector<arm_compute::TensorInfo> aclInputs;
    for (const TensorInfo* input : inputs)
    {
        arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
        aclInputs.emplace_back(aclInputInfo);
    }
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
    std::vector<const arm_compute::ITensorInfo*> aclInputPtrs;
    for (arm_compute::ITensorInfo& input : aclInputs)
    {
        aclInputPtrs.emplace_back(&input);
    }

    size_t aclAxis = CalcAxis(descriptor);
    return arm_compute::NEConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
}

NeonConcatWorkload::NeonConcatWorkload(
    const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info)
        : NeonBaseWorkload<ConcatQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonConcatWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());

    bool allInputsAreSubtensors = true;

    // Check that all inputs are sub-tensors
    for (auto input : descriptor.m_Inputs)
    {
        if (!input->GetParent())
        {
            // Non sub-tensor input found so we need to execute the concat function
            allInputsAreSubtensors = false;
            break;
        }
    }

    if (allInputsAreSubtensors)
    {
        // Can skip configuring the concat function since it's not executed
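        // (the sub-tensor inputs are expected to be views into the output tensor, so the data is already in place)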
        return;
    }

    std::vector<const arm_compute::ITensor*> aclInputs;
    for (auto input : m_Data.m_Inputs)
    {
        arm_compute::ITensor& aclInput = armnn::PolymorphicPointerDowncast<IAclTensorHandle>(input)->GetTensor();
        aclInputs.emplace_back(&aclInput);
    }
    arm_compute::ITensor& output = armnn::PolymorphicPointerDowncast<IAclTensorHandle>(
        m_Data.m_Outputs[0])->GetTensor();

    // Create the layer function
    m_Layer.reset(new arm_compute::NEConcatenateLayer());

    // Configure input and output tensors
    size_t aclAxis = CalcAxis(descriptor.m_Parameters);
    m_Layer->configure(aclInputs, &output, aclAxis);

    // Prepare
    m_Layer->prepare();
}

void NeonConcatWorkload::Execute() const
{
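    // m_Layer is only created when at least one input is not a sub-tensor; otherwise there is nothing to run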
    if (m_Layer)
    {
        ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConcatWorkload_Execute", this->GetGuid());
        m_Layer->run();
    }
}

} //namespace armnn