//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClTransposeConvolution2dWorkload.hpp"

#include "ClWorkloadUtils.hpp"

#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>

#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <armnn/backends/TensorHandle.hpp>

#include <arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h>

namespace armnn
{

using namespace armcomputetensorutils;

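// Validates whether the CL backend can execute a transpose convolution with these tensors and
// descriptor: the Arm NN tensor infos are converted to their Compute Library equivalents and the
// check is delegated to CLDeconvolutionLayer::validate.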
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo& input,
                                                             const TensorInfo& output,
                                                             const TransposeConvolution2dDescriptor& descriptor,
                                                             const TensorInfo& weights,
                                                             const Optional<TensorInfo>& biases)
{
    arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
    arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);

    arm_compute::TensorInfo aclBiasesInfo;
    arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;

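    // The bias is optional: only build an ACL tensor info for it when the descriptor enables it,
    // otherwise a null pointer is passed to validate().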
    if (descriptor.m_BiasEnabled)
    {
        ARMNN_ASSERT(biases.has_value());

        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
        optionalAclBiasesInfo = &aclBiasesInfo;
    }

    arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(descriptor);

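    // Let the Compute Library deconvolution primitive decide whether this configuration is supported.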
    return arm_compute::CLDeconvolutionLayer::validate(&aclInputInfo,
                                                       &aclWeightsInfo,
                                                       optionalAclBiasesInfo,
                                                       &aclOutputInfo,
                                                       padStrideInfo);
}

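// The constructor does the heavy lifting: it creates staging CL tensors for the constant weights
// and optional bias, configures the Compute Library deconvolution function and uploads the constant
// data, so that Execute() only has to run the already-prepared function.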
ClTransposeConvolution2dWorkload::ClTransposeConvolution2dWorkload(
    const TransposeConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info,
    std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager,
    const arm_compute::CLCompileContext& clCompileContext)
    : ClBaseWorkload<TransposeConvolution2dQueueDescriptor>(descriptor, info)
    , m_Layer(memoryManager)
{
    // Add details for profiling output
    WorkloadInfo detailsInfo;

    detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
    detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
    detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
    if (descriptor.m_Parameters.m_BiasEnabled)
    {
        detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
    }

    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClTransposeConvolution2dWorkload_Construct",
                                         descriptor.m_Parameters,
                                         detailsInfo,
                                         this->GetGuid());

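    // The weights (and optional bias) are constants owned by the workload: create CL tensors that
    // match their Arm NN tensor infos so the data can be copied in further down.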
    const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();

    m_WeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_WeightsTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasesTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
    }

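    // Transpose convolution takes exactly one input tensor and produces exactly one output tensor.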
    m_Data.ValidateInputsOutputs("ClTransposeConvolution2dWorkload", 1, 1);

    arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

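    // Propagate the Arm NN data layout (NCHW or NHWC) to the ACL tensor infos.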
    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);

    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

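    // Configure the CL deconvolution function with the stride and padding converted from the descriptor.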
    arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
    {
        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClTransposeConvolution2dWorkload_configure");
        m_Layer.configure(clCompileContext, &input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output,
                          padStrideInfo);
    }

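    // Copy the constant weight (and bias) data from the Arm NN handles into the CL tensors.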
    InitializeArmComputeClTensorData(*m_WeightsTensor, m_Data.m_Weight);
    if (m_BiasesTensor)
    {
        InitializeArmComputeClTensorData(*m_BiasesTensor, m_Data.m_Bias);
    }

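    // Let the Compute Library run its one-off preparation work (e.g. weight transformation) now,
    // after which any staging tensors it no longer needs can be released.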
    m_Layer.prepare();

    FreeUnusedTensors();
}

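// Runs the configured CL deconvolution function; the profiling event is tagged with this workload's GUID.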
void ClTransposeConvolution2dWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClTransposeConvolution2dWorkload_Execute", this->GetGuid());
    RunClFunction(m_Layer, CHECK_LOCATION());
}

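// Frees the staging weight and bias tensors once the Compute Library no longer marks them as used.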
void ClTransposeConvolution2dWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_WeightsTensor);
    FreeTensorIfUnused(m_BiasesTensor);
}

} // namespace armnn