//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefTransposeConvolution2dWorkload.hpp"

#include "RefWorkloadUtils.hpp"
#include "TransposeConvolution2d.hpp"

#include <Profiling.hpp>

namespace armnn
{

RefTransposeConvolution2dWorkload::RefTransposeConvolution2dWorkload(
    const TransposeConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) :
    RefBaseWorkload<TransposeConvolution2dQueueDescriptor>(descriptor, info)
{
    // set up weights decoder
    m_Weights = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
    const TensorInfo& weightsInfo = m_Weights->GetTensorInfo();

    m_WeightsDecoder = MakeDecoder<float>(weightsInfo, m_Weights->Map(true));
    m_WeightsShape   = weightsInfo.GetShape();

    // set up biases decoder
    if (descriptor.m_Parameters.m_BiasEnabled)
    {
        m_Biases = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
        const TensorInfo& biasesInfo = m_Biases->GetTensorInfo();
        m_BiasesDecoder = MakeDecoder<float>(biasesInfo, m_Biases->Map(true));
    }
}

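// Execute() works on the tensor handles owned by the workload itself (m_Data),
// whereas ExecuteAsync() below takes them from caller-supplied execution data.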
void RefTransposeConvolution2dWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

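// Asynchronous path: the input and output tensor handles arrive through the
// WorkingMemDescriptor carried by the ExecutionData, not through m_Data.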
void RefTransposeConvolution2dWorkload::ExecuteAsync(ExecutionData& executionData)
{
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

void RefTransposeConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs,
                                                std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefTransposeConvolution2dWorkload_Execute");

    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
    std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());

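    // m_BiasesDecoder is only created in the constructor when m_BiasEnabled is set,
    // so the pointer passed below is null when the layer has no bias.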
    TransposeConvolution2dImpl(m_Data.m_Parameters,
                               inputInfo.GetShape(),
                               *inputDecoder,
                               outputInfo.GetShape(),
                               *outputEncoder,
                               m_WeightsShape,
                               *m_WeightsDecoder,
                               m_BiasesDecoder.get());
}

} // namespace armnn