//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefMeanWorkload.hpp"

#include "Reduce.hpp"
#include "RefWorkloadUtils.hpp"

#include "Profiling.hpp"

#include <vector>

namespace armnn
{

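// RefMeanWorkload implements the Mean layer on the reference (CpuRef) backend.
// It reduces the input tensor along the axes given in the layer parameters and
// writes the per-slice mean to the output tensor.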
RefMeanWorkload::RefMeanWorkload(const MeanQueueDescriptor& descriptor, const WorkloadInfo& info)
  :RefBaseWorkload<MeanQueueDescriptor>(descriptor, info) {}

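// Synchronous execution path: operates on the tensor handles stored in the
// queue descriptor (m_Data).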
void RefMeanWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

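// Asynchronous execution path: the input and output handles come from the
// WorkingMemDescriptor carried in the ExecutionData rather than from m_Data,
// so each invocation can use externally managed working memory.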
void RefMeanWorkload::ExecuteAsync(ExecutionData& executionData)
{
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

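// Shared implementation used by both Execute() and ExecuteAsync().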
void RefMeanWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMeanWorkload_Execute");

    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

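    // Decoder/Encoder provide element-wise float access to the mapped input and
    // output buffers, dequantizing/requantizing as needed for quantized data types.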
    auto inputDecoder  = MakeDecoder<float>(inputInfo,  inputs[0]->Map());
    auto outputEncoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());

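    // Mean is computed by the generic Reduce kernel with ReduceOperation::Mean,
    // reducing over the axes specified in m_Parameters.m_Axis.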
    Reduce(inputInfo,
           outputInfo,
           *inputDecoder,
           *outputEncoder,
           m_Data.m_Parameters.m_Axis,
           armnn::ReduceOperation::Mean);
}

} //namespace armnn