//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefConvertFp32ToFp16Workload.hpp"
#include "RefWorkloadUtils.hpp"
#include "Profiling.hpp"

#include <armnnUtils/FloatingPointConverter.hpp>

#include <Half.hpp>

namespace armnn
{

void RefConvertFp32ToFp16Workload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefConvertFp32ToFp16Workload::ExecuteAsync(ExecutionData& executionData)
{
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

void RefConvertFp32ToFp16Workload::Execute(std::vector<ITensorHandle*> inputs,
                                           std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertFp32ToFp16Workload_Execute");

    const float* const input = reinterpret_cast<const float*>(inputs[0]->Map());
    Half* const output = reinterpret_cast<Half*>(outputs[0]->Map());

    // convert Fp32 input to Fp16 output
    unsigned int numElements = GetTensorInfo(inputs[0]).GetNumElements();
    armnnUtils::FloatingPointConverter::ConvertFloat32To16(input, numElements, output);
}

} //namespace armnn
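
// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the upstream file): the same
// armnnUtils::FloatingPointConverter::ConvertFloat32To16 call the workload
// makes above, applied to a plain buffer instead of mapped tensor handles.
// The helper name ConvertFp32BufferToFp16 is hypothetical.
// ----------------------------------------------------------------------------
// #include <armnnUtils/FloatingPointConverter.hpp>
// #include <Half.hpp>
// #include <vector>
//
// std::vector<armnn::Half> ConvertFp32BufferToFp16(const std::vector<float>& src)
// {
//     std::vector<armnn::Half> dst(src.size());
//     // The count argument is in elements, not bytes, exactly as in
//     // RefConvertFp32ToFp16Workload::Execute above.
//     armnnUtils::FloatingPointConverter::ConvertFloat32To16(src.data(), src.size(), dst.data());
//     return dst;
// }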