//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClConvertFp16ToFp32Workload.hpp"
#include <cl/ClTensorHandle.hpp>

#include "ClWorkloadUtils.hpp"

namespace armnn
{
using namespace armcomputetensorutils;

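// SATURATE clamps out-of-range values to the destination type's limits instead of wrapping.
// For FP16 -> FP32 every source value is representable, so the policy has no practical effect here.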
static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;

ClConvertFp16ToFp32Workload::ClConvertFp16ToFp32Workload(
    const ConvertFp16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info,
    const arm_compute::CLCompileContext& clCompileContext) :
    Float16ToFloat32Workload<ConvertFp16ToFp32QueueDescriptor>(descriptor, info)
{
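    // This workload operates on exactly one input tensor and one output tensor.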
    this->m_Data.ValidateInputsOutputs("ClConvertFp16ToFp32Workload", 1, 1);

    arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();

    // Create proxy tensors and set the initial tensor handles on them.
    m_InputProxy = std::make_unique<ICLTensorProxy>(&input);
    m_OutputProxy = std::make_unique<ICLTensorProxy>(&output);

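    // Configure the ACL layer against the proxies rather than the tensors themselves, so that
    // Reconfigure() can later retarget the underlying tensors without reconfiguring the layer.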
    {
        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvertFp16ToFp32Workload_configure");
        m_Layer.configure(clCompileContext, m_InputProxy.get(), m_OutputProxy.get(), g_AclConvertPolicy, 0);
    }
}

void ClConvertFp16ToFp32Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConvertFp16ToFp32Workload_Execute", this->GetGuid());
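    // Run the configured CL function; errors raised by the CL runtime are rethrown as armnn exceptions.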
    RunClFunction(m_Layer, CHECK_LOCATION());
}

arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
{
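    // Only FP16 -> FP32 conversion is handled by this workload; reject other data types up front.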
    if (input.GetDataType() != DataType::Float16)
    {
        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Input should be Float16");
    }
    if (output.GetDataType() != DataType::Float32)
    {
        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Output should be Float32");
    }

    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);

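    // Delegate the remaining validation (shapes, layout, etc.) to the ACL layer backing this workload.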
    const arm_compute::Status aclStatus = arm_compute::CLDepthConvertLayer::validate(
        &aclInputInfo, &aclOutputInfo, g_AclConvertPolicy, 0);

    return aclStatus;
}

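// Replace input tensor handle with the given TensorHandle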
void ClConvertFp16ToFp32Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw e;
    }
}

// Replace output tensor handle with the given TensorHandle
void ClConvertFp16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
    this->m_Data.m_Outputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Outputs[slot] = backupHandle;
        throw e;
    }
}

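// Point the proxies at the tensors currently set on the queue descriptor; the configured CL
// kernel reads through the proxies, so it picks up the new tensors on the next Execute().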
void ClConvertFp16ToFp32Workload::Reconfigure()
{
    arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
    m_InputProxy->set(&input);
    m_OutputProxy->set(&output);
}

} //namespace armnn