1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
#include "NeonDequantizeWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <arm_compute/runtime/NEON/functions/NEDequantizationLayer.h>

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <neon/NeonTensorHandle.hpp>

#include <memory>
16
17 namespace armnn
18 {
19
20 using namespace armcomputetensorutils;
21
NeonDequantizeWorkloadValidate(const TensorInfo & input,const TensorInfo & output)22 arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo& input,
23 const TensorInfo& output)
24 {
25 const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
26 const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
27
28 return arm_compute::NEDequantizationLayer::validate(&aclInput, &aclOutput);
29 }
30
NeonDequantizeWorkload(const DequantizeQueueDescriptor & descriptor,const WorkloadInfo & info)31 NeonDequantizeWorkload::NeonDequantizeWorkload(const DequantizeQueueDescriptor& descriptor, const WorkloadInfo& info)
32 : NeonBaseWorkload<DequantizeQueueDescriptor>(descriptor, info)
33 {
34 m_Data.ValidateInputsOutputs("NeonDequantizeWorkload", 1, 1);
35
36 arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
37 arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
38
39 std::unique_ptr<arm_compute::NEDequantizationLayer> layer(new arm_compute::NEDequantizationLayer());
40 layer->configure(&input, &output);
41 layer->prepare();
42 m_Layer.reset(layer.release());
43 }
44
Execute() const45 void NeonDequantizeWorkload::Execute() const
46 {
47 ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonDequantizeWorkload_Execute", this->GetGuid());
48 m_Layer->run();
49 }
50
51 } //namespace armnn
52
53