//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonQuantizeWorkload.hpp"
#include "NeonWorkloadUtils.hpp"

#include <neon/NeonTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <arm_compute/core/Types.h>

namespace armnn
{
using namespace armcomputetensorutils;

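// Asks the Compute Library whether NEQuantizationLayer supports this
// input/output pairing. The Arm NN TensorInfos are translated to their ACL
// equivalents first; no tensors are allocated by this check.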
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
{
    const arm_compute::TensorInfo neonInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    return arm_compute::NEQuantizationLayer::validate(&neonInputInfo, &neonOutputInfo);
}

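// Builds the workload: checks that exactly one input and one output were
// supplied, pulls the ACL tensors out of the Arm NN tensor handles, then
// configures and prepares the underlying NEQuantizationLayer function.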
NeonQuantizeWorkload::NeonQuantizeWorkload(const QuantizeQueueDescriptor& descriptor,
                                           const WorkloadInfo& workloadInfo)
    : NeonBaseWorkload<QuantizeQueueDescriptor>(descriptor, workloadInfo)
{
    m_Data.ValidateInputsOutputs("NeonQuantizeWorkload", 1, 1);

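    // Downcast the generic tensor handles to IAclTensorHandle to reach the
    // arm_compute::ITensor objects that back them.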
    arm_compute::ITensor& input = PolymorphicPointerDowncast<IAclTensorHandle>(
                                                             m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = PolymorphicPointerDowncast<IAclTensorHandle>(
                                                              m_Data.m_Outputs[0])->GetTensor();

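    // Configure the ACL function once up front and call prepare() so that any
    // one-time setup happens here rather than on the first Execute().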
    m_Layer.reset(new arm_compute::NEQuantizationLayer());
    m_Layer->configure(&input, &output);
    m_Layer->prepare();
}

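// Runs the configured quantization function. The scoped profiling event tags
// the run with this workload's GUID for the Arm NN profiler.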
void NeonQuantizeWorkload::Execute() const
{
    if (m_Layer)
    {
        ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonQuantizeWorkload_Execute", this->GetGuid());
        m_Layer->run();
    }
}

} // namespace armnn