//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonLogicalNotWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <armnn/utility/PolymorphicDowncast.hpp>


namespace armnn
{

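// Asks Arm Compute Library (via NELogicalNot::validate) whether the NEON backend
// can execute a LogicalNot operation with the given input and output tensor infos.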
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo& input,
                                                   const TensorInfo& output)
{
    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);

    const arm_compute::Status aclStatus = arm_compute::NELogicalNot::validate(&aclInputInfo,
                                                                              &aclOutputInfo);
    return aclStatus;
}

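// The constructor turns the ArmNN workload description into a configured Arm Compute
// Library NELogicalNot function, which Execute() then runs for each inference.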
NeonLogicalNotWorkload::NeonLogicalNotWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
                                               const WorkloadInfo& info)
    : NeonBaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonLogicalNotWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());

    // LogicalNot is a unary operation: exactly one input and one output tensor.
    m_Data.ValidateInputsOutputs("NeonLogicalNotWorkload", 1, 1);

    // Unwrap the ArmNN tensor handles to obtain the underlying ACL tensors.
    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    m_LogicalNotLayer.configure(&input, &output);
}

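// Runs the ACL function configured in the constructor, wrapped in a profiling event.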
void NeonLogicalNotWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonLogicalNotWorkload_Execute", this->GetGuid());
    m_LogicalNotLayer.run();
}

} // namespace armnn