//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonSpaceToBatchNdWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <ResolveType.hpp>

namespace armnn
{

using namespace armcomputetensorutils;

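// Queries the Arm Compute Library (ACL) to check whether NESpaceToBatchLayer supports the given
// combination of tensor shapes, data types and SpaceToBatchNd parameters. Only ACL TensorInfo
// descriptors are built here; no tensors are allocated and no kernel is configured.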
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const SpaceToBatchNdDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);

    // ArmNN blockShape is [H, W]; ACL asks for W, H
    int32_t blockHeight = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[0]);
    int32_t blockWidth  = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);

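    // m_PadList mirrors the [H, W] ordering of m_BlockShape: [0] holds the (top, bottom) pair and
    // [1] the (left, right) pair, whereas ACL expects the paddings as (left, top) / (right, bottom).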
    arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(
        descriptor.m_PadList[1].first, descriptor.m_PadList[0].first);
    arm_compute::Size2D paddingRightBottom = BuildArmComputeSize2D(
        descriptor.m_PadList[1].second, descriptor.m_PadList[0].second);

    return arm_compute::NESpaceToBatchLayer::validate(&aclInputInfo,
                                                      blockWidth,
                                                      blockHeight,
                                                      paddingLeftTop,
                                                      paddingRightBottom,
                                                      &aclOutputInfo);
}

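// Translates the ArmNN queue descriptor into ACL terms, then configures and prepares the
// underlying NESpaceToBatchLayer so the workload is ready to run.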
NeonSpaceToBatchNdWorkload::NeonSpaceToBatchNdWorkload(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info)
    : NeonBaseWorkload<SpaceToBatchNdQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonSpaceToBatchNdWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());

    m_Data.ValidateInputsOutputs("NESpaceToBatchNdWorkload", 1, 1);

    arm_compute::ITensor& input =
        PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output =
        PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();

    // ArmNN blockShape is [H, W]; ACL asks for W, H
    int32_t blockHeight = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
    int32_t blockWidth  = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[1]);

    arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(
        m_Data.m_Parameters.m_PadList[1].first, m_Data.m_Parameters.m_PadList[0].first);
    arm_compute::Size2D paddingRightBottom = BuildArmComputeSize2D(
        m_Data.m_Parameters.m_PadList[1].second, m_Data.m_Parameters.m_PadList[0].second);

    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

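    // Configure the ACL layer with the translated parameters and call prepare() up front so that
    // any one-off initialisation happens at construction time rather than on the first run.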
    m_Layer.reset(new arm_compute::NESpaceToBatchLayer());
    m_Layer->configure(&input,
                       blockWidth,
                       blockHeight,
                       paddingLeftTop,
                       paddingRightBottom,
                       &output);
    m_Layer->prepare();
}

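// Runs the configured NESpaceToBatchLayer inside a scoped NEON profiling event tagged with this
// workload's GUID.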
void NeonSpaceToBatchNdWorkload::Execute() const
{
    if (m_Layer)
    {
        ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSpaceToBatchNdWorkload_Execute", this->GetGuid());
        m_Layer->run();
    }
}

} //namespace armnn