//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ConvertFp32ToFp16Layer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

namespace armnn
{

// Constructs the layer with one input slot and one output slot
// (the 1, 1 arguments to the Layer base constructor) and the
// LayerType::ConvertFp32ToFp16 type tag.
// @param name Optional display name stored on the base Layer.
ConvertFp32ToFp16Layer::ConvertFp32ToFp16Layer(const char* name)
    : Layer(1, 1, LayerType::ConvertFp32ToFp16, name)
{
}
20
CreateWorkload(const IWorkloadFactory & factory) const21 std::unique_ptr<IWorkload> ConvertFp32ToFp16Layer::CreateWorkload(const IWorkloadFactory& factory) const
22 {
23 ConvertFp32ToFp16QueueDescriptor descriptor;
24 SetAdditionalInfo(descriptor);
25
26 return factory.CreateWorkload(LayerType::ConvertFp32ToFp16, descriptor, PrepInfoAndDesc(descriptor));
27 }
28
// Creates a copy of this layer (same type and name) in the given graph.
// This layer has no extra state beyond the base Layer members, so
// CloneBase needs only the graph and the name.
ConvertFp32ToFp16Layer* ConvertFp32ToFp16Layer::Clone(Graph& graph) const
{
    return CloneBase<ConvertFp32ToFp16Layer>(graph, GetName());
}
33
ValidateTensorShapesFromInputs()34 void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs()
35 {
36
37 VerifyLayerConnections(1, CHECK_LOCATION());
38
39 const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
40
41 VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
42
43 auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
44
45 ARMNN_ASSERT(inferredShapes.size() == 1);
46
47 ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
48 }
49
// Visitor entry point: reports this layer to the strategy, passing its
// parameter descriptor, an empty list of constant tensors (this layer
// holds none), and its name.
void ConvertFp32ToFp16Layer::ExecuteStrategy(IStrategy& strategy) const
{
    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
54
} // namespace armnn