1*3e777be0SXin Li //
2*3e777be0SXin Li // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3*3e777be0SXin Li // SPDX-License-Identifier: MIT
4*3e777be0SXin Li //
5*3e777be0SXin Li
6*3e777be0SXin Li #pragma once
7*3e777be0SXin Li
8*3e777be0SXin Li #include "DriverTestHelpers.hpp"
9*3e777be0SXin Li
10*3e777be0SXin Li #include <armnn/StrategyBase.hpp>
11*3e777be0SXin Li #include <armnn/utility/IgnoreUnused.hpp>
12*3e777be0SXin Li
13*3e777be0SXin Li #include <numeric>
14*3e777be0SXin Li
15*3e777be0SXin Li using namespace armnn;
16*3e777be0SXin Li using namespace driverTestHelpers;
17*3e777be0SXin Li
/// Options selecting which variant of the dilation conversion test to build:
/// regular vs. depthwise convolution, explicit vs. implicit padding, and
/// whether dilation operands are appended to the model at all.
///
/// Rule of Zero: in-class initializers replace the hand-written default
/// constructor and the explicitly defaulted destructor — the compiler-generated
/// special members are correct for this plain aggregate of flags.
struct DilationTestOptions
{
    bool m_IsDepthwiseConvolution{false}; // build DEPTHWISE_CONV_2D instead of CONV_2D
    bool m_IsPaddingExplicit{false};      // add 4 explicit padding operands instead of 1 padding-scheme operand
    bool m_HasDilation{false};            // append data-layout + dilation X/Y operands
};
32*3e777be0SXin Li
33*3e777be0SXin Li class DilationTestVisitor : public StrategyBase<ThrowingStrategy>
34*3e777be0SXin Li {
35*3e777be0SXin Li public:
DilationTestVisitor()36*3e777be0SXin Li DilationTestVisitor() :
37*3e777be0SXin Li DilationTestVisitor(1u, 1u)
38*3e777be0SXin Li {}
39*3e777be0SXin Li
DilationTestVisitor(uint32_t expectedDilationX,uint32_t expectedDilationY)40*3e777be0SXin Li DilationTestVisitor(uint32_t expectedDilationX, uint32_t expectedDilationY) :
41*3e777be0SXin Li m_ExpectedDilationX{expectedDilationX},
42*3e777be0SXin Li m_ExpectedDilationY{expectedDilationY}
43*3e777be0SXin Li {}
44*3e777be0SXin Li
ExecuteStrategy(const armnn::IConnectableLayer * layer,const armnn::BaseDescriptor & descriptor,const std::vector<armnn::ConstTensor> & constants,const char * name,const armnn::LayerBindingId id=0)45*3e777be0SXin Li void ExecuteStrategy(const armnn::IConnectableLayer* layer,
46*3e777be0SXin Li const armnn::BaseDescriptor& descriptor,
47*3e777be0SXin Li const std::vector<armnn::ConstTensor>& constants,
48*3e777be0SXin Li const char* name,
49*3e777be0SXin Li const armnn::LayerBindingId id = 0) override
50*3e777be0SXin Li {
51*3e777be0SXin Li armnn::IgnoreUnused(layer, constants, id, name);
52*3e777be0SXin Li switch (layer->GetType())
53*3e777be0SXin Li {
54*3e777be0SXin Li case armnn::LayerType::Constant:
55*3e777be0SXin Li break;
56*3e777be0SXin Li case armnn::LayerType::Convolution2d:
57*3e777be0SXin Li {
58*3e777be0SXin Li CheckDilationParams(static_cast<const armnn::Convolution2dDescriptor&>(descriptor));
59*3e777be0SXin Li break;
60*3e777be0SXin Li }
61*3e777be0SXin Li case armnn::LayerType::DepthwiseConvolution2d:
62*3e777be0SXin Li {
63*3e777be0SXin Li CheckDilationParams(static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor));
64*3e777be0SXin Li break;
65*3e777be0SXin Li }
66*3e777be0SXin Li default:
67*3e777be0SXin Li {
68*3e777be0SXin Li m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
69*3e777be0SXin Li }
70*3e777be0SXin Li }
71*3e777be0SXin Li }
72*3e777be0SXin Li
73*3e777be0SXin Li private:
74*3e777be0SXin Li uint32_t m_ExpectedDilationX;
75*3e777be0SXin Li uint32_t m_ExpectedDilationY;
76*3e777be0SXin Li
77*3e777be0SXin Li template<typename ConvolutionDescriptor>
CheckDilationParams(const ConvolutionDescriptor & descriptor)78*3e777be0SXin Li void CheckDilationParams(const ConvolutionDescriptor& descriptor)
79*3e777be0SXin Li {
80*3e777be0SXin Li CHECK_EQ(descriptor.m_DilationX, m_ExpectedDilationX);
81*3e777be0SXin Li CHECK_EQ(descriptor.m_DilationY, m_ExpectedDilationY);
82*3e777be0SXin Li }
83*3e777be0SXin Li };
84*3e777be0SXin Li
/// Builds a minimal one-operation HAL model (CONV_2D or DEPTHWISE_CONV_2D,
/// chosen via 'options'), converts it with HalPolicy on the CpuRef backend,
/// and verifies with DilationTestVisitor that the resulting ArmNN layer
/// carries the expected dilation: 2x2 when dilation operands are supplied,
/// otherwise the default 1x1.
///
/// NOTE: operands must be added in exactly the NNAPI-prescribed order for the
/// chosen operation, and numInputs must track every addition — the operand
/// indices double as the operation's input list (see std::iota below).
template<typename HalPolicy>
void DilationTestImpl(const DilationTestOptions& options)
{
    using HalModel = typename HalPolicy::Model;
    using HalOperationType = typename HalPolicy::OperationType;

    // Convert on the reference backend so the test is hardware-independent.
    const armnn::Compute backend = armnn::Compute::CpuRef;
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(backend, false));
    HalModel model = {};

    // add operands
    std::vector<float> weightData(9, 1.0f); // 3x3 kernel of ones
    std::vector<float> biasData(1, 0.0f );  // single zero bias

    // input: operand 0, shape 1x3x3x1 (NHWC)
    AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3, 3, 1});

    // weights & biases: operands 1 and 2
    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3, 3, 1}, weightData.data());
    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasData.data());

    // input + weights + bias added so far; updated as further operands go in
    uint32_t numInputs = 3u;
    // padding
    if (options.m_IsPaddingExplicit)
    {
        // explicit form: four scalar pads (left, right, top, bottom), all 1
        AddIntOperand<HalPolicy>(model, 1);
        AddIntOperand<HalPolicy>(model, 1);
        AddIntOperand<HalPolicy>(model, 1);
        AddIntOperand<HalPolicy>(model, 1);
        numInputs += 4;
    }
    else
    {
        // implicit form: a single padding-scheme operand (SAME)
        AddIntOperand<HalPolicy>(model, android::nn::kPaddingSame);
        numInputs += 1;
    }

    AddIntOperand<HalPolicy>(model, 2); // stride x
    AddIntOperand<HalPolicy>(model, 2); // stride y
    numInputs += 2;

    if (options.m_IsDepthwiseConvolution)
    {
        AddIntOperand<HalPolicy>(model, 1); // depth multiplier
        numInputs++;
    }

    AddIntOperand<HalPolicy>(model, 0); // no activation
    numInputs += 1;

    // dilation: optional trailing operands (data layout flag, then X and Y)
    if (options.m_HasDilation)
    {
        AddBoolOperand<HalPolicy>(model, false); // default data layout

        AddIntOperand<HalPolicy>(model, 2); // dilation X
        AddIntOperand<HalPolicy>(model, 2); // dilation Y

        numInputs += 3;
    }

    // output: operand index numInputs, the first index past the inputs
    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 1, 1});

    // set up the convolution operation
    model.operations.resize(1);
    model.operations[0].type = options.m_IsDepthwiseConvolution ?
        HalOperationType::DEPTHWISE_CONV_2D : HalOperationType::CONV_2D;

    // inputs are operands 0..numInputs-1 in the order they were added above
    std::vector<uint32_t> inputs(numInputs);
    std::iota(inputs.begin(), inputs.end(), 0u);
    std::vector<uint32_t> outputs = { numInputs };

    model.operations[0].inputs = hidl_vec<uint32_t>(inputs);
    model.operations[0].outputs = hidl_vec<uint32_t>(outputs);

    // convert model
    ConversionData data({backend});
    data.m_Network = armnn::INetwork::Create();
    // one output slot per operand; populated during conversion
    data.m_OutputSlotForOperand = std::vector<IOutputSlot*>(model.operands.size(), nullptr);

    bool ok = HalPolicy::ConvertOperation(model.operations[0], model, data);
    DOCTEST_CHECK(ok);

    // check if dilation params are as expected
    DilationTestVisitor visitor = options.m_HasDilation ? DilationTestVisitor(2, 2) : DilationTestVisitor();
    data.m_Network->ExecuteStrategy(visitor);
}
173