//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DriverTestHelpers.hpp"

#include <log/log.h>

#include <OperationsUtils.h>

using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;

using RequestArgument = V1_0::RequestArgument;

namespace driverTestHelpers
{
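// Checks 'result' against the FP32 expectation or, when FP16 ("relaxed") computation is
// enabled, against either the FP16 or the FP32 expectation.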
#define ARMNN_ANDROID_FP16_TEST(result, fp16Expectation, fp32Expectation, fp16Enabled) \
    if (fp16Enabled) \
    { \
        DOCTEST_CHECK_MESSAGE((result == fp16Expectation || result == fp32Expectation), result << \
        " does not match either " << fp16Expectation << "[fp16] or " << fp32Expectation << "[fp32]"); \
    } else \
    { \
        DOCTEST_CHECK(result == fp32Expectation); \
    }
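// Example (matching the output checks at the end of PaddingTestImpl below):
//   ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)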
// Sets the model's FP16 ("relaxed computation") flag where the HAL version supports it.
void SetModelFp16Flag(V1_0::Model& model, bool fp16Enabled);

void SetModelFp16Flag(V1_1::Model& model, bool fp16Enabled);
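// Builds a single CONV_2D model (1x2x3x1 input, 1x2x2x1 weights, stride 2 in each dimension),
// prepares and executes it through the driver, then checks the output for the given padding scheme.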
template<typename HalPolicy>
void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled = false)
{
    using HalModel         = typename HalPolicy::Model;
    using HalOperationType = typename HalPolicy::OperationType;

    armnn::Compute computeDevice = armnn::Compute::GpuAcc;

#ifndef ARMCOMPUTECL_ENABLED
    computeDevice = armnn::Compute::CpuRef;
#endif

    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice, fp16Enabled));
    HalModel model = {};

    // SAME padding on the width-3 input with stride 2 produces two output elements; VALID produces one
    uint32_t outSize = paddingScheme == android::nn::kPaddingSame ? 2 : 1;

    // add operands
    float weightValue[] = {1.f, -1.f, 0.f, 1.f};
    float biasValue[] = {0.f};

    AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 3, 1});
    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 2, 1}, weightValue);
    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasValue);
    AddIntOperand<HalPolicy>(model, (int32_t) paddingScheme); // padding
    AddIntOperand<HalPolicy>(model, 2); // stride x
    AddIntOperand<HalPolicy>(model, 2); // stride y
    AddIntOperand<HalPolicy>(model, 0); // no activation
    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, outSize, 1});

    // make the convolution operation
    model.operations.resize(1);
    model.operations[0].type = HalOperationType::CONV_2D;
    model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3, 4, 5, 6};
    model.operations[0].outputs = hidl_vec<uint32_t>{7};

    // make the prepared model
    SetModelFp16Flag(model, fp16Enabled);
    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, *driver);

    // construct the request
    V1_0::DataLocation inloc = {};
    inloc.poolIndex = 0;
    inloc.offset = 0;
    inloc.length = 6 * sizeof(float);
    RequestArgument input = {};
    input.location = inloc;
    input.dimensions = hidl_vec<uint32_t>{};

    V1_0::DataLocation outloc = {};
    outloc.poolIndex = 1;
    outloc.offset = 0;
    outloc.length = outSize * sizeof(float);
    RequestArgument output = {};
    output.location = outloc;
    output.dimensions = hidl_vec<uint32_t>{};

    V1_0::Request request = {};
    request.inputs = hidl_vec<RequestArgument>{input};
    request.outputs = hidl_vec<RequestArgument>{output};

    // set the input data (matching source test)
    float indata[] = {1024.25f, 1.f, 0.f, 3.f, -1, -1024.25f};
    AddPoolAndSetData(6, request, indata);

    // add memory for the output
    android::sp<IMemory> outMemory = AddPoolAndGetData<float>(outSize, request);
    float* outdata = reinterpret_cast<float*>(static_cast<void*>(outMemory->getPointer()));

    // run the execution
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }

    // check the result
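    // Note: 1024.25f is not exactly representable in FP16 and rounds to 1024.f,
    // which is why the relaxed expectation is 1022.f rather than 1022.25f.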
    switch (paddingScheme)
    {
        case android::nn::kPaddingValid:
            ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
            break;
        case android::nn::kPaddingSame:
            ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
            DOCTEST_CHECK(outdata[1] == 0.f);
            break;
        default:
            DOCTEST_CHECK(false);
            break;
    }
}

} // namespace driverTestHelpers