/*
 * Copyright (c) 2020-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/core/WindowIterator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEFunctions.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "support/ToolchainSupport.h"
#include "utils/Utils.h"

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <iostream>

using namespace arm_compute;
using namespace utils;
// Find the min and max values in a float array
void find_min_max(int size, const float *data, float *min, float *max)
{
    *min = *max = data[0];
    for(int i = 0; i < size; i++)
    {
        const float val = data[i];
        *min            = std::min(*min, val);
        *max            = std::max(*max, val);
    }
}

// Return reasonable quantisation parameters to use for an array of floats,
// based on its min and max values
QuantizationInfo choose_quantization_params(float min, float max)
{
    // Extend the [min,max] interval to contain 0 so we can represent it exactly
    min = std::min(min, 0.f);
    max = std::max(max, 0.f);

    // Set the quantized min and max as float values
    const float qmin = 0;
    const float qmax = 255;

    // Determine the scale
    const float scale = (max - min) / (qmax - qmin);

    // Determine the zero-point, using the affine equation val = (qval - zerop) * scale
    const float zero_point_real = qmin - min / scale;

    // But we need to nudge the zero-point to an integer (an exact quantized value)
    std::uint8_t zero_point_nudged = 0;
    if(zero_point_real < qmin)
    {
        zero_point_nudged = qmin;
    }
    else if(zero_point_real > qmax)
    {
        zero_point_nudged = qmax;
    }
    else
    {
        zero_point_nudged = static_cast<std::uint8_t>(support::cpp11::round(zero_point_real));
    }

    QuantizationInfo qinfo = QuantizationInfo(scale, zero_point_nudged);
    return qinfo;
}
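
// Worked example (illustrative numbers, not computed by the code above):
// for min = -1.f and max = 3.f, scale = (3 - (-1)) / 255 ≈ 0.0157 and
// zero_point_real = 0 - (-1 / 0.0157) ≈ 63.75, which is nudged to 64.
// The affine mapping is then val ≈ (qval - 64) * 0.0157, so qval = 64
// represents 0.0 exactly, as required by the interval extension above.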

// Quantize an array of floats to QASYMM8 using the given quantisation info
void quantize_values(int size, qasymm8_t *output, const float *input, const QuantizationInfo &qinfo)
{
    for(int i = 0; i < size; i++)
    {
        output[i] = quantize_qasymm8(input[i], qinfo);
    }
}
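
// A sketch of what quantize_qasymm8() computes, assuming the illustrative
// parameters from the example above (scale ≈ 0.0157, offset = 64):
//   qval = clamp(round(val / scale) + offset, 0, 255)
// e.g. val = 1.f -> round(1 / 0.0157) + 64 = 64 + 64 = 128.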

int main(int argc, char **argv)
{
    Tensor src1;
    Tensor src2;
    Tensor dst0;
    Tensor q_src1;
    Tensor q_src2;
    Tensor q_dst0;
    Tensor q_res;
    Tensor q_res_output;
    size_t M             = 4;
    size_t N             = 4;
    size_t K             = 4;
    bool   default_input = true;

    // Parse args: three sizes are needed, so argc must be at least 4
    if(argc < 4) /* case default matrix sizes */
    {
        // Print help
        std::cout << "Usage: ./build/neon_gemm_qasymm8 M N K\n";
        std::cout << "Too few or no inputs provided. Using default M=4, N=4, K=4\n\n";
    }
    else /* case M N K arguments provided */
    {
        M             = strtol(argv[1], nullptr, 10);
        N             = strtol(argv[2], nullptr, 10);
        K             = strtol(argv[3], nullptr, 10);
        default_input = false;
    }

    /*** Floating point matrix multiplication ***/

    // Initialise input matrices
    NEGEMM fgemm{};

    src1.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::F32));
    src2.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::F32));
    dst0.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::F32));
    fgemm.configure(&src1, &src2, nullptr, &dst0, 1, 0);
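    // NEGEMM computes dst = alpha * (src1 * src2) + beta * src3; with alpha = 1,
    // beta = 0 and no bias tensor, this reduces to dst0 = src1 * src2.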

    // Allocate matrices
    src1.allocator()->allocate();
    src2.allocator()->allocate();
    dst0.allocator()->allocate();

    // Fill in tensors; by default fill in with known data for easy testing
    auto *src1_ptr = reinterpret_cast<float *>(src1.buffer());
    auto *src2_ptr = reinterpret_cast<float *>(src2.buffer());
    auto *dst0_ptr = reinterpret_cast<float *>(dst0.buffer());

    // Fill in: one is the identity matrix, the other is a matrix of sequential values
    // src1: Identity matrix
    for(size_t i = 0; i < M * K; i++)
    {
        src1_ptr[i] = 0;
    }
    // Only min(M, K) diagonal entries exist in an M x K matrix
    for(size_t i = 0; i < std::min(M, K); i++)
    {
        src1_ptr[i * K + i] = 1.0f;
    }

    // src2: Sequential values matrix
    for(size_t i = 0; i < K * N; i++)
    {
        src2_ptr[i] = i * 1.123f;
    }

    // Otherwise, if M, N, K are given, fill in with random values
    if(!default_input)
    {
        fill_random_tensor(src1, 0.f, 1.f);
        fill_random_tensor(src2, 0.f, 1.f);
    }

    // Run single-precision gemm
    fgemm.run();

#if ARM_COMPUTE_DEBUG_ENABLED
    // Print the input matrices and the result
    std::cout << "Input and result matrices:\n";
    src1.print(std::cout);
    src2.print(std::cout);
    dst0.print(std::cout);
#endif // ARM_COMPUTE_DEBUG_ENABLED

    /*** Quantised asymmetric 8-bit matrix multiplication ***/

    // Start by finding the quantisation parameters for each set of values
    float src1_min;
    float src1_max;
    float src2_min;
    float src2_max;
    float dst0_min;
    float dst0_max;

    find_min_max(M * K, src1_ptr, &src1_min, &src1_max);
    find_min_max(K * N, src2_ptr, &src2_min, &src2_max);
    find_min_max(M * N, dst0_ptr, &dst0_min, &dst0_max);

    const QuantizationInfo src1_qinfo = choose_quantization_params(src1_min, src1_max);
    const QuantizationInfo src2_qinfo = choose_quantization_params(src2_min, src2_max);
    const QuantizationInfo dst0_qinfo = choose_quantization_params(dst0_min, dst0_max);

    std::cout << "Matrix 1: min=" << src1_min << ", max=" << src1_max << ", ";
    std::cout << "QuantisationInfo(" << src1_qinfo.scale()[0] << ", " << src1_qinfo.offset()[0] << ")\n";
    std::cout << "Matrix 2: min=" << src2_min << ", max=" << src2_max << ", ";
    std::cout << "QuantisationInfo(" << src2_qinfo.scale()[0] << ", " << src2_qinfo.offset()[0] << ")\n";
    std::cout << "Result  : min=" << dst0_min << ", max=" << dst0_max << ", ";
    std::cout << "QuantisationInfo(" << dst0_qinfo.scale()[0] << ", " << dst0_qinfo.offset()[0] << ")\n";

    // We now have the quantisation info and can configure the quantised tensors
    q_src1.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::QASYMM8, src1_qinfo));
    q_src2.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::QASYMM8, src2_qinfo));
    q_dst0.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::QASYMM8, dst0_qinfo));

    // In this approach we use the NEQuantizationLayer function to perform quantization
    NEQuantizationLayer q1;
    NEQuantizationLayer q2;
    NEQuantizationLayer q3;
    q1.configure(&src1, &q_src1);
    q2.configure(&src2, &q_src2);
    q3.configure(&dst0, &q_dst0);

    // Configure low precision gemm and initialise result tensor (pre-output)
    NEGEMMLowpMatrixMultiplyCore qgemm;
    q_res.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::S32));
    qgemm.configure(&q_src1, &q_src2, nullptr, &q_res);
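    // NEGEMMLowpMatrixMultiplyCore accumulates in int32, taking the zero-points
    // into account; conceptually:
    //   q_res[m][n] = sum_k (q_src1[m][k] - src1_offset) * (q_src2[k][n] - src2_offset)
    // The int32 accumulators are then rescaled to QASYMM8 by the output stage below.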

    // Configure output stage after computing shift and multiplier parameters
    NEGEMMLowpOutputStage gemmlowp_output_stage;
    int                   output_multiplier;
    int                   output_shift;
    float                 multiplier = (src1_qinfo.uniform().scale * src2_qinfo.uniform().scale) / dst0_qinfo.uniform().scale;
    quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
    std::cout << "(q_multiplier, q_shift) = (" << output_multiplier << ", " << output_shift << ")\n\n";
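    // The real multiplier (src1_scale * src2_scale / dst_scale) is less than 1,
    // so it is encoded as a Q0.31 fixed-point value plus a right shift:
    //   multiplier ≈ output_multiplier * 2^-31 * 2^-output_shift
    // Illustrative (assumed) numbers: multiplier = 0.0015 -> 0.0015 * 2^9 = 0.768
    // lies in [0.5, 1), so output_shift = 9 and
    // output_multiplier = round(0.768 * 2^31) ≈ 1649267442.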

    GEMMLowpOutputStageInfo info;
    info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    info.gemmlowp_multiplier = output_multiplier;
    info.gemmlowp_shift      = output_shift;
    info.gemmlowp_offset     = dst0_qinfo.uniform().offset;
    info.output_data_type    = DataType::QASYMM8;
    q_res_output.info()->set_data_type(DataType::QASYMM8);
    q_res_output.info()->set_num_channels(1);
    gemmlowp_output_stage.configure(&q_res, nullptr, &q_res_output, info);
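    // Per element, the QUANTIZE_DOWN_FIXEDPOINT stage performs (a sketch of the
    // usual gemmlowp requantisation, not code from this file):
    //   out   = saturating_rounding_doubling_highmul(acc, gemmlowp_multiplier)
    //   out   = rounding_divide_by_pow2(out, gemmlowp_shift) + gemmlowp_offset
    //   q_out = clamp(out, 0, 255), stored as QASYMM8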

    // Allocate all tensors
    q_src1.allocator()->allocate();
    q_src2.allocator()->allocate();
    q_dst0.allocator()->allocate();
    q_res.allocator()->allocate();
    q_res_output.allocator()->allocate();

    // Run quantization layers (quantizes values of each tensor)
    q1.run();
    q2.run();
    q3.run();
    // Run low precision matrix multiply kernel
    qgemm.run();
    // Run output stage kernel
    gemmlowp_output_stage.run();
    std::cout << "\nTest Passed\n";

#if ARM_COMPUTE_DEBUG_ENABLED
    // Print quantized source matrices
    q_src1.print(std::cout);
    q_src2.print(std::cout);
    // Print result matrix in int32 form, before output stage processing
    std::cout << "Lowp GEMM output (int32):\n";
    q_res.print(std::cout);
    // Print the QASYMM8 (quantized) result matrix
    std::cout << "Output pipeline result matrix:\n";
    q_res_output.print(std::cout);

    // Expected result: dst0 quantized directly with the result's quantisation info
    std::cout << "Expected result:\n";
    q_dst0.print(std::cout);
#endif // ARM_COMPUTE_DEBUG_ENABLED

    return 0;
}