/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#include <assert.h>

#include <algorithm>

#include "arm_gemm.hpp"
#include "ndrange.hpp"
#include "utils.hpp"

#include "mergeresults.hpp"
#include "transform.hpp"

#ifdef CYCLE_PROFILING
#include "profiler.hpp"
#endif

namespace arm_gemm {

// Implementation of the GemmCommon abstract class.
template<typename strategy, typename To, typename Tr>
class GemmHybridQuantized : public GemmCommon<To, Tr> {
    typedef typename strategy::operand_type Toi;
    typedef typename strategy::result_type Tri;

    /* const properties set by constructor */
    const CPUInfo * const _ci;

    const unsigned int _Msize;
    const unsigned int _Nsize;
    const unsigned int _Ksize;

    const unsigned int _nbatches;
    const unsigned int _nmulti;

    /* Blocking info */
    const unsigned int _k_block;
    const unsigned int _n_block;
    const unsigned int _Mround;

    /* Pretransposed buffer. */
    const Toi *_B_transposed=nullptr;

    const NDRange<4> _window_range;

    Requantize32  _qp;
    int32_t *row_bias = nullptr;
    int32_t *col_bias = nullptr;

    void *working_space = nullptr;

    unsigned int _nthreads;

    unsigned int get_col_sum_size() const {
        return _Nsize * _nmulti * sizeof(int32_t);
    }

    static unsigned int compute_k_block(const GemmArgs &args) {
        // We don't support K blocks as we only temporarily store 32 bit results.
        return args._Ksize;

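        // NOTE: the cache-sizing heuristic below is never reached because of the
        // early return above; it is retained for reference only.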
        if (args._cfg && args._cfg->inner_block_size) {
            return args._cfg->inner_block_size;
        }

        const unsigned int L1_size = args._ci->get_L1_cache_size();

        // k_block: Find out how much of the larger array can be loaded into half the cache.
        // This should account for associative caches.
        unsigned int k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));

        // Needs to be (at least a single) multiple of the K unroll level.
        k_block /= strategy::k_unroll();
        k_block = std::max(k_block, 1U) * strategy::k_unroll();

        // Now tune to presented problem size; this is how many blocks we need.
        unsigned int numk_blocks = iceildiv(args._Ksize, k_block);

        // So divide the space equally into that many blocks.
        k_block = iceildiv(args._Ksize, numk_blocks);

        // And round UP to the K unroll level required.
        k_block = roundup(k_block, strategy::k_unroll());

        return k_block;
    }

    static unsigned int compute_n_block(const GemmArgs &args) {
        if (args._cfg && args._cfg->outer_block_size) {
            unsigned int n_block = args._cfg->outer_block_size;

            // Needs to be (at least a single) multiple of the kernel output width.
            n_block /= strategy::out_width();
            n_block = std::max(n_block, 1u) * strategy::out_width();

            return n_block;
        }

        const unsigned int k_block = compute_k_block(args);
        const unsigned int L2_size = args._ci->get_L2_cache_size();

        // n_block: Work out how many rows (of length k_block) will fit in the L2
        // Don't allocate more than 90% of the L2 to allow for overheads, and subtract off the L1 contents.
        const unsigned int scaled_l2_size = (L2_size * 9) / 10;
        const unsigned int k_block_area = k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_height());

        // .. if the L1 contents is bigger than the L2, just return a minimal size block.
        if (k_block_area > scaled_l2_size) {
            return strategy::out_width();
        }

        unsigned int n_block = (scaled_l2_size - k_block_area) / (sizeof(Toi) * k_block);

        // Needs to be (at least a single) multiple of the kernel output width.
        n_block /= strategy::out_width();
        n_block = std::max(n_block, 1u) * strategy::out_width();

        // And tune to the presented problem size.
        unsigned int numblocks = iceildiv(args._Nsize, n_block);
        n_block = iceildiv(args._Nsize, numblocks);
        n_block = roundup(n_block, strategy::out_width());

        assert(n_block > 0);

        return n_block;
    }
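    // Illustrative sizing of the heuristic above (hypothetical numbers, not taken
    // from any particular core): with a 256 KiB L2, k_block = 1024, sizeof(Toi) == 1
    // and a kernel with out_width() == 12 and out_height() == 8, the usable budget
    // is (262144 * 9) / 10 = 235929 bytes, the resident k_block panels occupy
    // 1024 * (12 + 8) = 20480 bytes, leaving room for (235929 - 20480) / 1024 = 210
    // rows of length k_block, which is then rounded to a multiple of out_width()
    // and rebalanced across the N dimension.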

public:
    GemmHybridQuantized(GemmHybridQuantized &) = delete;
    GemmHybridQuantized & operator= (GemmHybridQuantized &) = delete;

    /* Constructor */
    GemmHybridQuantized(const GemmArgs &args, const Requantize32 &qp)
              : _ci(args._ci), _Msize(args._Msize), _Nsize(args._Nsize), _Ksize(args._Ksize),
                _nbatches(args._nbatches), _nmulti(args._nmulti),
                _k_block(compute_k_block(args)), _n_block(compute_n_block(args)),
                _Mround(roundup(args._Msize, strategy::out_height())),
                _window_range(iceildiv(args._Msize, strategy::out_height()), _nbatches, iceildiv(_Nsize, _n_block), _nmulti),
                _qp (qp), _nthreads(args._maxthreads) { }
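    // The scheduling window is four-dimensional: blocks of out_height() rows of M,
    // then batches, then blocks of _n_block columns of N, then multis.  Each work
    // item covers the full K extent for its output tile.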

    // Interface implementation - Compulsory functions
    ndrange_t get_window_size() const override {
        return { _window_range.total_size() };
    }

    // This kernel can always be dynamically scheduled.
    bool supports_dynamic_scheduling() const override {
        return true;
    }

    // Execute
    void execute(const ndcoord_t &work_range, const ndcoord_t &, int threadid) override {
#ifdef CYCLE_PROFILING
        profiler prof;
#endif
        strategy strat(_ci);

        uintptr_t working_int = reinterpret_cast<uintptr_t>(working_space);

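        // Each thread uses its own slice of the working space: enough room for one
        // out_height() x _Nsize panel of 32-bit accumulators, matching the per-thread
        // size reported by get_working_size().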
        Tri *result_buffer = reinterpret_cast<Tri *>(working_int + (threadid * strategy::out_height() * _Nsize * sizeof(Tri)));

        /* Make sure we've been set up correctly. */
        assert(_B_transposed);
        static_assert(std::is_same<To, Toi>::value, "gemm_hybrid_quantized: Operand types must be the same.");

        /* For now, each work item implies all the K for a given output
         * pixel (so we don't need to synchronize access to the output
         * array).  So separate the loop over K blocks here.  */
        for (unsigned int k0=0; k0<_Ksize; k0+=_k_block) {
            unsigned int kmax   = std::min(k0 + _k_block, _Ksize);
            unsigned int kern_k = roundup(kmax-k0, strategy::k_unroll());

            auto p = _window_range.iterator(work_range.get_position(0), work_range.get_position_end(0));

            if (p.done()) {
                return;
            }

            do {
                const unsigned int m_start = p.dim(0) * strategy::out_height();
                const unsigned int m_end   = std::min((p.dim(0) + 1) * strategy::out_height(), _Msize);
                const unsigned int batch   = p.dim(1);
                const unsigned int n0      = p.dim(2) * _n_block;
                const unsigned int nmax    = std::min(n0 + _n_block, _Nsize);
                const unsigned int multi   = p.dim(3);

                int32_t local_row_sums[strategy::out_height()];

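                // Locate the pretransposed panel of B for this (multi, K block,
                // N block).  Panels are stored multi-major, then by K block, then
                // by N block, matching the loop order in pretranspose_B_array().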
                const Toi *b_panel = _B_transposed +
                                     (multi * roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_unroll())) +
                                     (k0 * roundup(_Nsize, strategy::out_width())) +
                                     (n0 * kern_k);

                {
#ifdef CYCLE_PROFILING
                    auto p = prof.ScopedProfiler(PROFILE_KERNEL, (m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width()));
#endif
                    strat.kernel(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + (m_start * this->_lda) + k0, this->_lda,
                                 b_panel,
                                 result_buffer, (nmax-n0),
                                 (m_end - m_start), (nmax - n0), kern_k,
                                 nullptr, Activation(), false);
                }

                {
#ifdef CYCLE_PROFILING
                    auto p = prof.ScopedProfiler(PROFILE_ROWSUMS, (m_end - m_start) * _Ksize);
#endif
                    compute_row_sums(_qp, _Ksize, (m_end - m_start),
                                     this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + (m_start * this->_lda), this->_lda,
                                     local_row_sums);
                }

                {
#ifdef CYCLE_PROFILING
                    auto p = prof.ScopedProfiler(PROFILE_QUANTIZE, (m_end - m_start) * _Nsize);
#endif

                    requantize_block_32(_qp, (nmax - n0), (m_end - m_start), result_buffer, (nmax - n0),
                                        this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc,
                                        local_row_sums, col_bias + (multi * _Nsize) + n0, n0);
                }
            } while (p.next_dim0());
        }
    }

    // Working space needed for intermediate result buffers.
    size_t get_working_size() const override {
        return (_nthreads * strategy::out_height() * _Nsize * sizeof(Tri));
    }

    void set_working_space(void *buffer) override {
        working_space = buffer;
    }

    // Interface implementation - pretransposed
    bool B_is_pretransposed() const override {
        return true;
    }

    bool B_pretranspose_required() const override {
        return (_B_transposed==nullptr);
    }

    size_t get_B_pretransposed_array_size() const override {
        return get_col_sum_size() + (roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_unroll()) * _nmulti * sizeof(Toi));
    }

    void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        col_bias = reinterpret_cast<int32_t *>(in_buffer);

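        // Sum each column of B once per multi; requantize_block_32() uses these
        // sums to apply the A-offset correction term during requantization.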
        for (unsigned int i=0; i<_nmulti; i++) {
            compute_col_sums(_qp, _Nsize, _Ksize, B + (i * B_multi_stride), ldb, col_bias + (i * _Nsize),  _Ksize, i, 0);
        }
    }

    void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        requantize_bias(in_buffer, B, ldb, B_multi_stride);

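        // The pretransposed buffer begins with the per-multi column sums written
        // above; the reordered B panels follow immediately after them.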
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
        Toi *buffer = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());
        _B_transposed = buffer;
        strategy strat(_ci);

        for (unsigned int multi=0; multi<_nmulti; multi++) {
            for (unsigned int k0=0; k0<_Ksize; k0+=_k_block) {
                const unsigned int kmax = std::min(k0 + _k_block, _Ksize);
                const unsigned int k_size = roundup(kmax-k0, strategy::k_unroll());

                for (unsigned int x0=0; x0<_Nsize; x0+=_n_block) {
                    const unsigned int xmax = std::min(x0+_n_block, _Nsize);

                    const unsigned int size = roundup(xmax-x0, strategy::out_width()) * k_size;

                    strat.transforms.PrepareB( buffer, B + (multi * B_multi_stride), ldb,
                                               x0, xmax, k0, kmax);

                    buffer += size;
                }
            }
        }
    }

    void set_pretransposed_B_data(void *in_buffer) override {
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
        _B_transposed = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());
        col_bias = reinterpret_cast<int32_t *>(in_buffer);
    }

    void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride) override {
        _qp.bias = bias;
        _qp.bias_multi_stride = bias_multi_stride;
    }

    GemmConfig get_config() override {
        GemmConfig c;

        c.method = GemmMethod::GEMM_HYBRID;
        c.inner_block_size = _k_block;
        c.outer_block_size = _n_block;
        c.filter = get_type_name<strategy>();

        return c;
    }
};

} // namespace arm_gemm